author    Ingo Molnar <mingo@elte.hu>  2009-12-16 18:33:49 +0100
committer Ingo Molnar <mingo@elte.hu>  2009-12-16 18:33:49 +0100
commit    ee1156c11a1121e118b0a7f2dec240f0d421b1fd (patch)
tree      b8771cc5a9758af9d7410fc519227c036c222130
parent    b9f8fcd55bbdb037e5332dbdb7b494f0b70861ac (diff)
parent    8bea8672edfca7ec5f661cafb218f1205863b343 (diff)
Merge branch 'linus' into sched/urgent
Conflicts:
	kernel/sched_idletask.c

Merge reason: resolve the conflicts, pick up latest changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--Documentation/ABI/testing/sysfs-devices-memory14
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu14
-rw-r--r--Documentation/device-mapper/snapshot.txt60
-rw-r--r--Documentation/feature-removal-schedule.txt16
-rw-r--r--Documentation/filesystems/proc.txt9
-rw-r--r--Documentation/hwmon/lis3lv02d55
-rw-r--r--Documentation/hwmon/w83627ehf10
-rw-r--r--Documentation/i2c/writing-clients2
-rw-r--r--Documentation/md.txt72
-rw-r--r--Documentation/memory-hotplug.txt11
-rw-r--r--Documentation/misc-devices/ad525x_dpot.txt57
-rw-r--r--Documentation/nommu-mmap.txt26
-rw-r--r--Documentation/spinlocks.txt184
-rw-r--r--Documentation/sysctl/kernel.txt31
-rw-r--r--Documentation/vm/hugetlbpage.txt262
-rw-r--r--Documentation/vm/ksm.txt22
-rw-r--r--Documentation/vm/page-types.c68
-rw-r--r--MAINTAINERS24
-rw-r--r--arch/alpha/include/asm/core_t2.h34
-rw-r--r--arch/alpha/include/asm/spinlock.h38
-rw-r--r--arch/alpha/include/asm/spinlock_types.h8
-rw-r--r--arch/alpha/kernel/core_t2.c2
-rw-r--r--arch/alpha/kernel/irq.c4
-rw-r--r--arch/alpha/kernel/srm_env.c65
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Kconfig.debug8
-rw-r--r--arch/arm/configs/zeus_defconfig2032
-rw-r--r--arch/arm/include/asm/mach/irq.h4
-rw-r--r--arch/arm/include/asm/spinlock.h40
-rw-r--r--arch/arm/include/asm/spinlock_types.h8
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/early_printk.c57
-rw-r--r--arch/arm/kernel/irq.c12
-rw-r--r--arch/arm/kernel/smp_twd.c1
-rw-r--r--arch/arm/mach-at91/include/mach/atmel-mci.h24
-rw-r--r--arch/arm/mach-clps711x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-footbridge/common.c22
-rw-r--r--arch/arm/mach-footbridge/include/mach/memory.h15
-rw-r--r--arch/arm/mach-integrator/include/mach/memory.h3
-rw-r--r--arch/arm/mach-ixp2000/include/mach/memory.h12
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/memory.h19
-rw-r--r--arch/arm/mach-lh7a40x/clocks.c8
-rw-r--r--arch/arm/mach-ns9xxx/irq.c8
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-ldp.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c2
-rw-r--r--arch/arm/mach-omap2/board-overo.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c118
-rw-r--r--arch/arm/mach-pxa/Kconfig14
-rw-r--r--arch/arm/mach-pxa/Makefile1
-rw-r--r--arch/arm/mach-pxa/em-x270.c11
-rw-r--r--arch/arm/mach-pxa/include/mach/arcom-pcmcia.h11
-rw-r--r--arch/arm/mach-pxa/include/mach/viper.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/zeus.h82
-rw-r--r--arch/arm/mach-pxa/viper.c20
-rw-r--r--arch/arm/mach-pxa/zeus.c820
-rw-r--r--arch/arm/mach-realview/Kconfig2
-rw-r--r--arch/arm/mach-s3c2442/mach-gta02.c3
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/memory.h2
-rw-r--r--arch/arm/mach-sa1100/Kconfig13
-rw-r--r--arch/arm/mach-sa1100/generic.c12
-rw-r--r--arch/arm/mach-w90x900/include/mach/nuc900_spi.h35
-rw-r--r--arch/arm/plat-omap/debug-leds.c2
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h16
-rw-r--r--arch/arm/vfp/vfpmodule.c83
-rw-r--r--arch/avr32/Kconfig13
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/atngw100/Kconfig25
-rw-r--r--arch/avr32/boards/atngw100/evklcd10x.c7
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c1
-rw-r--r--arch/avr32/boards/atngw100/setup.c121
-rw-r--r--arch/avr32/configs/atngw100_defconfig383
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig605
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig599
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1414
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1549
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1549
-rw-r--r--arch/avr32/configs/atstk1002_defconfig415
-rw-r--r--arch/avr32/configs/atstk1006_defconfig297
-rw-r--r--arch/avr32/include/asm/hardirq.h19
-rw-r--r--arch/avr32/kernel/irq.c13
-rw-r--r--arch/avr32/kernel/vmlinux.lds.S64
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c53
-rw-r--r--arch/avr32/mach-at32ap/include/mach/atmel-mci.h24
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/blackfin/include/asm/spinlock.h62
-rw-r--r--arch/blackfin/include/asm/spinlock_types.h8
-rw-r--r--arch/blackfin/kernel/irqchip.c6
-rw-r--r--arch/blackfin/kernel/traps.c4
-rw-r--r--arch/cris/include/arch-v32/arch/spinlock.h62
-rw-r--r--arch/cris/kernel/irq.c4
-rw-r--r--arch/frv/kernel/irq.c4
-rw-r--r--arch/h8300/kernel/irq.c4
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/include/asm/bitops.h2
-rw-r--r--arch/ia64/include/asm/meminit.h2
-rw-r--r--arch/ia64/include/asm/numa.h2
-rw-r--r--arch/ia64/include/asm/pgtable.h3
-rw-r--r--arch/ia64/include/asm/processor.h6
-rw-r--r--arch/ia64/include/asm/spinlock.h76
-rw-r--r--arch/ia64/include/asm/spinlock_types.h8
-rw-r--r--arch/ia64/kernel/acpi.c33
-rw-r--r--arch/ia64/kernel/head.S4
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/iosapic.c6
-rw-r--r--arch/ia64/kernel/irq.c4
-rw-r--r--arch/ia64/kernel/irq_ia64.c4
-rw-r--r--arch/ia64/kernel/mca_asm.S2
-rw-r--r--arch/ia64/kernel/relocate_kernel.S2
-rw-r--r--arch/ia64/kernel/setup.c27
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S11
-rw-r--r--arch/ia64/mm/contig.c99
-rw-r--r--arch/ia64/mm/discontig.c129
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/ia64/sn/kernel/sn2/sn2_smp.c8
-rw-r--r--arch/ia64/xen/irq_xen.c131
-rw-r--r--arch/ia64/xen/time.c22
-rw-r--r--arch/m32r/include/asm/spinlock.h48
-rw-r--r--arch/m32r/include/asm/spinlock_types.h8
-rw-r--r--arch/m32r/kernel/irq.c4
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h4
-rw-r--r--arch/m68k/sun3/mmu_emu.c8
-rw-r--r--arch/microblaze/Kconfig19
-rw-r--r--arch/microblaze/Kconfig.debug3
-rw-r--r--arch/microblaze/Makefile2
-rw-r--r--arch/microblaze/boot/Makefile15
-rw-r--r--arch/microblaze/include/asm/cache.h16
-rw-r--r--arch/microblaze/include/asm/cacheflush.h123
-rw-r--r--arch/microblaze/include/asm/cpuinfo.h5
-rw-r--r--arch/microblaze/include/asm/device.h12
-rw-r--r--arch/microblaze/include/asm/ftrace.h25
-rw-r--r--arch/microblaze/include/asm/futex.h127
-rw-r--r--arch/microblaze/include/asm/irqflags.h112
-rw-r--r--arch/microblaze/include/asm/page.h3
-rw-r--r--arch/microblaze/include/asm/pgalloc.h9
-rw-r--r--arch/microblaze/include/asm/pvr.h30
-rw-r--r--arch/microblaze/include/asm/setup.h2
-rw-r--r--arch/microblaze/include/asm/system.h2
-rw-r--r--arch/microblaze/include/asm/uaccess.h12
-rw-r--r--arch/microblaze/kernel/Makefile14
-rw-r--r--arch/microblaze/kernel/cpu/Makefile4
-rw-r--r--arch/microblaze/kernel/cpu/cache.c663
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c15
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c17
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c7
-rw-r--r--arch/microblaze/kernel/cpu/mb.c8
-rw-r--r--arch/microblaze/kernel/cpu/pvr.c2
-rw-r--r--arch/microblaze/kernel/entry-nommu.S2
-rw-r--r--arch/microblaze/kernel/entry.S19
-rw-r--r--arch/microblaze/kernel/ftrace.c237
-rw-r--r--arch/microblaze/kernel/heartbeat.c15
-rw-r--r--arch/microblaze/kernel/intc.c10
-rw-r--r--arch/microblaze/kernel/irq.c4
-rw-r--r--arch/microblaze/kernel/mcount.S170
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c5
-rw-r--r--arch/microblaze/kernel/process.c1
-rw-r--r--arch/microblaze/kernel/reset.c140
-rw-r--r--arch/microblaze/kernel/setup.c40
-rw-r--r--arch/microblaze/kernel/signal.c35
-rw-r--r--arch/microblaze/kernel/stacktrace.c65
-rw-r--r--arch/microblaze/kernel/syscall_table.S4
-rw-r--r--arch/microblaze/kernel/timer.c28
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S6
-rw-r--r--arch/microblaze/lib/uaccess.c7
-rw-r--r--arch/microblaze/mm/init.c1
-rw-r--r--arch/microblaze/mm/pgtable.c10
-rw-r--r--arch/microblaze/oprofile/Makefile13
-rw-r--r--arch/microblaze/oprofile/microblaze_oprofile.c22
-rw-r--r--arch/microblaze/platform/Kconfig.platform21
-rw-r--r--arch/microblaze/platform/generic/Kconfig.auto29
-rw-r--r--arch/microblaze/platform/generic/system.dts38
-rw-r--r--arch/microblaze/platform/platform.c2
-rw-r--r--arch/mips/include/asm/spinlock.h78
-rw-r--r--arch/mips/include/asm/spinlock_types.h8
-rw-r--r--arch/mips/kernel/irq.c4
-rw-r--r--arch/mips/vr41xx/common/icu.c92
-rw-r--r--arch/mn10300/kernel/irq.c4
-rw-r--r--arch/mn10300/kernel/kprobes.c61
-rw-r--r--arch/parisc/include/asm/atomic.h10
-rw-r--r--arch/parisc/include/asm/spinlock.h64
-rw-r--r--arch/parisc/include/asm/spinlock_types.h12
-rw-r--r--arch/parisc/kernel/irq.c4
-rw-r--r--arch/parisc/lib/bitops.c4
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/spinlock.h68
-rw-r--r--arch/powerpc/include/asm/spinlock_types.h8
-rw-r--r--arch/powerpc/kernel/irq.c8
-rw-r--r--arch/powerpc/kernel/perf_callchain.c4
-rw-r--r--arch/powerpc/kernel/rtas.c16
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/lib/locks.c8
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c8
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c22
-rw-r--r--arch/powerpc/platforms/iseries/irq.c4
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c10
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c4
-rw-r--r--arch/powerpc/platforms/pseries/xics.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c4
-rw-r--r--arch/powerpc/sysdev/uic.c8
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/include/asm/spinlock.h66
-rw-r--r--arch/s390/include/asm/spinlock_types.h8
-rw-r--r--arch/s390/kernel/debug.c3
-rw-r--r--arch/s390/lib/spinlock.c46
-rw-r--r--arch/sh/include/asm/spinlock.h58
-rw-r--r--arch/sh/include/asm/spinlock_types.h8
-rw-r--r--arch/sh/kernel/irq.c4
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/Kconfig.debug14
-rw-r--r--arch/sparc/include/asm/spinlock_32.h62
-rw-r--r--arch/sparc/include/asm/spinlock_64.h54
-rw-r--r--arch/sparc/include/asm/spinlock_types.h8
-rw-r--r--arch/sparc/include/asm/string_32.h78
-rw-r--r--arch/sparc/include/asm/string_64.h25
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/uaccess_32.h15
-rw-r--r--arch/sparc/include/asm/uaccess_64.h23
-rw-r--r--arch/sparc/include/asm/unistd.h2
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/sparc/kernel/irq_64.c8
-rw-r--r--arch/sparc/kernel/kprobes.c3
-rw-r--r--arch/sparc/kernel/ldc.c4
-rw-r--r--arch/sparc/kernel/mdesc.c21
-rw-r--r--arch/sparc/kernel/nmi.c8
-rw-r--r--arch/sparc/kernel/of_device_64.c14
-rw-r--r--arch/sparc/kernel/ptrace_64.c10
-rw-r--r--arch/sparc/kernel/syscalls.S14
-rw-r--r--arch/sparc/kernel/time_64.c26
-rw-r--r--arch/sparc/kernel/unaligned_32.c15
-rw-r--r--arch/sparc/kernel/unaligned_64.c23
-rw-r--r--arch/sparc/kernel/visemul.c3
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/bzero.S5
-rw-r--r--arch/sparc/lib/checksum_32.S2
-rw-r--r--arch/sparc/lib/ksyms.c2
-rw-r--r--arch/sparc/lib/mcount.S5
-rw-r--r--arch/sparc/lib/memcpy.S3
-rw-r--r--arch/sparc/lib/memset.S3
-rw-r--r--arch/sparc/lib/usercopy.c8
-rw-r--r--arch/sparc/math-emu/math_32.c3
-rw-r--r--arch/sparc/math-emu/math_64.c2
-rw-r--r--arch/sparc/mm/fault_64.c24
-rw-r--r--arch/um/drivers/mconsole_kern.c30
-rw-r--r--arch/um/drivers/ubd_kern.c36
-rw-r--r--arch/um/kernel/exitcode.c43
-rw-r--r--arch/um/kernel/irq.c4
-rw-r--r--arch/um/kernel/process.c31
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/include/asm/geode.h219
-rw-r--r--arch/x86/include/asm/irq_vectors.h2
-rw-r--r--arch/x86/include/asm/msr.h3
-rw-r--r--arch/x86/include/asm/olpc.h2
-rw-r--r--arch/x86/include/asm/paravirt.h14
-rw-r--r--arch/x86/include/asm/paravirt_types.h14
-rw-r--r--arch/x86/include/asm/percpu.h104
-rw-r--r--arch/x86/include/asm/spinlock.h62
-rw-r--r--arch/x86/include/asm/spinlock_types.h10
-rw-r--r--arch/x86/include/asm/topology.h9
-rw-r--r--arch/x86/include/asm/trampoline.h1
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/aperture_64.c11
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/apic/nmi.c8
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c15
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/common.c16
-rw-r--r--arch/x86/kernel/cpu/cpu_debug.c30
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c28
-rw-r--r--arch/x86/kernel/cpu/intel.c2
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c67
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c20
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c11
-rw-r--r--arch/x86/kernel/ds.c4
-rw-r--r--arch/x86/kernel/dumpstack.c8
-rw-r--r--arch/x86/kernel/e820.c11
-rw-r--r--arch/x86/kernel/geode_32.c196
-rw-r--r--arch/x86/kernel/head32.c2
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/irq.c14
-rw-r--r--arch/x86/kernel/mfgpt_32.c410
-rw-r--r--arch/x86/kernel/mpparse.c3
-rw-r--r--arch/x86/kernel/olpc.c4
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c4
-rw-r--r--arch/x86/kernel/pci-dma.c5
-rw-r--r--arch/x86/kernel/pci-gart_64.c3
-rw-r--r--arch/x86/kernel/reboot_fixups_32.c2
-rw-r--r--arch/x86/kernel/setup.c13
-rw-r--r--arch/x86/kernel/smpboot.c45
-rw-r--r--arch/x86/kernel/trampoline.c20
-rw-r--r--arch/x86/kernel/tsc_sync.c10
-rw-r--r--arch/x86/kvm/svm.c64
-rw-r--r--arch/x86/lib/msr.c26
-rw-r--r--arch/x86/mm/mmio-mod.c2
-rw-r--r--arch/x86/xen/smp.c41
-rw-r--r--arch/x86/xen/spinlock.c16
-rw-r--r--arch/x86/xen/time.c24
-rw-r--r--arch/xtensa/kernel/irq.c4
-rw-r--r--block/cfq-iosched.c94
-rw-r--r--crypto/cryptd.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/node.c196
-rw-r--r--drivers/block/drbd/drbd_nl.c3
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/xd.c30
-rw-r--r--drivers/char/hvc_iucv.c2
-rw-r--r--drivers/char/mem.c161
-rw-r--r--drivers/char/misc.c26
-rw-r--r--drivers/char/nvram.c14
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/vt.c43
-rw-r--r--drivers/clocksource/Kconfig9
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/cs5535-clockevt.c197
-rw-r--r--drivers/cpufreq/cpufreq.c16
-rw-r--r--drivers/cpufreq/freq_table.c12
-rw-r--r--drivers/cpuidle/governors/ladder.c3
-rw-r--r--drivers/crypto/padlock-aes.c12
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/dmaengine.c36
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/txx9dmac.c2
-rw-r--r--drivers/edac/amd64_edac.c46
-rw-r--r--drivers/firmware/Kconfig4
-rw-r--r--drivers/firmware/dmi_scan.c5
-rw-r--r--drivers/gpio/Kconfig10
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/adp5520-gpio.c36
-rw-r--r--drivers/gpio/cs5535-gpio.c355
-rw-r--r--drivers/gpio/twl4030-gpio.c20
-rw-r--r--drivers/gpio/wm831x-gpio.c17
-rw-r--r--drivers/hwmon/Kconfig22
-rw-r--r--drivers/hwmon/adm1021.c11
-rw-r--r--drivers/hwmon/adm1025.c12
-rw-r--r--drivers/hwmon/adm1026.c11
-rw-r--r--drivers/hwmon/adm1029.c14
-rw-r--r--drivers/hwmon/adm1031.c9
-rw-r--r--drivers/hwmon/adm9240.c9
-rw-r--r--drivers/hwmon/ads7828.c13
-rw-r--r--drivers/hwmon/adt7462.c11
-rw-r--r--drivers/hwmon/adt7470.c11
-rw-r--r--drivers/hwmon/adt7473.c11
-rw-r--r--drivers/hwmon/adt7475.c6
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asb100.c11
-rw-r--r--drivers/hwmon/atxp1.c11
-rw-r--r--drivers/hwmon/dme1737.c10
-rw-r--r--drivers/hwmon/ds1621.c9
-rw-r--r--drivers/hwmon/f75375s.c9
-rw-r--r--drivers/hwmon/fschmd.c9
-rw-r--r--drivers/hwmon/gl518sm.c11
-rw-r--r--drivers/hwmon/gl520sm.c13
-rw-r--r--drivers/hwmon/lis3lv02d.c231
-rw-r--r--drivers/hwmon/lis3lv02d.h51
-rw-r--r--drivers/hwmon/lm63.c15
-rw-r--r--drivers/hwmon/lm73.c9
-rw-r--r--drivers/hwmon/lm75.c14
-rw-r--r--drivers/hwmon/lm77.c12
-rw-r--r--drivers/hwmon/lm78.c9
-rw-r--r--drivers/hwmon/lm80.c13
-rw-r--r--drivers/hwmon/lm83.c12
-rw-r--r--drivers/hwmon/lm85.c16
-rw-r--r--drivers/hwmon/lm87.c12
-rw-r--r--drivers/hwmon/lm90.c14
-rw-r--r--drivers/hwmon/lm92.c9
-rw-r--r--drivers/hwmon/lm93.c8
-rw-r--r--drivers/hwmon/lm95241.c9
-rw-r--r--drivers/hwmon/max1619.c14
-rw-r--r--drivers/hwmon/max6650.c10
-rw-r--r--drivers/hwmon/pcf8591.c5
-rw-r--r--drivers/hwmon/smsc47m192.c11
-rw-r--r--drivers/hwmon/thmc50.c8
-rw-r--r--drivers/hwmon/tmp401.c9
-rw-r--r--drivers/hwmon/tmp421.c7
-rw-r--r--drivers/hwmon/w83627ehf.c72
-rw-r--r--drivers/hwmon/w83781d.c10
-rw-r--r--drivers/hwmon/w83791d.c9
-rw-r--r--drivers/hwmon/w83792d.c9
-rw-r--r--drivers/hwmon/w83793.c9
-rw-r--r--drivers/hwmon/w83l785ts.c14
-rw-r--r--drivers/hwmon/w83l786ng.c10
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/i2c-core.c52
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c3
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c220
-rw-r--r--drivers/input/keyboard/adp5588-keys.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c6
-rw-r--r--drivers/input/misc/bfin_rotary.c2
-rw-r--r--drivers/input/misc/pcf50633-input.c7
-rw-r--r--drivers/input/misc/pcspkr.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c4
-rw-r--r--drivers/input/touchscreen/pcap_ts.c2
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c3
-rw-r--r--drivers/isdn/hisax/avma1_cs.c3
-rw-r--r--drivers/isdn/hisax/elsa_cs.c2
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c2
-rw-r--r--drivers/isdn/hisax/teles_cs.c2
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/ledtrig-timer.c4
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/md/Kconfig9
-rw-r--r--drivers/md/bitmap.c449
-rw-r--r--drivers/md/bitmap.h19
-rw-r--r--drivers/md/dm-crypt.c207
-rw-r--r--drivers/md/dm-exception-store.c33
-rw-r--r--drivers/md/dm-exception-store.h62
-rw-r--r--drivers/md/dm-io.c120
-rw-r--r--drivers/md/dm-ioctl.c123
-rw-r--r--drivers/md/dm-kcopyd.c5
-rw-r--r--drivers/md/dm-log.c77
-rw-r--r--drivers/md/dm-mpath.c95
-rw-r--r--drivers/md/dm-raid1.c219
-rw-r--r--drivers/md/dm-region-hash.c31
-rw-r--r--drivers/md/dm-snap-persistent.c195
-rw-r--r--drivers/md/dm-snap-transient.c24
-rw-r--r--drivers/md/dm-snap.c1279
-rw-r--r--drivers/md/dm-sysfs.c10
-rw-r--r--drivers/md/dm-table.c9
-rw-r--r--drivers/md/dm-uevent.c9
-rw-r--r--drivers/md/dm.c643
-rw-r--r--drivers/md/dm.h13
-rw-r--r--drivers/md/faulty.c1
-rw-r--r--drivers/md/linear.c3
-rw-r--r--drivers/md/md.c413
-rw-r--r--drivers/md/md.h51
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid0.c3
-rw-r--r--drivers/md/raid1.c217
-rw-r--r--drivers/md/raid1.h5
-rw-r--r--drivers/md/raid10.c116
-rw-r--r--drivers/md/raid5.c63
-rw-r--r--drivers/md/raid6algos.c20
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/video/davinci/vpif_capture.c2
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/mfd/88pm8607.c302
-rw-r--r--drivers/mfd/Kconfig41
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/ab3100-core.c3
-rw-r--r--drivers/mfd/ab4500-core.c208
-rw-r--r--drivers/mfd/adp5520.c379
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/mfd/ezx-pcap.c1
-rw-r--r--drivers/mfd/mc13783-core.c757
-rw-r--r--drivers/mfd/pcf50633-adc.c5
-rw-r--r--drivers/mfd/pcf50633-core.c76
-rw-r--r--drivers/mfd/tps65010.c30
-rw-r--r--drivers/mfd/twl-core.c (renamed from drivers/mfd/twl4030-core.c)385
-rw-r--r--drivers/mfd/twl4030-codec.c10
-rw-r--r--drivers/mfd/twl4030-irq.c158
-rw-r--r--drivers/mfd/twl4030-power.c126
-rw-r--r--drivers/mfd/twl6030-irq.c299
-rw-r--r--drivers/mfd/wm831x-core.c232
-rw-r--r--drivers/mfd/wm831x-irq.c209
-rw-r--r--drivers/mfd/wm8350-core.c771
-rw-r--r--drivers/mfd/wm8350-irq.c529
-rw-r--r--drivers/mfd/wm8350-regmap.c8
-rw-r--r--drivers/misc/Kconfig48
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot.c666
-rw-r--r--drivers/misc/cs5535-mfgpt.c370
-rw-r--r--drivers/misc/eeprom/eeprom.c8
-rw-r--r--drivers/misc/ics932s401.c11
-rw-r--r--drivers/misc/ioc4.c16
-rw-r--r--drivers/misc/ti_dac7512.c101
-rw-r--r--drivers/mmc/core/Kconfig4
-rw-r--r--drivers/mmc/core/core.c16
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c23
-rw-r--r--drivers/mmc/core/sd.c21
-rw-r--r--drivers/mmc/core/sdio_cis.c167
-rw-r--r--drivers/mmc/host/Kconfig27
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c141
-rw-r--r--drivers/mmc/host/bfin_sdh.c639
-rw-r--r--drivers/mmc/host/davinci_mmc.c1349
-rw-r--r--drivers/mmc/host/mxcmmc.c10
-rw-r--r--drivers/mmc/host/omap.c10
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c13
-rw-r--r--drivers/mmc/host/sdhci-pci.c75
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mtd/nand/nomadik_nand.c2
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/chelsio/sge.c5
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/veth.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/oprofile/cpu_buffer.c19
-rw-r--r--drivers/oprofile/cpu_buffer.h4
-rw-r--r--drivers/oprofile/oprofile_stats.c4
-rw-r--r--drivers/parisc/pdc_stable.c9
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pcmcia/Kconfig8
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/cardbus.c23
-rw-r--r--drivers/pcmcia/cistpl.c181
-rw-r--r--drivers/pcmcia/cs.c12
-rw-r--r--drivers/pcmcia/ds.c66
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c45
-rw-r--r--drivers/pcmcia/pcmcia_resource.c37
-rw-r--r--drivers/pcmcia/pxa2xx_base.c21
-rw-r--r--drivers/pcmcia/pxa2xx_base.h3
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c2
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c2
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c119
-rw-r--r--drivers/pcmcia/rsrc_mgr.c14
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c315
-rw-r--r--drivers/pcmcia/socket_sysfs.c6
-rw-r--r--drivers/pcmcia/yenta_socket.c149
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c7
-rw-r--r--drivers/pnp/interface.c36
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/pcf50633-charger.c231
-rw-r--r--drivers/power/power_supply_sysfs.c5
-rw-r--r--drivers/power/wm831x_backup.c233
-rw-r--r--drivers/power/wm831x_power.c144
-rw-r--r--drivers/power/wm8350_power.c63
-rw-r--r--drivers/power/wm97xx_battery.c2
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/pcf50633-regulator.c5
-rw-r--r--drivers/regulator/twl-regulator.c (renamed from drivers/regulator/twl4030-regulator.c)287
-rw-r--r--drivers/regulator/wm8350-regulator.c10
-rw-r--r--drivers/rtc/Kconfig6
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-pcf50633.c5
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-twl.c (renamed from drivers/rtc/rtc-twl4030.c)284
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c25
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/net/netiucv.c10
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/serial/ioc3_serial.c4
-rw-r--r--drivers/serial/ioc4_serial.c20
-rw-r--r--drivers/serial/pxa.c2
-rw-r--r--drivers/serial/sh-sci.c2
-rw-r--r--drivers/sn/ioc3.c17
-rw-r--r--drivers/spi/Kconfig40
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/au1550_spi.c10
-rw-r--r--drivers/spi/mpc52xx_spi.c86
-rw-r--r--drivers/spi/omap_spi_100k.c635
-rw-r--r--drivers/spi/pxa2xx_spi.c2
-rw-r--r--drivers/spi/spi_imx.c35
-rw-r--r--drivers/spi/spi_mpc8xxx.c2
-rw-r--r--drivers/spi/spi_nuc900.c504
-rw-r--r--drivers/spi/spi_s3c24xx.c2
-rw-r--r--drivers/spi/spi_sh_msiof.c691
-rw-r--r--drivers/spi/spidev.c6
-rw-r--r--drivers/spi/xilinx_spi.c355
-rw-r--r--drivers/spi/xilinx_spi.h32
-rw-r--r--drivers/spi/xilinx_spi_of.c134
-rw-r--r--drivers/spi/xilinx_spi_pltfm.c102
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.h2
-rw-r--r--drivers/usb/core/usb.c25
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ohci-au1xxx.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c2
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/otg/twl4030-usb.c38
-rw-r--r--drivers/video/backlight/adp5520_bl.c123
-rw-r--r--drivers/video/backlight/da903x_bl.c2
-rw-r--r--drivers/video/backlight/lcd.c4
-rw-r--r--drivers/video/display/display-sysfs.c2
-rw-r--r--drivers/video/geode/display_gx.c4
-rw-r--r--drivers/video/geode/gxfb.h2
-rw-r--r--drivers/video/geode/gxfb_core.c2
-rw-r--r--drivers/video/geode/lxfb.h2
-rw-r--r--drivers/video/geode/lxfb_ops.c4
-rw-r--r--drivers/video/geode/suspend_gx.c2
-rw-r--r--drivers/video/geode/video_gx.c2
-rw-r--r--drivers/video/hitfb.c2
-rw-r--r--drivers/video/omap/lcd_2430sdp.c4
-rw-r--r--drivers/video/output.c2
-rw-r--r--drivers/video/pxafb.c2
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/watchdog/adx_wdt.c2
-rw-r--r--drivers/watchdog/twl4030_wdt.c4
-rw-r--r--fs/Kconfig4
-rw-r--r--fs/binfmt_elf_fdpic.c3
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/cachefiles/daemon.c4
-rw-r--r--fs/compat_ioctl.c18
-rw-r--r--fs/exec.c9
-rw-r--r--fs/ext4/Kconfig1
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/super.c7
-rw-r--r--fs/gfs2/Kconfig1
-rw-r--r--fs/gfs2/sys.c16
-rw-r--r--fs/hfs/catalog.c4
-rw-r--r--fs/hfs/dir.c11
-rw-r--r--fs/hfs/super.c7
-rw-r--r--fs/jbd/Kconfig1
-rw-r--r--fs/jbd2/Kconfig1
-rw-r--r--fs/nfs/callback.c13
-rw-r--r--fs/nfs/callback.h16
-rw-r--r--fs/nfs/callback_proc.c64
-rw-r--r--fs/nfs/callback_xdr.c34
-rw-r--r--fs/nfs/client.c14
-rw-r--r--fs/nfs/delegation.c77
-rw-r--r--fs/nfs/delegation.h7
-rw-r--r--fs/nfs/dir.c67
-rw-r--r--fs/nfs/dns_resolve.c4
-rw-r--r--fs/nfs/internal.h54
-rw-r--r--fs/nfs/iostat.h24
-rw-r--r--fs/nfs/nfs4_fs.h12
-rw-r--r--fs/nfs/nfs4proc.c458
-rw-r--r--fs/nfs/nfs4state.c225
-rw-r--r--fs/nfs/nfs4xdr.c135
-rw-r--r--fs/nfs/read.c12
-rw-r--r--fs/nfs/super.c104
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfs/write.c8
-rw-r--r--fs/nilfs2/Kconfig1
-rw-r--r--fs/proc/base.c68
-rw-r--r--fs/proc/task_mmu.c45
-rw-r--r--fs/proc/task_nommu.c8
-rw-r--r--fs/reiserfs/Kconfig1
-rw-r--r--fs/ubifs/debug.c9
-rw-r--r--fs/ubifs/super.c7
-rw-r--r--fs/udf/balloc.c2
-rw-r--r--fs/udf/file.c1
-rw-r--r--fs/udf/inode.c24
-rw-r--r--fs/udf/namei.c38
-rw-r--r--fs/udf/super.c32
-rw-r--r--fs/xfs/Makefile8
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c52
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c117
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h33
-rw-r--r--fs/xfs/linux-2.6/xfs_fs_subr.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c87
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.h45
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c104
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h7
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.c75
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h1369
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h4
-rw-r--r--fs/xfs/quota/xfs_dquot.c110
-rw-r--r--fs/xfs/quota/xfs_dquot.h21
-rw-r--r--fs/xfs/quota/xfs_qm.c40
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c4
-rw-r--r--fs/xfs/support/ktrace.c323
-rw-r--r--fs/xfs/support/ktrace.h85
-rw-r--r--fs/xfs/xfs.h16
-rw-r--r--fs/xfs/xfs_ag.h14
-rw-r--r--fs/xfs/xfs_alloc.c230
-rw-r--r--fs/xfs/xfs_alloc.h27
-rw-r--r--fs/xfs/xfs_alloc_btree.c1
-rw-r--r--fs/xfs/xfs_attr.c107
-rw-r--r--fs/xfs/xfs_attr.h10
-rw-r--r--fs/xfs/xfs_attr_leaf.c14
-rw-r--r--fs/xfs/xfs_attr_sf.h40
-rw-r--r--fs/xfs/xfs_bmap.c942
-rw-r--r--fs/xfs/xfs_bmap.h58
-rw-r--r--fs/xfs/xfs_bmap_btree.c6
-rw-r--r--fs/xfs/xfs_btree.c5
-rw-r--r--fs/xfs/xfs_btree_trace.h17
-rw-r--r--fs/xfs/xfs_buf_item.c87
-rw-r--r--fs/xfs/xfs_buf_item.h20
-rw-r--r--fs/xfs/xfs_da_btree.c3
-rw-r--r--fs/xfs/xfs_da_btree.h7
-rw-r--r--fs/xfs/xfs_dfrag.c2
-rw-r--r--fs/xfs/xfs_dir2.c8
-rw-r--r--fs/xfs/xfs_dir2_block.c20
-rw-r--r--fs/xfs/xfs_dir2_leaf.c21
-rw-r--r--fs/xfs/xfs_dir2_node.c27
-rw-r--r--fs/xfs/xfs_dir2_sf.c26
-rw-r--r--fs/xfs/xfs_dir2_trace.c216
-rw-r--r--fs/xfs/xfs_dir2_trace.h72
-rw-r--r--fs/xfs/xfs_filestream.c8
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_iget.c111
-rw-r--r--fs/xfs/xfs_inode.c79
-rw-r--r--fs/xfs/xfs_inode.h82
-rw-r--r--fs/xfs/xfs_inode_item.c5
-rw-r--r--fs/xfs/xfs_iomap.c85
-rw-r--r--fs/xfs/xfs_iomap.h8
-rw-r--r--fs/xfs/xfs_log.c181
-rw-r--r--fs/xfs/xfs_log_priv.h20
-rw-r--r--fs/xfs/xfs_log_recover.c15
-rw-r--r--fs/xfs/xfs_mount.c14
-rw-r--r--fs/xfs/xfs_quota.h8
-rw-r--r--fs/xfs/xfs_rename.c1
-rw-r--r--fs/xfs/xfs_rtalloc.c1
-rw-r--r--fs/xfs/xfs_rw.c3
-rw-r--r--fs/xfs/xfs_trans.h47
-rw-r--r--fs/xfs/xfs_trans_buf.c62
-rw-r--r--fs/xfs/xfs_vnodeops.c8
-rw-r--r--include/asm-generic/bitops/atomic.h10
-rw-r--r--include/asm-generic/bug.h8
-rw-r--r--include/asm-generic/mman-common.h5
-rw-r--r--include/asm-generic/percpu.h5
-rw-r--r--include/linux/atmel-mci.h4
-rw-r--r--include/linux/cs5535.h172
-rw-r--r--include/linux/ctype.h3
-rw-r--r--include/linux/device-mapper.h8
-rw-r--r--include/linux/dm-dirty-log.h6
-rw-r--r--include/linux/dm-ioctl.h13
-rw-r--r--include/linux/dm-region-hash.h3
-rw-r--r--include/linux/dynamic_debug.h13
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/err.h5
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/hw_breakpoint.h2
-rw-r--r--include/linux/i2c.h92
-rw-r--r--include/linux/i2c/tps65010.h19
-rw-r--r--include/linux/i2c/twl.h (renamed from include/linux/i2c/twl4030.h)209
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/kallsyms.h12
-rw-r--r--include/linux/kernel.h55
-rw-r--r--include/linux/ksm.h88
-rw-r--r--include/linux/lis3lv02d.h15
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h3
-rw-r--r--include/linux/mfd/88pm8607.h217
-rw-r--r--include/linux/mfd/ab4500.h262
-rw-r--r--include/linux/mfd/adp5520.h299
-rw-r--r--include/linux/mfd/ezx-pcap.h3
-rw-r--r--include/linux/mfd/mc13783-private.h208
-rw-r--r--include/linux/mfd/mc13783.h120
-rw-r--r--include/linux/mfd/pcf50633/core.h17
-rw-r--r--include/linux/mfd/pcf50633/mbc.h1
-rw-r--r--include/linux/mfd/wm831x/core.h43
-rw-r--r--include/linux/mfd/wm831x/pdata.h1
-rw-r--r--include/linux/mfd/wm8350/core.h14
-rw-r--r--include/linux/mfd/wm8350/gpio.h18
-rw-r--r--include/linux/migrate.h8
-rw-r--r--include/linux/mm.h25
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h13
-rw-r--r--include/linux/node.h16
-rw-r--r--include/linux/nodemask.h33
-rw-r--r--include/linux/numa.h2
-rw-r--r--include/linux/page-flags.h8
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/percpu.h434
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/plist.h43
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/raid/pq.h19
-rw-r--r--include/linux/rmap.h43
-rw-r--r--include/linux/rtmutex.h6
-rw-r--r--include/linux/rwlock.h125
-rw-r--r--include/linux/rwlock_api_smp.h282
-rw-r--r--include/linux/rwlock_types.h56
-rw-r--r--include/linux/rwsem-spinlock.h6
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/spi/mpc52xx_spi.h10
-rw-r--r--include/linux/spi/sh_msiof.h10
-rw-r--r--include/linux/spi/xilinx_spi.h20
-rw-r--r--include/linux/spinlock.h377
-rw-r--r--include/linux/spinlock_api_smp.h360
-rw-r--r--include/linux/spinlock_api_up.h66
-rw-r--r--include/linux/spinlock_types.h92
-rw-r--r--include/linux/spinlock_types_up.h12
-rw-r--r--include/linux/spinlock_up.h42
-rw-r--r--include/linux/string.h10
-rw-r--r--include/linux/sunrpc/sched.h2
-rw-r--r--include/linux/swap.h67
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/vmstat.h12
-rw-r--r--include/linux/vt.h15
-rw-r--r--include/net/neighbour.h7
-rw-r--r--include/net/netfilter/nf_conntrack.h4
-rw-r--r--include/net/snmp.h50
-rw-r--r--include/pcmcia/cs.h4
-rw-r--r--include/pcmcia/ds.h6
-rw-r--r--include/pcmcia/mem_op.h2
-rw-r--r--include/pcmcia/ss.h12
-rw-r--r--init/Kconfig22
-rw-r--r--init/main.c18
-rw-r--r--kernel/acct.c3
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/futex.c50
-rw-r--r--kernel/hrtimer.c50
-rw-r--r--kernel/hw_breakpoint.c4
-rw-r--r--kernel/irq/autoprobe.c20
-rw-r--r--kernel/irq/chip.c86
-rw-r--r--kernel/irq/handle.c22
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/manage.c50
-rw-r--r--kernel/irq/migration.c2
-rw-r--r--kernel/irq/numa_migrate.c8
-rw-r--r--kernel/irq/pm.c8
-rw-r--r--kernel/irq/proc.c4
-rw-r--r--kernel/irq/spurious.c14
-rw-r--r--kernel/lockdep.c31
-rw-r--r--kernel/module.c150
-rw-r--r--kernel/mutex-debug.h12
-rw-r--r--kernel/params.c8
-rw-r--r--kernel/perf_event.c106
-rw-r--r--kernel/power/console.c7
-rw-r--r--kernel/rcutorture.c8
-rw-r--r--kernel/rtmutex-debug.c4
-rw-r--r--kernel/rtmutex.c106
-rw-r--r--kernel/sched.c231
-rw-r--r--kernel/sched_cpupri.c10
-rw-r--r--kernel/sched_cpupri.h2
-rw-r--r--kernel/sched_debug.c4
-rw-r--r--kernel/sched_fair.c4
-rw-r--r--kernel/sched_idletask.c4
-rw-r--r--kernel/sched_rt.c60
-rw-r--r--kernel/smp.c35
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/softlockup.c54
-rw-r--r--kernel/spinlock.c306
-rw-r--r--kernel/sys.c8
-rw-r--r--kernel/sysctl.c18
-rw-r--r--kernel/time/clockevents.c14
-rw-r--r--kernel/time/tick-broadcast.c42
-rw-r--r--kernel/time/tick-common.c20
-rw-r--r--kernel/time/tick-internal.h1
-rw-r--r--kernel/time/timer_list.c6
-rw-r--r--kernel/time/timer_stats.c18
-rw-r--r--kernel/trace/ring_buffer.c16
-rw-r--r--kernel/trace/trace.c62
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_clock.c8
-rw-r--r--kernel/trace/trace_functions_graph.c4
-rw-r--r--kernel/trace/trace_hw_branches.c51
-rw-r--r--kernel/trace/trace_sched_wakeup.c16
-rw-r--r--kernel/trace/trace_selftest.c4
-rw-r--r--kernel/trace/trace_stack.c16
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/argv_split.c13
-rw-r--r--lib/crc32.c121
-rw-r--r--lib/ctype.c50
-rw-r--r--lib/debugobjects.c74
-rw-r--r--lib/dynamic_debug.c4
-rw-r--r--lib/kernel_lock.c22
-rw-r--r--lib/parser.c11
-rw-r--r--lib/plist.c8
-rw-r--r--lib/rwsem-spinlock.c23
-rw-r--r--lib/spinlock_debug.c64
-rw-r--r--lib/string.c25
-rw-r--r--lib/vsprintf.c395
-rw-r--r--mm/Kconfig16
-rw-r--r--mm/Makefile4
-rw-r--r--mm/allocpercpu.c177
-rw-r--r--mm/bootmem.c8
-rw-r--r--mm/hugetlb.c551
-rw-r--r--mm/internal.h23
-rw-r--r--mm/ksm.c953
-rw-r--r--mm/memcontrol.c7
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c29
-rw-r--r--mm/memory_hotplug.c16
-rw-r--r--mm/mempolicy.c69
-rw-r--r--mm/migrate.c133
-rw-r--r--mm/mincore.c37
-rw-r--r--mm/mlock.c45
-rw-r--r--mm/mmap.c50
-rw-r--r--mm/nommu.c8
-rw-r--r--mm/oom_kill.c40
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/page_io.c17
-rw-r--r--mm/pagewalk.c32
-rw-r--r--mm/percpu.c24
-rw-r--r--mm/rmap.c350
-rw-r--r--mm/shmem.c11
-rw-r--r--mm/slab.c30
-rw-r--r--mm/slub.c4
-rw-r--r--mm/swapfile.c847
-rw-r--r--mm/vmalloc.c11
-rw-r--r--mm/vmscan.c321
-rw-r--r--mm/vmstat.c10
-rw-r--r--net/irda/irnet/irnet.h1
-rw-r--r--net/irda/irnet/irnet_ppp.c8
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/netfilter/xt_recent.c3
-rw-r--r--net/sunrpc/addr.c10
-rw-r--r--net/sunrpc/auth.c39
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c6
-rw-r--r--net/sunrpc/clnt.c54
-rw-r--r--net/sunrpc/rpcb_clnt.c104
-rw-r--r--net/sunrpc/sunrpc_syms.c3
-rw-r--r--net/sunrpc/xprt.c4
-rw-r--r--net/sunrpc/xprtsock.c2
-rwxr-xr-xscripts/get_maintainer.pl499
-rwxr-xr-xscripts/recordmcount.pl3
-rw-r--r--sound/arm/pxa2xx-ac97.c2
-rw-r--r--sound/isa/gus/gus_mem.c3
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/cs5535audio/Makefile2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c1
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h4
-rw-r--r--sound/pci/cs5535audio/cs5535audio_olpc.c26
-rw-r--r--sound/pci/hda/hda_hwdep.c7
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_analog.c8
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c3
-rw-r--r--sound/soc/codecs/twl4030.c10
-rw-r--r--sound/soc/codecs/wm8350.c25
-rw-r--r--sound/soc/codecs/wm8900.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.h2
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--tools/perf/Makefile9
-rw-r--r--tools/perf/bench/sched-messaging.c8
-rw-r--r--tools/perf/bench/sched-pipe.c11
-rw-r--r--tools/perf/builtin-annotate.c15
-rw-r--r--tools/perf/builtin-bench.c57
-rw-r--r--tools/perf/builtin-buildid-list.c55
-rw-r--r--tools/perf/builtin-kmem.c15
-rw-r--r--tools/perf/builtin-record.c25
-rw-r--r--tools/perf/builtin-report.c20
-rw-r--r--tools/perf/builtin-sched.c13
-rw-r--r--tools/perf/builtin-timechart.c15
-rw-r--r--tools/perf/builtin-trace.c20
-rw-r--r--tools/perf/perf.h12
-rw-r--r--tools/perf/util/data_map.c71
-rw-r--r--tools/perf/util/data_map.h9
-rw-r--r--tools/perf/util/event.c11
-rw-r--r--tools/perf/util/event.h7
-rw-r--r--tools/perf/util/header.c28
-rw-r--r--tools/perf/util/header.h4
-rw-r--r--tools/perf/util/map.c87
-rw-r--r--tools/perf/util/session.c80
-rw-r--r--tools/perf/util/session.h16
-rw-r--r--tools/perf/util/symbol.c268
-rw-r--r--tools/perf/util/symbol.h17
-rw-r--r--tools/perf/util/thread.c63
-rw-r--r--tools/perf/util/thread.h42
982 files changed, 41230 insertions, 16785 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index 9fe91c02ee40..bf1627b02a03 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -60,6 +60,19 @@ Description:
Users: hotplug memory remove tools
https://w3.opensource.ibm.com/projects/powerpc-utils/
+
+What: /sys/devices/system/memoryX/nodeY
+Date: October 2009
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ When CONFIG_NUMA is enabled, a symbolic link that
+ points to the corresponding NUMA node directory.
+
+ For example, the following symbolic link is created for
+ memory section 9 on node0:
+ /sys/devices/system/memory/memory9/node0 -> ../../node/node0
+
+
What: /sys/devices/system/node/nodeX/memoryY
Date: September 2008
Contact: Gary Hade <garyhade@us.ibm.com>
@@ -70,4 +83,3 @@ Description:
memory section directory. For example, the following symbolic
link is created for memory section 9 on node0.
/sys/devices/system/node/node0/memory9 -> ../../memory/memory9
-
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 2aae06fcbed7..84a710f87c64 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -92,6 +92,20 @@ Description: Discover NUMA node a CPU belongs to
/sys/devices/system/cpu/cpu42/node2 -> ../../node/node2
+What: /sys/devices/system/cpu/cpu#/node
+Date: October 2009
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Discover NUMA node a CPU belongs to
+
+ When CONFIG_NUMA is enabled, a symbolic link that points
+ to the corresponding NUMA node directory.
+
+ For example, the following symlink is created for cpu42
+ in NUMA node 2:
+
+ /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2
+
+
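As a quick illustration of the symlink described above, a shell session might
look like this (a sketch only; cpu42 and node2 are the example names used in
the text and need not exist on a given system):

  # ls -d /sys/devices/system/cpu/cpu42/node*
  /sys/devices/system/cpu/cpu42/node2
  # readlink /sys/devices/system/cpu/cpu42/node2
  ../../node/node2
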
What: /sys/devices/system/cpu/cpu#/topology/core_id
/sys/devices/system/cpu/cpu#/topology/core_siblings
/sys/devices/system/cpu/cpu#/topology/core_siblings_list
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index a5009c8300f3..e3a77b215135 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -8,13 +8,19 @@ the block device which are also writable without interfering with the
original content;
*) To create device "forks", i.e. multiple different versions of the
same data stream.
+*) To merge a snapshot of a block device back into the snapshot's origin
+device.
+In the first two cases, dm copies only the chunks of data that get
+changed and uses a separate copy-on-write (COW) block device for
+storage.
-In both cases, dm copies only the chunks of data that get changed and
-uses a separate copy-on-write (COW) block device for storage.
+For snapshot merge the contents of the COW storage are merged back into
+the origin device.
-There are two dm targets available: snapshot and snapshot-origin.
+There are three dm targets available:
+snapshot, snapshot-origin, and snapshot-merge.
*) snapshot-origin <origin>
@@ -40,8 +46,25 @@ The difference is that for transient snapshots less metadata must be
saved on disk - they can be kept in memory by the kernel.
-How this is used by LVM2
-========================
+* snapshot-merge <origin> <COW device> <persistent> <chunksize>
+
+takes the same table arguments as the snapshot target except it only
+works with persistent snapshots. This target assumes the role of the
+"snapshot-origin" target and must not be loaded if the "snapshot-origin"
+is still present for <origin>.
+
+Creates a merging snapshot that takes control of the changed chunks
+stored in the <COW device> of an existing snapshot, through a handover
+procedure, and merges these chunks back into the <origin>. Once merging
+has started (in the background) the <origin> may be opened and the merge
+will continue while I/O is flowing to it. Changes to the <origin> are
+deferred until the merging snapshot's corresponding chunk(s) have been
+merged. Once merging has started the snapshot device, associated with
+the "snapshot" target, will return -EIO when accessed.
+
+
+How snapshot is used by LVM2
+============================
When you create the first LVM2 snapshot of a volume, four dm devices are used:
1) a device containing the original mapping table of the source volume;
@@ -72,3 +95,30 @@ brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow
brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap
brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base
+
+How snapshot-merge is used by LVM2
+==================================
+A merging snapshot assumes the role of the "snapshot-origin" while
+merging. As such the "snapshot-origin" is replaced with
+"snapshot-merge". The "-real" device is not changed and the "-cow"
+device is renamed to <origin name>-cow to aid LVM2's cleanup of the
+merging snapshot after it completes. The "snapshot" that hands over its
+COW device to the "snapshot-merge" is deactivated (unless using lvchange
+--refresh); but if it is left active it will simply return I/O errors.
+
+A snapshot will merge into its origin with the following command:
+
+lvconvert --merge volumeGroup/snap
+
+we'll now have this situation:
+
+# dmsetup table|grep volumeGroup
+
+volumeGroup-base-real: 0 2097152 linear 8:19 384
+volumeGroup-base-cow: 0 204800 linear 8:19 2097536
+volumeGroup-base: 0 2097152 snapshot-merge 254:11 254:12 P 16
+
+# ls -lL /dev/mapper/volumeGroup-*
+brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real
+brw------- 1 root root 254, 12 29 ago 18:16 /dev/mapper/volumeGroup-base-cow
+brw------- 1 root root 254, 10 29 ago 18:16 /dev/mapper/volumeGroup-base
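
As an aside, the snapshot-merge table shown above could in principle be loaded
by hand with dmsetup (LVM2 normally does this for you). A rough sketch reusing
the numbers from the example; real device names and the 254:x major:minor
pairs will differ:

  # dmsetup load volumeGroup-base \
        --table "0 2097152 snapshot-merge 254:11 254:12 P 16"
  # dmsetup resume volumeGroup-base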
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 2a4d77946c7d..21ab9357326d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -291,22 +291,6 @@ Who: Michael Buesch <mb@bu3sch.de>
---------------------------
-What: usedac i386 kernel parameter
-When: 2.6.27
-Why: replaced by allowdac and no dac combination
-Who: Glauber Costa <gcosta@redhat.com>
-
----------------------------
-
-What: print_fn_descriptor_symbol()
-When: October 2009
-Why: The %pF vsprintf format provides the same functionality in a
- simpler way. print_fn_descriptor_symbol() is deprecated but
- still present to give out-of-tree modules time to change.
-Who: Bjorn Helgaas <bjorn.helgaas@hp.com>
-
----------------------------
-
What: /sys/o2cb symlink
When: January 2010
Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 94b9f2056f4c..220cc6376ef8 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -38,6 +38,7 @@ Table of Contents
3.3 /proc/<pid>/io - Display the IO accounting fields
3.4 /proc/<pid>/coredump_filter - Core dump filtering settings
3.5 /proc/<pid>/mountinfo - Information about mounts
+ 3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
------------------------------------------------------------------------------
@@ -1409,3 +1410,11 @@ For more information on mount propagation see:
Documentation/filesystems/sharedsubtree.txt
+
+3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
+--------------------------------------------------------
+These files provide a method to access a task's comm value. They also allow a
+task to set its own or one of its thread siblings' comm value. The comm value
+is limited in size compared to the cmdline value, so writing anything longer
+than the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
+comm value.
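
A minimal shell sketch of the interface described above ("myname" is just an
example; writes longer than TASK_COMM_LEN are truncated):

  # cat /proc/self/comm
  cat
  # echo myname > /proc/$$/comm
  # cat /proc/$$/comm
  myname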
diff --git a/Documentation/hwmon/lis3lv02d b/Documentation/hwmon/lis3lv02d
index effe949a7282..06534f25e643 100644
--- a/Documentation/hwmon/lis3lv02d
+++ b/Documentation/hwmon/lis3lv02d
@@ -3,7 +3,8 @@ Kernel driver lis3lv02d
Supported chips:
- * STMicroelectronics LIS3LV02DL and LIS3LV02DQ
+ * STMicroelectronics LIS3LV02DL, LIS3LV02DQ (12 bits precision)
+ * STMicroelectronics LIS302DL, LIS3L02DQ, LIS331DL (8 bits)
Authors:
Yan Burman <burman.yan@gmail.com>
@@ -13,32 +14,52 @@ Authors:
Description
-----------
-This driver provides support for the accelerometer found in various HP
-laptops sporting the feature officially called "HP Mobile Data
-Protection System 3D" or "HP 3D DriveGuard". It detects automatically
-laptops with this sensor. Known models (for now the HP 2133, nc6420,
-nc2510, nc8510, nc84x0, nw9440 and nx9420) will have their axis
-automatically oriented on standard way (eg: you can directly play
-neverball). The accelerometer data is readable via
-/sys/devices/platform/lis3lv02d.
+This driver provides support for the accelerometer found in various HP laptops
+sporting the feature officially called "HP Mobile Data Protection System 3D" or
+"HP 3D DriveGuard". It detects automatically laptops with this sensor. Known
+models (full list can be found in drivers/hwmon/hp_accel.c) will have their
+axis automatically oriented on standard way (eg: you can directly play
+neverball). The accelerometer data is readable via
+/sys/devices/platform/lis3lv02d. Reported values are scaled
+to mg values (1/1000th of earth gravity).
Sysfs attributes under /sys/devices/platform/lis3lv02d/:
position - 3D position that the accelerometer reports. Format: "(x,y,z)"
-calibrate - read: values (x, y, z) that are used as the base for input
- class device operation.
- write: forces the base to be recalibrated with the current
- position.
-rate - reports the sampling rate of the accelerometer device in HZ
+rate - read reports the sampling rate of the accelerometer device in HZ.
+ write changes sampling rate of the accelerometer device.
+ Only values which are supported by HW are accepted.
+selftest - performs selftest for the chip as specified by chip manufacturer.
This driver also provides an absolute input class device, allowing
-the laptop to act as a pinball machine-esque joystick.
+the laptop to act as a pinball machine-esque joystick. The joystick device can
+be calibrated and can operate in two different modes.
+By default, output values are scaled between -32768 and 32767. In joystick raw
+mode, the joystick and the sysfs position entry have the same scale. There can
+be a small difference due to the input system's fuzz feature.
+Events are also available through an input event device.
+
+Selftest is meant only for hardware diagnostic purposes. It is not meant to be
+used during normal operation. Position data is not corrupted during selftest,
+but interrupt behaviour is not guaranteed to work reliably. In test mode, the
+sensing element is internally moved a little. Selftest measures the difference
+between normal mode and test mode. The chip specification gives the acceptance
+limits for each chip type. Limits are provided via platform data
+to allow adjustment of the limits without a change to the actual driver.
+Selftest returns either "OK x y z" or "FAIL x y z", where x, y and z are the
+measured differences between the modes. Axes are not remapped in selftest mode.
+Measurement values are provided to help HW diagnostic applications make a
+final decision.
+
+On HP laptops, if the LED infrastructure is activated, support for an LED
+indicating disk protection will be provided as /sys/class/leds/hp::hddprotect.
Another feature of the driver is misc device called "freefall" that
acts similar to /dev/rtc and reacts on free-fall interrupts received
from the device. It supports blocking operations, poll/select and
fasync operation modes. You must read 1 byte from the device. The
result is number of free-fall interrupts since the last successful
-read (or 255 if number of interrupts would not fit).
+read (or 255 if number of interrupts would not fit). See the hpfall.c
+file for an example of using the device.
Axes orientation
@@ -55,7 +76,7 @@ the accelerometer are converted into a "standard" organisation of the axes
* If the laptop is put upside-down, Z becomes negative
If your laptop model is not recognized (cf "dmesg"), you can send an
-email to the authors to add it to the database. When reporting a new
+email to the maintainer to add it to the database. When reporting a new
laptop, please include the output of "dmidecode" plus the value of
/sys/devices/platform/lis3lv02d/position in these four cases.
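
An illustrative sketch of the sysfs attributes described above (all values
shown are made-up examples and depend on the machine, the chip and its current
orientation):

  # cd /sys/devices/platform/lis3lv02d
  # cat position
  (12,-36,1004)
  # cat rate
  100
  # cat selftest
  OK 42 38 51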
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index 02b74899edaf..b7e42ec4b26b 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -81,8 +81,14 @@ pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range:
0 (stop) to 255 (full)
pwm[1-4]_enable - this file controls mode of fan/temperature control:
- * 1 Manual Mode, write to pwm file any value 0-255 (full speed)
- * 2 Thermal Cruise
+ * 1 Manual mode, write to pwm file any value 0-255 (full speed)
+ * 2 "Thermal Cruise" mode
+ * 3 "Fan Speed Cruise" mode
+ * 4 "Smart Fan III" mode
+
+pwm[1-4]_mode - controls if output is PWM or DC level
+ * 0 DC output (0 - 12v)
+ * 1 PWM output
Thermal Cruise mode
-------------------
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 7860aafb483d..0a74603eb671 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -44,7 +44,7 @@ static struct i2c_driver foo_driver = {
/* if device autodetection is needed: */
.class = I2C_CLASS_SOMETHING,
.detect = foo_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
.shutdown = foo_shutdown, /* optional */
.suspend = foo_suspend, /* optional */
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 4edd39ec7db9..188f4768f1d5 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -233,9 +233,9 @@ All md devices contain:
resync_start
The point at which resync should start. If no resync is needed,
- this will be a very large number. At array creation it will
- default to 0, though starting the array as 'clean' will
- set it much larger.
+ this will be a very large number (or 'none' since 2.6.30-rc1). At
+ array creation it will default to 0, though starting the array as
+ 'clean' will set it much larger.
new_dev
This file can be written but not read. The value written should
@@ -296,6 +296,51 @@ All md devices contain:
active-idle
like active, but no writes have been seen for a while (safe_mode_delay).
+ bitmap/location
+ This indicates where the write-intent bitmap for the array is
+ stored.
+ It can be one of "none", "file" or "[+-]N".
+ "file" may later be extended to "file:/file/name"
+ "[+-]N" means that many sectors from the start of the metadata.
+ This is replicated on all devices. For arrays with externally
+ managed metadata, the offset is from the beginning of the
+ device.
+ bitmap/chunksize
+ The size, in bytes, of the chunk which will be represented by a
+ single bit. For RAID456, it is a portion of an individual
+ device. For RAID10, it is a portion of the array. For RAID1, it
+ is both (they come to the same thing).
+ bitmap/time_base
+ The time, in seconds, between looking for bits in the bitmap to
+ be cleared. In the current implementation, a bit will be cleared
+ between 2 and 3 times "time_base" after all the covered blocks
+ are known to be in-sync.
+ bitmap/backlog
+ When write-mostly devices are active in a RAID1, write requests
+ to those devices proceed in the background - the filesystem (or
+ other user of the device) does not have to wait for them.
+ 'backlog' sets a limit on the number of concurrent background
+ writes. If there are more than this, new writes will be
+ synchronous.
+ bitmap/metadata
+ This can be either 'internal' or 'external'.
+ 'internal' is the default and means the metadata for the bitmap
+ is stored in the first 256 bytes of the allocated space and is
+ managed by the md module.
+ 'external' means that bitmap metadata is managed externally to
+ the kernel (i.e. by some userspace program)
+ bitmap/can_clear
+ This is either 'true' or 'false'. If 'true', then bits in the
+ bitmap will be cleared when the corresponding blocks are thought
+ to be in-sync. If 'false', bits will never be cleared.
+ This is automatically set to 'false' if a write happens on a
+ degraded array, or if the array becomes degraded during a write.
+ When metadata is managed externally, it should be set to true
+ once the array becomes non-degraded, and this fact has been
+ recorded in the metadata.
+
+
+
As component devices are added to an md array, they appear in the 'md'
directory as new directories named
@@ -334,8 +379,9 @@ Each directory contains:
Writing "writemostly" sets the writemostly flag.
Writing "-writemostly" clears the writemostly flag.
Writing "blocked" sets the "blocked" flag.
- Writing "-blocked" clear the "blocked" flag and allows writes
+ Writing "-blocked" clears the "blocked" flag and allows writes
to complete.
+ Writing "in_sync" sets the in_sync flag.
This file responds to select/poll. Any change to 'faulty'
or 'blocked' causes an event.
@@ -372,6 +418,24 @@ Each directory contains:
array. If a value less than the current component_size is
written, it will be rejected.
+ recovery_start
+
+ When the device is not 'in_sync', this records the number of
+ sectors from the start of the device which are known to be
+ correct. This is normally zero, but during a recovery
+ operation it will steadily increase, and if the recovery is
+ interrupted, restoring this value can cause recovery to
+ avoid repeating the earlier blocks. With v1.x metadata, this
+ value is saved and restored automatically.
+
+ This can be set whenever the device is not an active member of
+ the array, either before the array is activated, or before
+ the 'slot' is set.
+
+ Setting this to 'none' is equivalent to setting 'in_sync'.
+ Setting to any other value also clears the 'in_sync' flag.
+
+
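A hypothetical shell sketch of a few of the entries described above (md0 and
the component directory dev-sda1 are example names; the values shown are
illustrative only):

  # cd /sys/block/md0/md
  # cat bitmap/location
  +8
  # cat bitmap/metadata
  internal
  # cat dev-sda1/recovery_start
  none
  # echo in_sync > dev-sda1/state
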
An active md device will also contain an entry for each active device
in the array. These are named
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index bbc8a6a36921..57e7e9cc1870 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -160,12 +160,15 @@ Under each section, you can see 4 files.
NOTE:
These directories/files appear after physical memory hotplug phase.
-If CONFIG_NUMA is enabled the
-/sys/devices/system/memory/memoryXXX memory section
-directories can also be accessed via symbolic links located in
-the /sys/devices/system/node/node* directories. For example:
+If CONFIG_NUMA is enabled, the memoryXXX/ directories can also be accessed
+via symbolic links located in the /sys/devices/system/node/node* directories.
+
+For example:
/sys/devices/system/node/node0/memory9 -> ../../memory/memory9
+A backlink will also be created:
+/sys/devices/system/memory/memory9/node0 -> ../../node/node0
+
--------------------------------
4. Physical memory hot-add phase
--------------------------------
diff --git a/Documentation/misc-devices/ad525x_dpot.txt b/Documentation/misc-devices/ad525x_dpot.txt
new file mode 100644
index 000000000000..0c9413b1cbf3
--- /dev/null
+++ b/Documentation/misc-devices/ad525x_dpot.txt
@@ -0,0 +1,57 @@
+---------------------------------
+ AD525x Digital Potentiometers
+---------------------------------
+
+The ad525x_dpot driver exports a simple sysfs interface. This allows you to
+work with the immediate resistance settings as well as update the saved startup
+settings. Access to the factory programmed tolerance is also provided, but
+interpretation of this setting is required by the end application according to
+the specific part in use.
+
+---------
+ Files
+---------
+
+Each dpot device will have a set of eeprom, rdac, and tolerance files. How
+many of each there are, and the range of allowed values, depend on the
+actual part you have.
+
+The eeprom files are used to program the startup value of the device.
+
+The rdac files are used to program the immediate value of the device.
+
+The tolerance files are the read-only factory programmed tolerance settings
+and may vary greatly on a part-by-part basis. For exact interpretation of
+this field, please consult the datasheet for your part. This is presented
+as a hex file for easier parsing.
+
+-----------
+ Example
+-----------
+
+Locate the device in your sysfs tree. This is probably easiest by going into
+the common i2c directory and locating the device by the i2c slave address.
+
+ # ls /sys/bus/i2c/devices/
+ 0-0022 0-0027 0-002f
+
+So assuming the device in question is on the first i2c bus and has the slave
+address of 0x2f, we descend (unrelated sysfs entries have been trimmed).
+
+ # ls /sys/bus/i2c/devices/0-002f/
+ eeprom0 rdac0 tolerance0
+
+You can use simple reads/writes to access these files:
+
+ # cd /sys/bus/i2c/devices/0-002f/
+
+ # cat eeprom0
+ 0
+ # echo 10 > eeprom0
+ # cat eeprom0
+ 10
+
+ # cat rdac0
+ 5
+ # echo 3 > rdac0
+ # cat rdac0
+ 3
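+
+The tolerance0 file (not shown above) reports the factory programmed value in
+hex. As a rough, hypothetical sketch, it can be read and parsed from C like
+any other sysfs attribute:
+
+	#include <stdio.h>
+	#include <stdlib.h>
+
+	int main(void)
+	{
+		/* Path assumes the example device above (bus 0, address 0x2f). */
+		FILE *f = fopen("/sys/bus/i2c/devices/0-002f/tolerance0", "r");
+		char buf[32];
+		unsigned long raw;
+
+		if (!f) {
+			perror("tolerance0");
+			return 1;
+		}
+		if (!fgets(buf, sizeof(buf), f)) {
+			fclose(f);
+			return 1;
+		}
+		fclose(f);
+
+		/* The value is hex encoded; its meaning is part specific,
+		 * so consult the datasheet before acting on it. */
+		raw = strtoul(buf, NULL, 16);
+		printf("raw tolerance word: 0x%lx\n", raw);
+		return 0;
+	}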
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index b565e8279d13..8e1ddec2c78a 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -119,6 +119,32 @@ FURTHER NOTES ON NO-MMU MMAP
granule but will only discard the excess if appropriately configured as
this has an effect on fragmentation.
+ (*) The memory allocated by a request for an anonymous mapping will normally
+ be cleared by the kernel before being returned in accordance with the
+ Linux man pages (ver 2.22 or later).
+
+ In the MMU case this can be achieved with reasonable performance as
+ regions are backed by virtual pages, with the contents only being mapped
+ to cleared physical pages when a write happens on that specific page
+ (prior to which, the pages are effectively mapped to the global zero page
+ from which reads can take place). This spreads out the time it takes to
+ initialize the contents of a page - depending on the write-usage of the
+ mapping.
+
+ In the no-MMU case, however, anonymous mappings are backed by physical
+ pages, and the entire map is cleared at allocation time. This can cause
+ significant delays during a userspace malloc() as the C library does an
+ anonymous mapping and the kernel then does a memset for the entire map.
+
+ However, for memory that isn't required to be precleared - such as that
+ returned by malloc() - mmap() can take a MAP_UNINITIALIZED flag to
+ indicate to the kernel that it shouldn't bother clearing the memory before
+ returning it. Note that CONFIG_MMAP_ALLOW_UNINITIALIZED must be enabled
+ to permit this, otherwise the flag will be ignored.
+
+ uClibc uses this to speed up malloc(), and the ELF-FDPIC binfmt uses this
+ to allocate the brk and stack region.
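+
+ For illustration, a minimal user-space sketch might look like the
+ following (only meaningful on a no-MMU kernel built with
+ CONFIG_MMAP_ALLOW_UNINITIALIZED; elsewhere the flag is a no-op, and the
+ fallback define below assumes headers that do not expose it):
+
+	#include <stdio.h>
+	#include <sys/mman.h>
+
+	#ifndef MAP_UNINITIALIZED
+	#define MAP_UNINITIALIZED 0	/* flag unsupported: kernel clears as usual */
+	#endif
+
+	int main(void)
+	{
+		size_t len = 64 * 1024;
+		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
+			       -1, 0);
+
+		if (p == MAP_FAILED) {
+			perror("mmap");
+			return 1;
+		}
+		/* The region may contain stale data; initialize before use. */
+		munmap(p, len);
+		return 0;
+	}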
+
(*) A list of all the private copy and anonymous mappings on the system is
visible through /proc/maps in no-MMU mode.
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 619699dde593..178c831b907d 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -1,73 +1,8 @@
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
-are hence deprecated.
+Lesson 1: Spin locks
-Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
-__SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate for static
-initialization.
-
-Most of the time, you can simply turn:
-
- static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-
-into:
-
- static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
- struct foo bar {
- .lock = SPIN_LOCK_UNLOCKED;
- };
-
-to:
-
- struct foo bar {
- .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
- };
-
-Declaration of static rw_locks undergo a similar transformation.
-
-Dynamic initialization, when necessary, may be performed as
-demonstrated below.
-
- spinlock_t xxx_lock;
- rwlock_t xxx_rw_lock;
-
- static int __init xxx_init(void)
- {
- spin_lock_init(&xxx_lock);
- rwlock_init(&xxx_rw_lock);
- ...
- }
-
- module_init(xxx_init);
-
-The following discussion is still valid, however, with the dynamic
-initialization of spinlocks or with DEFINE_SPINLOCK, etc., used
-instead of SPIN_LOCK_UNLOCKED.
-
------------------------
-
-On Fri, 2 Jan 1998, Doug Ledford wrote:
->
-> I'm working on making the aic7xxx driver more SMP friendly (as well as
-> importing the latest FreeBSD sequencer code to have 7895 support) and wanted
-> to get some info from you. The goal here is to make the various routines
-> SMP safe as well as UP safe during interrupts and other manipulating
-> routines. So far, I've added a spin_lock variable to things like my queue
-> structs. Now, from what I recall, there are some spin lock functions I can
-> use to lock these spin locks from other use as opposed to a (nasty)
-> save_flags(); cli(); stuff; restore_flags(); construct. Where do I find
-> these routines and go about making use of them? Do they only lock on a
-> per-processor basis or can they also lock say an interrupt routine from
-> mucking with a queue if the queue routine was manipulating it when the
-> interrupt occurred, or should I still use a cli(); based construct on that
-> one?
-
-See <asm/spinlock.h>. The basic version is:
-
- spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+The most basic primitive for locking is the spinlock.
+static DEFINE_SPINLOCK(xxx_lock);
unsigned long flags;
@@ -75,13 +10,11 @@ See <asm/spinlock.h>. The basic version is:
... critical section here ..
spin_unlock_irqrestore(&xxx_lock, flags);
-and the above is always safe. It will disable interrupts _locally_, but the
+The above is always safe. It will disable interrupts _locally_, but the
spinlock itself will guarantee the global lock, so it will guarantee that
there is only one thread-of-control within the region(s) protected by that
-lock.
-
-Note that it works well even under UP - the above sequence under UP
-essentially is just the same as doing a
+lock. This works well even under UP. The above sequence under UP
+essentially is just the same as doing
unsigned long flags;
@@ -91,15 +24,13 @@ essentially is just the same as doing a
so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one go because I
-don't export that interface normally).
+architectures that allow doing the "save_flags + cli" in one operation).
+
+ NOTE! Implications of spin_locks for memory are further described in:
-NOTE NOTE NOTE! The reason the spinlock is so much faster than a global
-interrupt lock under SMP is exactly because it disables interrupts only on
-the local CPU. The spin-lock is safe only when you _also_ use the lock
-itself to do locking across CPU's, which implies that EVERYTHING that
-touches a shared variable has to agree about the spinlock they want to
-use.
+ Documentation/memory-barriers.txt
+ (5) LOCK operations.
+ (6) UNLOCK operations.
The above is usually pretty simple (you usually need and want only one
spinlock for most things - using more than one spinlock can make things a
@@ -120,20 +51,24 @@ and another sequence that does
then they are NOT mutually exclusive, and the critical regions can happen
at the same time on two different CPU's. That's fine per se, but the
critical regions had better be critical for different things (ie they
-can't stomp on each other).
+can't stomp on each other).
The above is a problem mainly if you end up mixing code - for example the
routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above..
+think about issues like the above.
This is really the only really hard part about spinlocks: once you start
using spinlocks they tend to expand to areas you might not have noticed
before, because you have to make sure the spinlocks correctly protect the
shared data structures _everywhere_ they are used. The spinlocks are most
-easily added to places that are completely independent of other code (ie
-internal driver data structures that nobody else ever touches, for
-example).
+easily added to places that are completely independent of other code (for
+example, internal driver data structures that nobody else ever touches).
+
+ NOTE! The spin-lock is safe only when you _also_ use the lock itself
+ to do locking across CPU's, which implies that EVERYTHING that
+ touches a shared variable has to agree about the spinlock they want
+ to use.
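+
+As a minimal sketch of that rule (the names here are invented for
+illustration), every path that touches the shared counter goes through the
+one spinlock that guards it:
+
+	#include <linux/spinlock.h>
+
+	static DEFINE_SPINLOCK(stats_lock);	/* guards packet_count only */
+	static unsigned long packet_count;
+
+	/* Called from both process context and interrupt handlers,
+	 * so the irqsave variant is used consistently. */
+	static void stats_inc(void)
+	{
+		unsigned long flags;
+
+		spin_lock_irqsave(&stats_lock, flags);
+		packet_count++;
+		spin_unlock_irqrestore(&stats_lock, flags);
+	}
+
+	static unsigned long stats_read(void)
+	{
+		unsigned long flags, val;
+
+		spin_lock_irqsave(&stats_lock, flags);
+		val = packet_count;
+		spin_unlock_irqrestore(&stats_lock, flags);
+		return val;
+	}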
----
@@ -141,13 +76,17 @@ Lesson 2: reader-writer spinlocks.
If your data accesses have a very natural pattern where you usually tend
to mostly read from the shared variables, the reader-writer locks
-(rw_lock) versions of the spinlocks are often nicer. They allow multiple
+(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple
readers to be in the same critical region at once, but if somebody wants
-to change the variables it has to get an exclusive write lock. The
-routines look the same as above:
+to change the variables it has to get an exclusive write lock.
- rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+ NOTE! reader-writer locks require more atomic memory operations than
+ simple spinlocks. Unless the reader critical section is long, you
+ are better off just using spinlocks.
+The routines look the same as above:
+
+ rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
unsigned long flags;
@@ -159,18 +98,21 @@ routines look the same as above:
.. read and write exclusive access to the info ...
write_unlock_irqrestore(&xxx_lock, flags);
-The above kind of lock is useful for complex data structures like linked
-lists etc, especially when you know that most of the work is to just
-traverse the list searching for entries without changing the list itself,
-for example. Then you can use the read lock for that kind of list
-traversal, which allows many concurrent readers. Anything that _changes_
-the list will have to get the write lock.
+The above kind of lock may be useful for complex data structures like
+linked lists, especially searching for entries without changing the list
+itself. The read lock allows many concurrent readers. Anything that
+_changes_ the list will have to get the write lock.
+
+ NOTE! RCU is better for list traversal, but requires careful
+ attention to design detail (see Documentation/RCU/listRCU.txt).
-Note: you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
+Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
time need to do any changes (even if you don't do it every time), you have
-to get the write-lock at the very beginning. I could fairly easily add a
-primitive to create a "upgradeable" read-lock, but it hasn't been an issue
-yet. Tell me if you'd want one.
+to get the write-lock at the very beginning.
+
+ NOTE! We are working hard to remove reader-writer spinlocks in most
+ cases, so please don't add a new one without consensus. (Instead, see
+ Documentation/RCU/rcu.txt for complete information.)
----
@@ -233,4 +175,46 @@ indeed), while write-locks need to protect themselves against interrupts.
Linus
+----
+
+Reference information:
+
+For dynamic initialization, use spin_lock_init() or rwlock_init() as
+appropriate:
+
+ spinlock_t xxx_lock;
+ rwlock_t xxx_rw_lock;
+
+ static int __init xxx_init(void)
+ {
+ spin_lock_init(&xxx_lock);
+ rwlock_init(&xxx_rw_lock);
+ ...
+ }
+
+ module_init(xxx_init);
+
+For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
+__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
+
+SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated. These interfere
+with lockdep state tracking.
+
+Most of the time, you can simply turn:
+ static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+into:
+ static DEFINE_SPINLOCK(xxx_lock);
+
+Static structure member variables go from:
+
+ struct foo bar {
+ .lock = SPIN_LOCK_UNLOCKED;
+ };
+
+to:
+ struct foo bar {
+ .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
+ };
+
+Declarations of static rw_locks undergo a similar transformation.
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 8f7a0e73ef44..3894eaa23486 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -19,6 +19,8 @@ Currently, these files might (depending on your configuration)
show up in /proc/sys/kernel:
- acpi_video_flags
- acct
+- bootloader_type [ X86 only ]
+- bootloader_version [ X86 only ]
- callhome [ S390 only ]
- auto_msgmni
- core_pattern
@@ -93,6 +95,35 @@ valid for 30 seconds.
==============================================================
+bootloader_type:
+
+x86 bootloader identification
+
+This gives the bootloader type number as indicated by the bootloader,
+shifted left by 4, and OR'd with the low four bits of the bootloader
+version. The reason for this encoding is that this used to match the
+type_of_loader field in the kernel header; the encoding is kept for
+backwards compatibility. That is, if the full bootloader type number
+is 0x15 and the full version number is 0x234, this file will contain
+the value 340 = 0x154.
+
+See the type_of_loader and ext_loader_type fields in
+Documentation/x86/boot.txt for additional information.
+
+==============================================================
+
+bootloader_version:
+
+x86 bootloader version
+
+The complete bootloader version number. In the example above, this
+file will contain the value 564 = 0x234.
+
+See the type_of_loader and ext_loader_ver fields in
+Documentation/x86/boot.txt for additional information.
+
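+As a small illustrative program (hypothetical, not part of any kernel tool),
+the two files can be recombined into the full type and version numbers:
+
+	#include <stdio.h>
+
+	static unsigned long read_ulong(const char *path)
+	{
+		unsigned long val = 0;
+		FILE *f = fopen(path, "r");
+
+		if (f) {
+			if (fscanf(f, "%lu", &val) != 1)
+				val = 0;
+			fclose(f);
+		}
+		return val;
+	}
+
+	int main(void)
+	{
+		unsigned long type = read_ulong("/proc/sys/kernel/bootloader_type");
+		unsigned long ver  = read_ulong("/proc/sys/kernel/bootloader_version");
+
+		/* bootloader_type = (full type << 4) | (low 4 bits of version) */
+		printf("full bootloader type:    0x%lx\n", type >> 4);
+		printf("full bootloader version: 0x%lx\n", ver);
+		return 0;
+	}
+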
+==============================================================
+
callhome:
Controls the kernel's callhome behavior in case of a kernel panic.
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 82a7bd1800b2..bc31636973e3 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -11,23 +11,21 @@ This optimization is more critical now as bigger and bigger physical memories
(several GBs) are more readily available.
Users can use the huge page support in Linux kernel by either using the mmap
-system call or standard SYSv shared memory system calls (shmget, shmat).
+system call or standard SYSV shared memory system calls (shmget, shmat).
First the Linux kernel needs to be built with the CONFIG_HUGETLBFS
(present under "File systems") and CONFIG_HUGETLB_PAGE (selected
automatically when CONFIG_HUGETLBFS is selected) configuration
options.
-The kernel built with huge page support should show the number of configured
-huge pages in the system by running the "cat /proc/meminfo" command.
+The /proc/meminfo file provides information about the total number of
+persistent hugetlb pages in the kernel's huge page pool. It also displays
+information about the number of free, reserved and surplus huge pages and the
+default huge page size. The huge page size is needed for generating the
+proper alignment and size of the arguments to system calls that map huge page
+regions.
-/proc/meminfo also provides information about the total number of hugetlb
-pages configured in the kernel. It also displays information about the
-number of free hugetlb pages at any time. It also displays information about
-the configured huge page size - this is needed for generating the proper
-alignment and size of the arguments to the above system calls.
-
-The output of "cat /proc/meminfo" will have lines like:
+The output of "cat /proc/meminfo" will include lines like:
.....
HugePages_Total: vvv
@@ -53,59 +51,63 @@ HugePages_Surp is short for "surplus," and is the number of huge pages in
/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
in the kernel.
-/proc/sys/vm/nr_hugepages indicates the current number of configured hugetlb
-pages in the kernel. Super user can dynamically request more (or free some
-pre-configured) huge pages.
-The allocation (or deallocation) of hugetlb pages is possible only if there are
-enough physically contiguous free pages in system (freeing of huge pages is
-possible only if there are enough hugetlb pages free that can be transferred
-back to regular memory pool).
+/proc/sys/vm/nr_hugepages indicates the current number of "persistent" huge
+pages in the kernel's huge page pool. "Persistent" huge pages will be
+returned to the huge page pool when freed by a task. A user with root
+privileges can dynamically allocate more or free some persistent huge pages
+by increasing or decreasing the value of 'nr_hugepages'.
-Pages that are used as hugetlb pages are reserved inside the kernel and cannot
-be used for other purposes.
+Pages that are used as huge pages are reserved inside the kernel and cannot
+be used for other purposes. Huge pages cannot be swapped out under
+memory pressure.
-Once the kernel with Hugetlb page support is built and running, a user can
-use either the mmap system call or shared memory system calls to start using
-the huge pages. It is required that the system administrator preallocate
-enough memory for huge page purposes.
+Once a number of huge pages have been pre-allocated to the kernel huge page
+pool, a user with appropriate privilege can use either the mmap system call
+or shared memory system calls to use the huge pages. See the discussion of
+Using Huge Pages, below.
-The administrator can preallocate huge pages on the kernel boot command line by
-specifying the "hugepages=N" parameter, where 'N' = the number of huge pages
-requested. This is the most reliable method for preallocating huge pages as
-memory has not yet become fragmented.
+The administrator can allocate persistent huge pages on the kernel boot
+command line by specifying the "hugepages=N" parameter, where 'N' = the
+number of huge pages requested. This is the most reliable method of
+allocating huge pages as memory has not yet become fragmented.
-Some platforms support multiple huge page sizes. To preallocate huge pages
+Some platforms support multiple huge page sizes. To allocate huge pages
of a specific size, one must precede the huge pages boot command parameters
with a huge page size selection parameter "hugepagesz=<size>". <size> must
be specified in bytes with optional scale suffix [kKmMgG]. The default huge
page size may be selected with the "default_hugepagesz=<size>" boot parameter.
-/proc/sys/vm/nr_hugepages indicates the current number of configured [default
-size] hugetlb pages in the kernel. Super user can dynamically request more
-(or free some pre-configured) huge pages.
-
-Use the following command to dynamically allocate/deallocate default sized
-huge pages:
+When multiple huge page sizes are supported, /proc/sys/vm/nr_hugepages
+indicates the current number of pre-allocated huge pages of the default size.
+Thus, one can use the following command to dynamically allocate/deallocate
+default sized persistent huge pages:
echo 20 > /proc/sys/vm/nr_hugepages
-This command will try to configure 20 default sized huge pages in the system.
+This command will try to adjust the number of default sized huge pages in the
+huge page pool to 20, allocating or freeing huge pages, as required.
+
On a NUMA platform, the kernel will attempt to distribute the huge page pool
-over the all on-line nodes. These huge pages, allocated when nr_hugepages
-is increased, are called "persistent huge pages".
+over the set of allowed nodes specified by the NUMA memory policy of the
+task that modifies nr_hugepages. The default for the allowed nodes--when the
+task has default memory policy--is all on-line nodes with memory. Allowed
+nodes with insufficient available, contiguous memory for a huge page will be
+silently skipped when allocating persistent huge pages. See the discussion
+below of the interaction of task memory policy, cpusets and per node attributes
+with the allocation and freeing of persistent huge pages.
The success or failure of huge page allocation depends on the amount of
-physically contiguous memory that is preset in system at the time of the
+physically contiguous memory that is present in the system at the time of the
allocation attempt. If the kernel is unable to allocate huge pages from
some nodes in a NUMA system, it will attempt to make up the difference by
allocating extra pages on other nodes with sufficient available contiguous
memory, if any.
-System administrators may want to put this command in one of the local rc init
-files. This will enable the kernel to request huge pages early in the boot
-process when the possibility of getting physical contiguous pages is still
-very high. Administrators can verify the number of huge pages actually
-allocated by checking the sysctl or meminfo. To check the per node
+System administrators may want to put this command in one of the local rc
+init files. This will enable the kernel to allocate huge pages early in
+the boot process when the possibility of getting physically contiguous pages
+is still very high. Administrators can verify the number of huge pages
+actually allocated by checking the sysctl or meminfo. To check the per node
distribution of huge pages in a NUMA system, use:
cat /sys/devices/system/node/node*/meminfo | fgrep Huge
@@ -113,45 +115,47 @@ distribution of huge pages in a NUMA system, use:
/proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of
huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are
requested by applications. Writing any non-zero value into this file
-indicates that the hugetlb subsystem is allowed to try to obtain "surplus"
-huge pages from the buddy allocator, when the normal pool is exhausted. As
-these surplus huge pages go out of use, they are freed back to the buddy
-allocator.
+indicates that the hugetlb subsystem is allowed to try to obtain that
+number of "surplus" huge pages from the kernel's normal page pool, when the
+persistent huge page pool is exhausted. As these surplus huge pages become
+unused, they are freed back to the kernel's normal page pool.
-When increasing the huge page pool size via nr_hugepages, any surplus
+When increasing the huge page pool size via nr_hugepages, any existing surplus
pages will first be promoted to persistent huge pages. Then, additional
huge pages will be allocated, if necessary and if possible, to fulfill
-the new huge page pool size.
+the new persistent huge page pool size.
-The administrator may shrink the pool of preallocated huge pages for
+The administrator may shrink the pool of persistent huge pages for
the default huge page size by setting the nr_hugepages sysctl to a
smaller value. The kernel will attempt to balance the freeing of huge pages
-across all on-line nodes. Any free huge pages on the selected nodes will
-be freed back to the buddy allocator.
-
-Caveat: Shrinking the pool via nr_hugepages such that it becomes less
-than the number of huge pages in use will convert the balance to surplus
-huge pages even if it would exceed the overcommit value. As long as
-this condition holds, however, no more surplus huge pages will be
-allowed on the system until one of the two sysctls are increased
-sufficiently, or the surplus huge pages go out of use and are freed.
+across all nodes in the memory policy of the task modifying nr_hugepages.
+Any free huge pages on the selected nodes will be freed back to the kernel's
+normal page pool.
+
+Caveat: Shrinking the persistent huge page pool via nr_hugepages such that
+it becomes less than the number of huge pages in use will convert the balance
+of the in-use huge pages to surplus huge pages. This will occur even if
+the number of surplus pages would exceed the overcommit value. As long as
+this condition holds--that is, until nr_hugepages+nr_overcommit_hugepages is
+increased sufficiently, or the surplus huge pages go out of use and are freed--
+no more surplus huge pages will be allowed to be allocated.
With support for multiple huge page pools at run-time available, much of
-the huge page userspace interface has been duplicated in sysfs. The above
-information applies to the default huge page size which will be
-controlled by the /proc interfaces for backwards compatibility. The root
-huge page control directory in sysfs is:
+the huge page userspace interface in /proc/sys/vm has been duplicated in sysfs.
+The /proc interfaces discussed above have been retained for backwards
+compatibility. The root huge page control directory in sysfs is:
/sys/kernel/mm/hugepages
For each huge page size supported by the running kernel, a subdirectory
-will exist, of the form
+will exist, of the form:
hugepages-${size}kB
Inside each of these directories, the same set of files will exist:
nr_hugepages
+ nr_hugepages_mempolicy
nr_overcommit_hugepages
free_hugepages
resv_hugepages
@@ -159,6 +163,102 @@ Inside each of these directories, the same set of files will exist:
which function as described above for the default huge page-sized case.
+
+Interaction of Task Memory Policy with Huge Page Allocation/Freeing
+
+Whether huge pages are allocated and freed via the /proc interface or
+the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA
+nodes from which huge pages are allocated or freed are controlled by the
+NUMA memory policy of the task that modifies the nr_hugepages_mempolicy
+sysctl or attribute. When the nr_hugepages attribute is used, mempolicy
+is ignored.
+
+The recommended method to allocate or free huge pages to/from the kernel
+huge page pool, using the nr_hugepages example above, is:
+
+ numactl --interleave <node-list> echo 20 \
+ >/proc/sys/vm/nr_hugepages_mempolicy
+
+or, more succinctly:
+
+ numactl -m <node-list> echo 20 >/proc/sys/vm/nr_hugepages_mempolicy
+
+This will allocate or free abs(20 - nr_hugepages) huge pages to or from the
+nodes specified in <node-list>, depending on whether the number of persistent
+huge pages is initially less than or greater than 20, respectively. No huge
+pages will be allocated or freed on any node not included in the specified
+<node-list>.
+
+When adjusting the persistent hugepage count via nr_hugepages_mempolicy, any
+memory policy mode--bind, preferred, local or interleave--may be used. The
+resulting effect on persistent huge page allocation is as follows:
+
+1) Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.txt],
+ persistent huge pages will be distributed across the node or nodes
+ specified in the mempolicy as if "interleave" had been specified.
+ However, if a node in the policy does not contain sufficient contiguous
+ memory for a huge page, the allocation will not "fallback" to the nearest
+ neighbor node with sufficient contiguous memory. To do this would cause
+ undesirable imbalance in the distribution of the huge page pool, or
+ possibly, allocation of persistent huge pages on nodes not allowed by
+ the task's memory policy.
+
+2) One or more nodes may be specified with the bind or interleave policy.
+ If more than one node is specified with the preferred policy, only the
+ lowest numeric id will be used. Local policy will select the node where
+ the task is running at the time the nodes_allowed mask is constructed.
+ For local policy to be deterministic, the task must be bound to a cpu or
+ cpus in a single node. Otherwise, the task could be migrated to some
+ other node at any time after launch and the resulting node will be
+ indeterminate. Thus, local policy is not very useful for this purpose.
+ Any of the other mempolicy modes may be used to specify a single node.
+
+3) The nodes allowed mask will be derived from any non-default task mempolicy,
+ whether this policy was set explicitly by the task itself or one of its
+ ancestors, such as numactl. This means that if the task is invoked from a
+ shell with non-default policy, that policy will be used. One can specify a
+ node list of "all" with numactl --interleave or --membind [-m] to achieve
+ interleaving over all nodes in the system or cpuset.
+
+4) Any task mempolicy specified--e.g., using numactl--will be constrained by
+ the resource limits of any cpuset in which the task runs. Thus, there will
+ be no way for a task with non-default policy running in a cpuset with a
+ subset of the system nodes to allocate huge pages outside the cpuset
+ without first moving to a cpuset that contains all of the desired nodes.
+
+5) Boot-time huge page allocation attempts to distribute the requested number
+ of huge pages over all on-line nodes with memory.
+
+Per Node Hugepages Attributes
+
+A subset of the contents of the root huge page control directory in sysfs,
+described above, will be replicated under the system device of each
+NUMA node with memory in:
+
+ /sys/devices/system/node/node[0-9]*/hugepages/
+
+Under this directory, the subdirectory for each supported huge page size
+contains the following attribute files:
+
+ nr_hugepages
+ free_hugepages
+ surplus_hugepages
+
+The free_hugepages and surplus_hugepages attribute files are read-only. They
+return the number of free and surplus [overcommitted] huge pages,
+respectively, on the parent node.
+
+The nr_hugepages attribute returns the total number of huge pages on the
+specified node. When this attribute is written, the number of persistent huge
+pages on the parent node will be adjusted to the specified value, if sufficient
+resources exist, regardless of the task's mempolicy or cpuset constraints.
+
+Note that the overcommit and reserve page counts remain global quantities,
+as we don't know until fault time, when the faulting task's mempolicy is
+applied, from which node the huge page allocation will be attempted.
+
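+As an illustration only (node number and huge page size below are assumptions;
+substitute ones that exist on the system), the per node count can be adjusted
+programmatically like any other sysfs attribute:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		const char *path = "/sys/devices/system/node/node0/"
+				   "hugepages/hugepages-2048kB/nr_hugepages";
+		char buf[32];
+		FILE *f;
+
+		/* Ask for 16 persistent huge pages on node 0. */
+		f = fopen(path, "w");
+		if (!f) {
+			perror(path);
+			return 1;
+		}
+		fprintf(f, "16\n");
+		fclose(f);
+
+		/* Read back how many the kernel could actually provide. */
+		f = fopen(path, "r");
+		if (f && fgets(buf, sizeof(buf), f))
+			printf("node0 nr_hugepages: %s", buf);
+		if (f)
+			fclose(f);
+		return 0;
+	}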
+
+Using Huge Pages
+
If the user applications are going to request huge pages using mmap system
call, then it is required that system administrator mount a file system of
type hugetlbfs:
@@ -206,9 +306,11 @@ map_hugetlb.c.
* requesting huge pages.
*
* For the ia64 architecture, the Linux kernel reserves Region number 4 for
- * huge pages. That means the addresses starting with 0x800000... will need
- * to be specified. Specifying a fixed address is not required on ppc64,
- * i386 or x86_64.
+ * huge pages. That means that if one requires a fixed address, a huge page
+ * aligned address starting with 0x800000... will be required. If a fixed
+ * address is not required, the kernel will select an address in the proper
+ * range.
+ * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*
* Note: The default shared memory limit is quite low on many kernels,
* you may need to increase it via:
@@ -237,14 +339,8 @@ map_hugetlb.c.
#define dprintf(x) printf(x)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define SHMAT_FLAGS (SHM_RND)
-#else
-#define ADDR (void *)(0x0UL)
+#define ADDR (void *)(0x0UL) /* let kernel choose address */
#define SHMAT_FLAGS (0)
-#endif
int main(void)
{
@@ -302,10 +398,12 @@ int main(void)
* example, the app is requesting memory of size 256MB that is backed by
* huge pages.
*
- * For ia64 architecture, Linux kernel reserves Region number 4 for huge pages.
- * That means the addresses starting with 0x800000... will need to be
- * specified. Specifying a fixed address is not required on ppc64, i386
- * or x86_64.
+ * For the ia64 architecture, the Linux kernel reserves Region number 4 for
+ * huge pages. That means that if one requires a fixed address, a huge page
+ * aligned address starting with 0x800000... will be required. If a fixed
+ * address is not required, the kernel will select an address in the proper
+ * range.
+ * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*/
#include <stdlib.h>
#include <stdio.h>
@@ -317,14 +415,8 @@ int main(void)
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define FLAGS (MAP_SHARED | MAP_FIXED)
-#else
-#define ADDR (void *)(0x0UL)
+#define ADDR (void *)(0x0UL) /* let kernel choose address */
#define FLAGS (MAP_SHARED)
-#endif
void check_bytes(char *addr)
{
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 262d8e6793a3..b392e496f816 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -16,9 +16,9 @@ by sharing the data common between them. But it can be useful to any
application which generates many instances of the same data.
KSM only merges anonymous (private) pages, never pagecache (file) pages.
-KSM's merged pages are at present locked into kernel memory for as long
-as they are shared: so cannot be swapped out like the user pages they
-replace (but swapping KSM pages should follow soon in a later release).
+KSM's merged pages were originally locked into kernel memory, but can now
+be swapped out just like other user pages (but sharing is broken when they
+are swapped back in: ksmd must rediscover their identity and merge again).
KSM only operates on those areas of address space which an application
has advised to be likely candidates for merging, by using the madvise(2)
@@ -44,20 +44,12 @@ includes unmapped gaps (though working on the intervening mapped areas),
and might fail with EAGAIN if not enough memory for internal structures.
Applications should be considerate in their use of MADV_MERGEABLE,
-restricting its use to areas likely to benefit. KSM's scans may use
-a lot of processing power, and its kernel-resident pages are a limited
-resource. Some installations will disable KSM for these reasons.
+restricting its use to areas likely to benefit. KSM's scans may use a lot
+of processing power: some installations will disable KSM for that reason.
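+
+A minimal user-space sketch of advising such an area (the size here is an
+arbitrary example; MADV_MERGEABLE needs a kernel built with CONFIG_KSM, and
+the fallback define assumes headers that predate the flag):
+
+	#include <stdio.h>
+	#include <sys/mman.h>
+
+	#ifndef MADV_MERGEABLE
+	#define MADV_MERGEABLE 12	/* value from the kernel ABI */
+	#endif
+
+	int main(void)
+	{
+		size_t len = 2 * 1024 * 1024;
+		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+		if (p == MAP_FAILED) {
+			perror("mmap");
+			return 1;
+		}
+		/* Mark the area as a merge candidate for ksmd. */
+		if (madvise(p, len, MADV_MERGEABLE) != 0)
+			perror("madvise(MADV_MERGEABLE)");
+
+		munmap(p, len);
+		return 0;
+	}
+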
The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
readable by all but writable only by root:
-max_kernel_pages - set to maximum number of kernel pages that KSM may use
- e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
- Value 0 imposes no limit on the kernel pages KSM may use;
- but note that any process using MADV_MERGEABLE can cause
- KSM to allocate these pages, unswappable until it exits.
- Default: quarter of memory (chosen to not pin too much)
-
pages_to_scan - how many present pages to scan before ksmd goes to sleep
e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
Default: 100 (chosen for demonstration purposes)
@@ -75,7 +67,7 @@ run - set 0 to stop ksmd from running but keep merged pages,
The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
-pages_shared - how many shared unswappable kernel pages KSM is using
+pages_shared - how many shared pages are being used
pages_sharing - how many more sites are sharing them i.e. how much saved
pages_unshared - how many pages unique but repeatedly checked for merging
pages_volatile - how many pages changing too fast to be placed in a tree
@@ -87,4 +79,4 @@ pages_volatile embraces several different kinds of activity, but a high
proportion there would also indicate poor use of madvise MADV_MERGEABLE.
Izik Eidus,
-Hugh Dickins, 24 Sept 2009
+Hugh Dickins, 17 Nov 2009
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index ea44ea502da1..7a7d9bab32ef 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -100,7 +100,7 @@
#define BIT(name) (1ULL << KPF_##name)
#define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL))
-static char *page_flag_names[] = {
+static const char *page_flag_names[] = {
[KPF_LOCKED] = "L:locked",
[KPF_ERROR] = "E:error",
[KPF_REFERENCED] = "R:referenced",
@@ -173,7 +173,7 @@ static int kpageflags_fd;
static int opt_hwpoison;
static int opt_unpoison;
-static char *hwpoison_debug_fs = "/debug/hwpoison";
+static const char hwpoison_debug_fs[] = "/debug/hwpoison";
static int hwpoison_inject_fd;
static int hwpoison_forget_fd;
@@ -560,7 +560,7 @@ static void walk_pfn(unsigned long voffset,
{
uint64_t buf[KPAGEFLAGS_BATCH];
unsigned long batch;
- unsigned long pages;
+ long pages;
unsigned long i;
while (count) {
@@ -673,30 +673,35 @@ static void usage(void)
printf(
"page-types [options]\n"
-" -r|--raw Raw mode, for kernel developers\n"
-" -a|--addr addr-spec Walk a range of pages\n"
-" -b|--bits bits-spec Walk pages with specified bits\n"
-" -p|--pid pid Walk process address space\n"
+" -r|--raw Raw mode, for kernel developers\n"
+" -d|--describe flags Describe flags\n"
+" -a|--addr addr-spec Walk a range of pages\n"
+" -b|--bits bits-spec Walk pages with specified bits\n"
+" -p|--pid pid Walk process address space\n"
#if 0 /* planned features */
-" -f|--file filename Walk file address space\n"
+" -f|--file filename Walk file address space\n"
#endif
-" -l|--list Show page details in ranges\n"
-" -L|--list-each Show page details one by one\n"
-" -N|--no-summary Don't show summay info\n"
-" -X|--hwpoison hwpoison pages\n"
-" -x|--unpoison unpoison pages\n"
-" -h|--help Show this usage message\n"
+" -l|--list Show page details in ranges\n"
+" -L|--list-each Show page details one by one\n"
+" -N|--no-summary Don't show summay info\n"
+" -X|--hwpoison hwpoison pages\n"
+" -x|--unpoison unpoison pages\n"
+" -h|--help Show this usage message\n"
+"flags:\n"
+" 0x10 bitfield format, e.g.\n"
+" anon bit-name, e.g.\n"
+" 0x10,anon comma-separated list, e.g.\n"
"addr-spec:\n"
-" N one page at offset N (unit: pages)\n"
-" N+M pages range from N to N+M-1\n"
-" N,M pages range from N to M-1\n"
-" N, pages range from N to end\n"
-" ,M pages range from 0 to M-1\n"
+" N one page at offset N (unit: pages)\n"
+" N+M pages range from N to N+M-1\n"
+" N,M pages range from N to M-1\n"
+" N, pages range from N to end\n"
+" ,M pages range from 0 to M-1\n"
"bits-spec:\n"
-" bit1,bit2 (flags & (bit1|bit2)) != 0\n"
-" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n"
-" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n"
-" =bit1,bit2 flags == (bit1|bit2)\n"
+" bit1,bit2 (flags & (bit1|bit2)) != 0\n"
+" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n"
+" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n"
+" =bit1,bit2 flags == (bit1|bit2)\n"
"bit-names:\n"
);
@@ -884,13 +889,23 @@ static void parse_bits_mask(const char *optarg)
add_bits_filter(mask, bits);
}
+static void describe_flags(const char *optarg)
+{
+ uint64_t flags = parse_flag_names(optarg, 0);
+ printf("0x%016llx\t%s\t%s\n",
+ (unsigned long long)flags,
+ page_flag_name(flags),
+ page_flag_longname(flags));
+}
+
-static struct option opts[] = {
+static const struct option opts[] = {
{ "raw" , 0, NULL, 'r' },
{ "pid" , 1, NULL, 'p' },
{ "file" , 1, NULL, 'f' },
{ "addr" , 1, NULL, 'a' },
{ "bits" , 1, NULL, 'b' },
+ { "describe" , 1, NULL, 'd' },
{ "list" , 0, NULL, 'l' },
{ "list-each" , 0, NULL, 'L' },
{ "no-summary", 0, NULL, 'N' },
@@ -907,7 +922,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
while ((c = getopt_long(argc, argv,
- "rp:f:a:b:lLNXxh", opts, NULL)) != -1) {
+ "rp:f:a:b:d:lLNXxh", opts, NULL)) != -1) {
switch (c) {
case 'r':
opt_raw = 1;
@@ -924,6 +939,9 @@ int main(int argc, char *argv[])
case 'b':
parse_bits_mask(optarg);
break;
+ case 'd':
+ describe_flags(optarg);
+ exit(0);
case 'l':
opt_list = 1;
break;
diff --git a/MAINTAINERS b/MAINTAINERS
index cff133be42c6..0a32c3ec6b1c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -835,13 +835,13 @@ F: arch/arm/mach-pxa/palmte2.c
F: arch/arm/mach-pxa/include/mach/palmtc.h
F: arch/arm/mach-pxa/palmtc.c
-ARM/PALM TREO 680 SUPPORT
+ARM/PALM TREO SUPPORT
M: Tomas Cech <sleep_walker@suse.cz>
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
-F: arch/arm/mach-pxa/include/mach/treo680.h
-F: arch/arm/mach-pxa/treo680.c
+F: arch/arm/mach-pxa/include/mach/palmtreo.h
+F: arch/arm/mach-pxa/palmtreo.c
ARM/PALMZ72 SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
@@ -1482,8 +1482,8 @@ F: include/linux/coda*.h
COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French <sfrench@samba.org>
-L: linux-cifs-client@lists.samba.org
-L: samba-technical@lists.samba.org
+L: linux-cifs-client@lists.samba.org (moderated for non-subscribers)
+L: samba-technical@lists.samba.org (moderated for non-subscribers)
W: http://linux-cifs.samba.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
S: Supported
@@ -3081,8 +3081,11 @@ S: Maintained
F: fs/autofs4/
KERNEL BUILD
+M: Michal Marek <mmarek@suse.cz>
+T: git git://repo.or.cz/linux-kbuild.git for-next
+T: git git://repo.or.cz/linux-kbuild.git for-linus
L: linux-kbuild@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/kbuild/
F: Makefile
F: scripts/Makefile.*
@@ -3124,7 +3127,6 @@ L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
F: arch/x86/include/asm/svm.h
-F: arch/x86/kvm/kvm_svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
@@ -5080,6 +5082,7 @@ F: drivers/char/specialix*
SPI SUBSYSTEM
M: David Brownell <dbrownell@users.sourceforge.net>
+M: Grant Likely <grant.likely@secretlab.ca>
L: spi-devel-general@lists.sourceforge.net
S: Maintained
F: Documentation/spi/
@@ -5973,6 +5976,7 @@ M: Mark Brown <broonie@opensource.wolfsonmicro.com>
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
W: http://opensource.wolfsonmicro.com/node/8
S: Supported
+F: Documentation/hwmon/wm83??
F: drivers/leds/leds-wm83*.c
F: drivers/mfd/wm8*.c
F: drivers/power/wm83*.c
@@ -5982,9 +5986,9 @@ F: drivers/video/backlight/wm83*_bl.c
F: drivers/watchdog/wm83*_wdt.c
F: include/linux/mfd/wm831x/
F: include/linux/mfd/wm8350/
-F: include/linux/mfd/wm8400/
-F: sound/soc/codecs/wm8350.c
-F: sound/soc/codecs/wm8400.c
+F: include/linux/mfd/wm8400*
+F: sound/soc/codecs/wm8350.*
+F: sound/soc/codecs/wm8400.*
X.25 NETWORK LAYER
M: Henner Eisen <eis@baty.hanse.de>
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f670..471c07292e0b 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
-extern spinlock_t t2_hae_lock;
+extern raw_spinlock_t t2_hae_lock;
/*
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extbl(result, addr & 3);
}
@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extwl(result, addr & 3);
}
@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long r0, r1, work, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
r0 = *(vuip)(work);
r1 = *(vuip)(work + (4 << 5));
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return r1 << 32 | r0;
}
@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, w;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, w;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, work;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
*(vuip)work = b;
*(vuip)(work + (4 << 5)) = b >> 32;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb335..d0faca1e992d 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
: "m"(lock->lock) : "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
/***********************************************************/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
return success;
}
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
return success;
}
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..54c2afce0a1d 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index d9980d47ab81..e6d90568b65d 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,7 +74,7 @@
# define DBG(args)
#endif
-DEFINE_SPINLOCK(t2_hae_lock);
+DEFINE_RAW_SPINLOCK(t2_hae_lock);
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c0de072b8305..5f2cf23c4648 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
#endif
if (irq < ACTUAL_NR_IRQS) {
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
action = irq_desc[irq].action;
if (!action)
goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
} else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
seq_puts(p, "IPI: ");
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index d12af472e1c0..dbbf04f9230e 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/console.h>
#include <asm/uaccess.h>
#include <asm/machvec.h>
@@ -79,42 +80,41 @@ static srm_env_t srm_named_entries[] = {
static srm_env_t srm_numbered_entries[256];
-static int
-srm_env_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int srm_env_proc_show(struct seq_file *m, void *v)
{
- int nbytes;
unsigned long ret;
srm_env_t *entry;
+ char *page;
- if (off != 0) {
- *eof = 1;
- return 0;
- }
+ entry = (srm_env_t *)m->private;
+ page = (char *)__get_free_page(GFP_USER);
+ if (!page)
+ return -ENOMEM;
- entry = (srm_env_t *) data;
- ret = callback_getenv(entry->id, page, count);
+ ret = callback_getenv(entry->id, page, PAGE_SIZE);
if ((ret >> 61) == 0) {
- nbytes = (int) ret;
- *eof = 1;
+ seq_write(m, page, ret);
+ ret = 0;
} else
- nbytes = -EFAULT;
+ ret = -EFAULT;
+ free_page((unsigned long)page);
+ return ret;
+}
- return nbytes;
+static int srm_env_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, srm_env_proc_show, PDE(inode)->data);
}
-static int
-srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
int res;
- srm_env_t *entry;
+ srm_env_t *entry = PDE(file->f_path.dentry->d_inode)->data;
char *buf = (char *) __get_free_page(GFP_USER);
unsigned long ret1, ret2;
- entry = (srm_env_t *) data;
-
if (!buf)
return -ENOMEM;
@@ -140,6 +140,15 @@ srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
return res;
}
+static const struct file_operations srm_env_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = srm_env_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = srm_env_proc_write,
+};
+
static void
srm_env_cleanup(void)
{
@@ -245,15 +254,10 @@ srm_env_init(void)
*/
entry = srm_named_entries;
while (entry->name && entry->id) {
- entry->proc_entry = create_proc_entry(entry->name,
- 0644, named_dir);
+ entry->proc_entry = proc_create_data(entry->name, 0644, named_dir,
+ &srm_env_proc_fops, entry);
if (!entry->proc_entry)
goto cleanup;
-
- entry->proc_entry->data = (void *) entry;
- entry->proc_entry->read_proc = srm_env_read;
- entry->proc_entry->write_proc = srm_env_write;
-
entry++;
}
@@ -264,15 +268,12 @@ srm_env_init(void)
entry = &srm_numbered_entries[var_num];
entry->name = number[var_num];
- entry->proc_entry = create_proc_entry(entry->name,
- 0644, numbered_dir);
+ entry->proc_entry = proc_create_data(entry->name, 0644, numbered_dir,
+ &srm_env_proc_fops, entry);
if (!entry->proc_entry)
goto cleanup;
entry->id = var_num;
- entry->proc_entry->data = (void *) entry;
- entry->proc_entry->read_proc = srm_env_read;
- entry->proc_entry->write_proc = srm_env_write;
}
printk(KERN_INFO "%s: version %s loaded successfully\n", NAME,
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf8a99f19dc4..233a222752c0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -603,6 +603,7 @@ config ARCH_SA1100
select ARCH_SPARSEMEM_ENABLE
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
+ select CPU_FREQ
select GENERIC_GPIO
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
@@ -1359,13 +1360,9 @@ source "drivers/cpufreq/Kconfig"
config CPU_FREQ_SA1100
bool
- depends on CPU_FREQ && (SA1100_H3100 || SA1100_H3600 || SA1100_LART || SA1100_PLEB || SA1100_BADGE4 || SA1100_HACKKIT)
- default y
config CPU_FREQ_SA1110
bool
- depends on CPU_FREQ && (SA1100_ASSABET || SA1100_CERF || SA1100_PT_SYSTEM3)
- default y
config CPU_FREQ_INTEGRATOR
tristate "CPUfreq driver for ARM Integrator CPUs"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ff54c23d085e..5cb9326df7a7 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -71,6 +71,14 @@ config DEBUG_LL
in the kernel. This is helpful if you are debugging code that
executes before the console is initialized.
+config EARLY_PRINTK
+ bool "Early printk"
+ depends on DEBUG_LL
+ help
+ Say Y here if you want to have an early console using the
+ kernel low-level debugging functions. Add earlyprintk to your
+ kernel parameters to enable this console.
+
config DEBUG_ICEDCC
bool "Kernel low-level debugging via EmbeddedICE DCC channel"
depends on DEBUG_LL
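Note: once EARLY_PRINTK is enabled, the early console is switched on from the kernel command line, for example (illustrative boot arguments):

	console=ttyS0,115200 earlyprintk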
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
new file mode 100644
index 000000000000..823b11e7091a
--- /dev/null
+++ b/arch/arm/configs/zeus_defconfig
@@ -0,0 +1,2032 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Dec 8 20:27:05 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=13
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
+
+#
+# Intel PXA2xx/PXA3xx Implementations
+#
+
+#
+# Intel/Marvell Dev Platforms (sorted by hardware release time)
+#
+# CONFIG_ARCH_LUBBOCK is not set
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_MACH_ZYLONITE300 is not set
+# CONFIG_MACH_ZYLONITE320 is not set
+# CONFIG_MACH_LITTLETON is not set
+# CONFIG_MACH_TAVOREVB is not set
+# CONFIG_MACH_SAAR is not set
+
+#
+# Third Party Dev Platforms (sorted by vendor name)
+#
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_ARCH_VIPER is not set
+CONFIG_MACH_ARCOM_ZEUS=y
+# CONFIG_MACH_BALLOON3 is not set
+# CONFIG_MACH_CSB726 is not set
+# CONFIG_MACH_ARMCORE is not set
+# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
+# CONFIG_MACH_CM_X300 is not set
+# CONFIG_ARCH_GUMSTIX is not set
+# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
+# CONFIG_MACH_XCEP is not set
+# CONFIG_TRIZEPS_PXA is not set
+CONFIG_ARCOM_PCMCIA=y
+# CONFIG_MACH_LOGICPD_PXA270 is not set
+# CONFIG_MACH_PCM027 is not set
+# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
+
+#
+# End-user Products (sorted by vendor name)
+#
+# CONFIG_MACH_H4700 is not set
+# CONFIG_MACH_H5000 is not set
+# CONFIG_MACH_HIMALAYA is not set
+# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_MIOA701 is not set
+# CONFIG_PXA_EZX is not set
+# CONFIG_MACH_MP900C is not set
+# CONFIG_ARCH_PXA_PALM is not set
+# CONFIG_PXA_SHARPSL is not set
+# CONFIG_ARCH_PXA_ESERIES is not set
+CONFIG_PXA27x=y
+CONFIG_PXA_SSP=y
+CONFIG_PXA_HAVE_BOARD_IRQS=y
+CONFIG_PXA_HAVE_ISA_IRQS=y
+CONFIG_PLAT_PXA=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_IWMMXT=y
+CONFIG_XSCALE_PMU=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ISA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_PCCARD=m
+CONFIG_PCMCIA=m
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+# CONFIG_I82365 is not set
+# CONFIG_TCIC is not set
+CONFIG_PCMCIA_SOC_COMMON=m
+CONFIG_PCMCIA_PXA2XX=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA_PROBE=y
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=31:02 rootfstype=jffs2 ro console=ttyS0,115200"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+# CONFIG_BT_SCO is not set
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+# CONFIG_BT_BNEP_MC_FILTER is not set
+# CONFIG_BT_BNEP_PROTO_FILTER is not set
+# CONFIG_BT_HIDP is not set
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIDTL1 is not set
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBLUECARD is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+CONFIG_MTD_REDBOOT_PARTS_READONLY=y
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+# CONFIG_MTD_CFI_I2 is not set
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+CONFIG_MTD_PXA2XX=y
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+# CONFIG_PNP is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=m
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+# CONFIG_SATA_PMP is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
+# CONFIG_PATA_LEGACY is not set
+CONFIG_PATA_PCMCIA=m
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_SMC91X is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_DNET is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+CONFIG_HERMES=m
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PCMCIA_HERMES=m
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_P54_COMMON is not set
+CONFIG_RT2X00=m
+# CONFIG_RT2500USB is not set
+CONFIG_RT73USB=m
+# CONFIG_RT2800USB is not set
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+# CONFIG_WL12XX is not set
+# CONFIG_ZD1211RW is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_NET_PCMCIA=y
+# CONFIG_PCMCIA_3C589 is not set
+# CONFIG_PCMCIA_3C574 is not set
+# CONFIG_PCMCIA_FMVJ18X is not set
+# CONFIG_PCMCIA_PCNET is not set
+# CONFIG_PCMCIA_NMCLAN is not set
+# CONFIG_PCMCIA_SMC91C92 is not set
+# CONFIG_PCMCIA_XIRC2PS is not set
+# CONFIG_PCMCIA_AXNET is not set
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+CONFIG_TOUCHSCREEN_FUJITSU=m
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+CONFIG_TOUCHSCREEN_ELO=m
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+CONFIG_TOUCHSCREEN_MTOUCH=m
+CONFIG_TOUCHSCREEN_INEXIO=m
+# CONFIG_TOUCHSCREEN_MK712 is not set
+CONFIG_TOUCHSCREEN_HTCPEN=m
+CONFIG_TOUCHSCREEN_PENMOUNT=m
+CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
+CONFIG_TOUCHSCREEN_TOUCHWIN=m
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_CS is not set
+CONFIG_SERIAL_8250_NR_UARTS=7
+CONFIG_SERIAL_8250_RUNTIME_UARTS=7
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_PXA is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PXA=y
+# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_ELEKTOR is not set
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_PXA2XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=m
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_SA1100_WATCHDOG is not set
+
+#
+# ISA-based Watchdog Cards
+#
+# CONFIG_PCWATCHDOG is not set
+# CONFIG_MIXCOMWD is not set
+# CONFIG_WDT is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_PXA=m
+# CONFIG_FB_PXA_OVERLAY is not set
+# CONFIG_FB_PXA_SMARTPANEL is not set
+CONFIG_FB_PXA_PARAMETERS=y
+# CONFIG_FB_MBX is not set
+# CONFIG_FB_W100 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=m
+CONFIG_BACKLIGHT_GENERIC=m
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=m
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_PXA2XX_PCM=m
+CONFIG_SND_PXA2XX_LIB=m
+CONFIG_SND_PXA2XX_LIB_AC97=y
+CONFIG_SND_PXA2XX_AC97=m
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_PCMCIA is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_PXA2XX_SOC=m
+CONFIG_SND_SOC_I2C_AND_SPI=m
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=m
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=m
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_PXA=y
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=m
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+CONFIG_RTC_DRV_ISL1208=m
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_SA1100 is not set
+CONFIG_RTC_DRV_PXA=m
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=m
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+CONFIG_DEBUG_ERRORS=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=m
+CONFIG_CRYPTO_ALGAPI2=m
+CONFIG_CRYPTO_AEAD2=m
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=m
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=m
+CONFIG_CRYPTO_RNG2=m
+CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=m
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=m
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4ea..8920b2d6e3b8 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
*/
#define do_bad_IRQ(irq,desc) \
do { \
- spin_lock(&desc->lock); \
+ raw_spin_lock(&desc->lock); \
handle_bad_irq(irq, desc); \
- spin_unlock(&desc->lock); \
+ raw_spin_unlock(&desc->lock); \
} while(0)
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..c91c64cab922 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
* Locked value: 1
*/
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
/*
* Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cc");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..d14d197ae04a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
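
For readers tracking the rename, here is a minimal sketch (not part of the patch) of the old-to-new mapping at a call site; the lock variable and function are illustrative assumptions, and real kernel code still reaches these primitives through the spin_lock()/spin_unlock() wrappers rather than calling arch_* directly.

/* Illustrative only: the arch-level primitives after the rename. */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	arch_spin_lock(&demo_lock);	/* was __raw_spin_lock(&raw_lock) */
	/* ... protected work ... */
	arch_spin_unlock(&demo_lock);	/* was __raw_spin_unlock(&raw_lock) */
}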
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e7ccf7e697ce..dd00f747e2ad 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -54,5 +54,6 @@ endif
head-y := head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
new file mode 100644
index 000000000000..85aa2b292692
--- /dev/null
+++ b/arch/arm/kernel/early_printk.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/arm/kernel/early_printk.c
+ *
+ * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+extern void printch(int);
+
+static void early_write(const char *s, unsigned n)
+{
+ while (n-- > 0) {
+ if (*s == '\n')
+ printch('\r');
+ printch(*s);
+ s++;
+ }
+}
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+ early_write(s, n);
+}
+
+static struct console early_console = {
+ .name = "earlycon",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vscnprintf(buf, sizeof(buf), fmt, ap);
+ early_write(buf, n);
+ va_end(ap);
+}
+
+static int __init setup_early_printk(char *buf)
+{
+ register_console(&early_console);
+ return 0;
+}
+
+early_param("earlyprintk", setup_early_printk);
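
A brief usage note, not part of the series: with CONFIG_EARLY_PRINTK enabled and `earlyprintk` on the kernel command line, this boot console is registered and output goes through the platform's printch() until the real console takes over. The call below is an assumed, illustrative call site.

/* Illustrative only: early board/setup code emitting a message.
 * Boot with something like: console=ttyS0,115200 earlyprintk */
early_printk("early console alive, booting %s\n", "zImage");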
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c9a8619f3856..b7cb45bb91e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_FIQ
show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
}
desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
desc->status &= ~IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
desc->status &= ~IRQ_NOAUTOEN;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a73a34dccf2a..ea02a7b1c244 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -160,6 +160,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
/* Make sure our local interrupt controller has this enabled */
local_irq_save(flags);
+ irq_to_desc(clk->irq)->status |= IRQ_NOPROBE;
get_irq_chip(clk->irq)->unmask(clk->irq);
local_irq_restore(flags);
diff --git a/arch/arm/mach-at91/include/mach/atmel-mci.h b/arch/arm/mach-at91/include/mach/atmel-mci.h
new file mode 100644
index 000000000000..998cb0c07135
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/atmel-mci.h
@@ -0,0 +1,24 @@
+#ifndef __MACH_ATMEL_MCI_H
+#define __MACH_ATMEL_MCI_H
+
+#include <mach/at_hdmac.h>
+
+/**
+ * struct mci_dma_data - DMA data for MCI interface
+ */
+struct mci_dma_data {
+ struct at_dma_slave sdata;
+};
+
+/* accessor macros */
+#define slave_data_ptr(s) (&(s)->sdata)
+#define find_slave_dev(s) ((s)->sdata.dma_dev)
+
+#define setup_dma_addr(s, t, r) do { \
+ if (s) { \
+ (s)->sdata.tx_reg = (t); \
+ (s)->sdata.rx_reg = (r); \
+ } \
+} while (0)
+
+#endif /* __MACH_ATMEL_MCI_H */
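
As a hedged illustration of the new accessors, board code might fill in a struct mci_dma_data and point the slave at the MCI transmit/receive registers as sketched below; the register addresses and the helper name are placeholders, not values taken from this patch.

/* Illustrative only: the addresses below are placeholders. */
static struct mci_dma_data mci_dma_pdata;

static void board_wire_up_mci_dma(void)
{
	/* tx_reg / rx_reg would be the MCI TDR / RDR physical addresses */
	setup_dma_addr(&mci_dma_pdata, 0xfffa8034, 0xfffa8030);
}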
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index e522b20bcbc2..f70d52be48a2 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -30,6 +30,8 @@
#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
+#define __pfn_to_bus(x) (__pfn_to_phys(x) - PHYS_OFFSET)
+#define __bus_to_pfn(x) __phys_to_pfn((x) + PHYS_OFFSET)
#endif
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index b97f529e58e8..41febc796b1c 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -201,6 +201,11 @@ void __init footbridge_map_io(void)
#ifdef CONFIG_FOOTBRIDGE_ADDIN
+static inline unsigned long fb_bus_sdram_offset(void)
+{
+ return *CSR_PCISDRAMBASE & 0xfffffff0;
+}
+
/*
* These two functions convert virtual addresses to PCI addresses and PCI
* addresses to virtual addresses. Note that it is only legal to use these
@@ -210,14 +215,13 @@ unsigned long __virt_to_bus(unsigned long res)
{
WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory);
- return (res - PAGE_OFFSET) + (*CSR_PCISDRAMBASE & 0xfffffff0);
+ return res + (fb_bus_sdram_offset() - PAGE_OFFSET);
}
EXPORT_SYMBOL(__virt_to_bus);
unsigned long __bus_to_virt(unsigned long res)
{
- res -= (*CSR_PCISDRAMBASE & 0xfffffff0);
- res += PAGE_OFFSET;
+ res = res - (fb_bus_sdram_offset() - PAGE_OFFSET);
WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory);
@@ -225,4 +229,16 @@ unsigned long __bus_to_virt(unsigned long res)
}
EXPORT_SYMBOL(__bus_to_virt);
+unsigned long __pfn_to_bus(unsigned long pfn)
+{
+ return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
+}
+EXPORT_SYMBOL(__pfn_to_bus);
+
+unsigned long __bus_to_pfn(unsigned long bus)
+{
+ return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET));
+}
+EXPORT_SYMBOL(__bus_to_pfn);
+
#endif
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index cb16e59d87b6..8d64f4574087 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -29,6 +29,8 @@
#ifndef __ASSEMBLY__
extern unsigned long __virt_to_bus(unsigned long);
extern unsigned long __bus_to_virt(unsigned long);
+extern unsigned long __pfn_to_bus(unsigned long);
+extern unsigned long __bus_to_pfn(unsigned long);
#endif
#define __virt_to_bus __virt_to_bus
#define __bus_to_virt __bus_to_virt
@@ -36,14 +38,15 @@ extern unsigned long __bus_to_virt(unsigned long);
#elif defined(CONFIG_FOOTBRIDGE_HOST)
/*
- * The footbridge is programmed to expose the system RAM at the corresponding
- * address. So, if PAGE_OFFSET is 0xc0000000, RAM appears at 0xe0000000.
- * If 0x80000000, then its exposed at 0xa0000000 on the bus. etc.
- * The only requirement is that the RAM isn't placed at bus address 0 which
+ * The footbridge is programmed to expose the system RAM at 0xe0000000.
+ * The requirement is that the RAM isn't placed at bus address 0, which
* would clash with VGA cards.
*/
-#define __virt_to_bus(x) ((x) - 0xe0000000)
-#define __bus_to_virt(x) ((x) + 0xe0000000)
+#define BUS_OFFSET 0xe0000000
+#define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET))
+#define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET))
+#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
+#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
#else
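
A worked example of the rewritten macros, assuming PAGE_OFFSET == 0xc0000000 and PHYS_OFFSET == 0 (both assumptions, not taken from the patch):

/* __virt_to_bus(0xc0100000) == 0xc0100000 + (0xe0000000 - 0xc0000000)
 *                            == 0xe0100000
 * __bus_to_virt(0xe0100000)  == 0xc0100000
 * i.e. kernel RAM at PAGE_OFFSET shows up on the bus at BUS_OFFSET, as the
 * comment above describes, for any choice of PAGE_OFFSET. */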
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index 4891828454f5..991f24d2c115 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -28,6 +28,7 @@
#define BUS_OFFSET UL(0x80000000)
#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET)
#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET)
-#define __pfn_to_bus(x) (((x) << PAGE_SHIFT) + BUS_OFFSET)
+#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
+#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h
index aee7eb8a71b2..98e3471be15b 100644
--- a/arch/arm/mach-ixp2000/include/mach/memory.h
+++ b/arch/arm/mach-ixp2000/include/mach/memory.h
@@ -17,11 +17,15 @@
#include <mach/ixp2000-regs.h>
-#define __virt_to_bus(v) \
- (((__virt_to_phys(v) - 0x0) + (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)))
+#define IXP2000_PCI_SDRAM_OFFSET (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)
-#define __bus_to_virt(b) \
- __phys_to_virt((((b - (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)) + 0x0)))
+#define __phys_to_bus(x) ((x) + (IXP2000_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+#define __bus_to_phys(x) ((x) - (IXP2000_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+
+#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
+#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
+#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
+#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
#endif
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index fdd138706c70..94a3a86cfeb8 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,16 +19,15 @@
*/
#define PHYS_OFFSET (0x00000000)
-#define __virt_to_bus(v) \
- ({ unsigned int ret; \
- ret = ((__virt_to_phys(v) - 0x00000000) + \
- (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)); \
- ret; })
-
-#define __bus_to_virt(b) \
- ({ unsigned int data; \
- data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \
- __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); })
+#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)
+
+#define __phys_to_bus(x) ((x) + (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+#define __bus_to_phys(x) ((x) - (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+
+#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
+#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
+#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
+#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
#define arch_is_coherent() 1
diff --git a/arch/arm/mach-lh7a40x/clocks.c b/arch/arm/mach-lh7a40x/clocks.c
index 6182f5410b4d..fcaf876f19b6 100644
--- a/arch/arm/mach-lh7a40x/clocks.c
+++ b/arch/arm/mach-lh7a40x/clocks.c
@@ -7,8 +7,6 @@
* version 2 as published by the Free Software Foundation.
*
*/
-
-#include <linux/cpufreq.h>
#include <mach/hardware.h>
#include <mach/clocks.h>
#include <linux/err.h>
@@ -31,12 +29,6 @@ struct clk {
#define HCLKDIV(c) (((c) >> 0) & 0x02)
#define PCLKDIV(c) (((c) >> 16) & 0x03)
-unsigned int cpufreq_get (unsigned int cpu) /* in kHz */
-{
- return fclkfreq_get ()/1000;
-}
-EXPORT_SYMBOL(cpufreq_get);
-
unsigned int fclkfreq_get (void)
{
unsigned int clkset = CSC_CLKSET;
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index feb0e54a91de..038f24d47023 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
BUG_ON(desc->status & IRQ_INPROGRESS);
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
goto out_mask;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
* Maybe this function should go to kernel/irq/chip.c? */
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
/* ack unconditionally to unmask lower prio irqs */
desc->chip->ack(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
#define handle_irq handle_prio_irq
#endif
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index db9374bc528b..e508904fb67e 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -19,7 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 4cfb7b68dfad..c90b0d0b1927 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -20,7 +20,7 @@
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include <linux/io.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 37431738f1c2..995d4a2b2dfd 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -24,7 +24,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 6ada8029f9a8..231cb4ec1847 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -29,7 +29,7 @@
#include <linux/mtd/nand.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 6f6c601eeab7..ef17cf1ab6d7 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -24,7 +24,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5b78a87217e0..d192dd98a591 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -26,7 +26,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include <linux/mtd/mtd.h>
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index bf26ad31f9ba..17f3c91231db 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -402,15 +402,9 @@ static struct twl4030_usb_data rx51_usb_data = {
static struct twl4030_ins sleep_on_seq[] __initdata = {
/*
- * Turn off VDD1 and VDD2.
+ * Turn off everything
*/
- {MSG_SINGULAR(DEV_GRP_P1, 0xf, RES_STATE_OFF), 4},
- {MSG_SINGULAR(DEV_GRP_P1, 0x10, RES_STATE_OFF), 2},
-/*
- * And also turn off the OMAP3 PLLs and the sysclk output.
- */
- {MSG_SINGULAR(DEV_GRP_P1, 0x7, RES_STATE_OFF), 3},
- {MSG_SINGULAR(DEV_GRP_P1, 0x17, RES_STATE_OFF), 3},
+ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_SLEEP), 2},
};
static struct twl4030_script sleep_on_script __initdata = {
@@ -421,14 +415,9 @@ static struct twl4030_script sleep_on_script __initdata = {
static struct twl4030_ins wakeup_seq[] __initdata = {
/*
- * Reenable the OMAP3 PLLs.
- * Wakeup VDD1 and VDD2.
- * Reenable sysclk output.
+ * Reenable everything
*/
- {MSG_SINGULAR(DEV_GRP_P1, 0x7, RES_STATE_ACTIVE), 0x30},
- {MSG_SINGULAR(DEV_GRP_P1, 0xf, RES_STATE_ACTIVE), 0x30},
- {MSG_SINGULAR(DEV_GRP_P1, 0x10, RES_STATE_ACTIVE), 0x37},
- {MSG_SINGULAR(DEV_GRP_P1, 0x19, RES_STATE_ACTIVE), 3},
+ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2},
};
static struct twl4030_script wakeup_script __initdata = {
@@ -439,10 +428,9 @@ static struct twl4030_script wakeup_script __initdata = {
static struct twl4030_ins wakeup_p3_seq[] __initdata = {
/*
- * Wakeup VDD1 (dummy to be able to insert a delay)
- * Enable CLKEN
+ * Reenable everything
*/
- {MSG_SINGULAR(DEV_GRP_P1, 0x17, RES_STATE_ACTIVE), 3},
+ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2},
};
static struct twl4030_script wakeup_p3_script __initdata = {
@@ -463,12 +451,11 @@ static struct twl4030_ins wrst_seq[] __initdata = {
{MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 2},
{MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 1, RES_STATE_ACTIVE),
0x13},
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 2, RES_STATE_WRST), 0x13},
{MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 3, RES_STATE_OFF), 0x13},
{MSG_SINGULAR(DEV_GRP_NULL, RES_VDD1, RES_STATE_WRST), 0x13},
{MSG_SINGULAR(DEV_GRP_NULL, RES_VDD2, RES_STATE_WRST), 0x13},
{MSG_SINGULAR(DEV_GRP_NULL, RES_VPLL1, RES_STATE_WRST), 0x35},
- {MSG_SINGULAR(DEV_GRP_P1, RES_HFCLKOUT, RES_STATE_ACTIVE), 2},
+ {MSG_SINGULAR(DEV_GRP_P3, RES_HFCLKOUT, RES_STATE_ACTIVE), 2},
{MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE), 2},
};
@@ -490,22 +477,81 @@ static struct twl4030_script *twl4030_scripts[] __initdata = {
};
static struct twl4030_resconfig twl4030_rconfig[] __initdata = {
- { .resource = RES_VINTANA1, .devgroup = -1, .type = -1, .type2 = 1 },
- { .resource = RES_VINTANA2, .devgroup = -1, .type = -1, .type2 = 1 },
- { .resource = RES_VINTDIG, .devgroup = -1, .type = -1, .type2 = 1 },
- { .resource = RES_VMMC1, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VMMC2, .devgroup = DEV_GRP_NULL, .type = -1,
- .type2 = 3},
- { .resource = RES_VAUX1, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VAUX2, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VAUX3, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VAUX4, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VPLL2, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VDAC, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VSIM, .devgroup = DEV_GRP_NULL, .type = -1,
- .type2 = 3},
- { .resource = RES_CLKEN, .devgroup = DEV_GRP_P3, .type = -1,
- .type2 = 1 },
+ { .resource = RES_VDD1, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
+ .remap_sleep = RES_STATE_OFF
+ },
+ { .resource = RES_VDD2, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
+ .remap_sleep = RES_STATE_OFF
+ },
+ { .resource = RES_VPLL1, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
+ .remap_sleep = RES_STATE_OFF
+ },
+ { .resource = RES_VPLL2, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX1, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX2, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX3, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX4, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VMMC1, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VMMC2, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VDAC, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VSIM, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VINTANA1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = -1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VINTANA2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VINTDIG, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = -1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VIO, .devgroup = DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_CLKEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1 , .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_REGEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_NRES_PWRON, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_SYSEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_32KCLKOUT, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_RESET, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_Main_Ref, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
{ 0, 0},
};
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index d89c6adbe8bc..e6d8e10ae5d1 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -63,6 +63,15 @@ config ARCH_VIPER
select HAVE_PWM
select PXA_HAVE_BOARD_IRQS
select PXA_HAVE_ISA_IRQS
+ select ARCOM_PCMCIA
+
+config MACH_ARCOM_ZEUS
+ bool "Arcom/Eurotech ZEUS SBC"
+ select PXA27x
+ select ISA
+ select PXA_HAVE_BOARD_IRQS
+ select PXA_HAVE_ISA_IRQS
+ select ARCOM_PCMCIA
config MACH_BALLOON3
bool "Balloon 3 board"
@@ -179,6 +188,11 @@ config MACH_TRIZEPS_ANY
endchoice
+config ARCOM_PCMCIA
+ bool
+ help
+ Generic option for Arcom Viper/Zeus PCMCIA
+
config TRIZEPS_PCMCIA
bool
help
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index b5d29e60a341..f64afda7e6f6 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_MACH_SAAR) += saar.o
# 3rd Party Dev Platforms
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_ARCH_VIPER) += viper.o
+obj-$(CONFIG_MACH_ARCOM_ZEUS) += zeus.o
obj-$(CONFIG_MACH_BALLOON3) += balloon3.o
obj-$(CONFIG_MACH_CSB726) += csb726.o
obj-$(CONFIG_CSB726_CSB701) += csb701.o
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 1c0de808b54d..c8a01bc85fde 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -497,16 +497,15 @@ static int em_x270_usb_hub_init(void)
goto err_free_vbus_gpio;
/* USB Hub power-on and reset */
- gpio_direction_output(usb_hub_reset, 0);
+ gpio_direction_output(usb_hub_reset, 1);
+ gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
regulator_enable(em_x270_usb_ldo);
- gpio_set_value(usb_hub_reset, 1);
gpio_set_value(usb_hub_reset, 0);
+ gpio_set_value(usb_hub_reset, 1);
regulator_disable(em_x270_usb_ldo);
regulator_enable(em_x270_usb_ldo);
- gpio_set_value(usb_hub_reset, 1);
-
- /* enable VBUS */
- gpio_direction_output(GPIO9_USB_VBUS_EN, 1);
+ gpio_set_value(usb_hub_reset, 0);
+ gpio_set_value(GPIO9_USB_VBUS_EN, 1);
return 0;
diff --git a/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h b/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h
new file mode 100644
index 000000000000..d428be4db44c
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h
@@ -0,0 +1,11 @@
+#ifndef __ARCOM_PCMCIA_H
+#define __ARCOM_PCMCIA_H
+
+struct arcom_pcmcia_pdata {
+ int cd_gpio;
+ int rdy_gpio;
+ int pwr_gpio;
+ void (*reset)(int state);
+};
+
+#endif
diff --git a/arch/arm/mach-pxa/include/mach/viper.h b/arch/arm/mach-pxa/include/mach/viper.h
index 10988c270ca3..5f5fbf1f6489 100644
--- a/arch/arm/mach-pxa/include/mach/viper.h
+++ b/arch/arm/mach-pxa/include/mach/viper.h
@@ -85,8 +85,6 @@
/* Interrupt and Configuration Register (VIPER_ICR) */
/* This is a write only register. Only CF_RST is used under Linux */
-extern void viper_cf_rst(int state);
-
#define VIPER_ICR_RETRIG (1 << 0)
#define VIPER_ICR_AUTO_CLR (1 << 1)
#define VIPER_ICR_R_DIS (1 << 2)
diff --git a/arch/arm/mach-pxa/include/mach/zeus.h b/arch/arm/mach-pxa/include/mach/zeus.h
new file mode 100644
index 000000000000..c387046d2f28
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/zeus.h
@@ -0,0 +1,82 @@
+/*
+ * arch/arm/mach-pxa/include/mach/zeus.h
+ *
+ * Author: David Vrabel
+ * Created: Sept 28, 2005
+ * Copyright: Arcom Control Systems Ltd.
+ *
+ * Maintained by: Marc Zyngier <maz@misterjones.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MACH_ZEUS_H
+#define _MACH_ZEUS_H
+
+/* Physical addresses */
+#define ZEUS_FLASH_PHYS PXA_CS0_PHYS
+#define ZEUS_ETH0_PHYS PXA_CS1_PHYS
+#define ZEUS_ETH1_PHYS PXA_CS2_PHYS
+#define ZEUS_CPLD_PHYS (PXA_CS4_PHYS+0x2000000)
+#define ZEUS_SRAM_PHYS PXA_CS5_PHYS
+#define ZEUS_PC104IO_PHYS (0x30000000)
+
+#define ZEUS_CPLD_VERSION_PHYS (ZEUS_CPLD_PHYS + 0x00000000)
+#define ZEUS_CPLD_ISA_IRQ_PHYS (ZEUS_CPLD_PHYS + 0x00800000)
+#define ZEUS_CPLD_CONTROL_PHYS (ZEUS_CPLD_PHYS + 0x01000000)
+#define ZEUS_CPLD_EXTWDOG_PHYS (ZEUS_CPLD_PHYS + 0x01800000)
+
+/* GPIOs */
+#define ZEUS_AC97_GPIO 0
+#define ZEUS_WAKEUP_GPIO 1
+#define ZEUS_UARTA_GPIO 9
+#define ZEUS_UARTB_GPIO 10
+#define ZEUS_UARTC_GPIO 12
+#define ZEUS_UARTD_GPIO 11
+#define ZEUS_ETH0_GPIO 14
+#define ZEUS_ISA_GPIO 17
+#define ZEUS_BKLEN_GPIO 19
+#define ZEUS_USB2_PWREN_GPIO 22
+#define ZEUS_PTT_GPIO 27
+#define ZEUS_CF_CD_GPIO 35
+#define ZEUS_MMC_WP_GPIO 52
+#define ZEUS_MMC_CD_GPIO 53
+#define ZEUS_EXTGPIO_GPIO 91
+#define ZEUS_CF_PWEN_GPIO 97
+#define ZEUS_CF_RDY_GPIO 99
+#define ZEUS_LCD_EN_GPIO 101
+#define ZEUS_ETH1_GPIO 113
+#define ZEUS_CAN_GPIO 116
+
+#define ZEUS_EXT0_GPIO_BASE 128
+#define ZEUS_EXT1_GPIO_BASE 160
+#define ZEUS_USER_GPIO_BASE 192
+
+#define ZEUS_EXT0_GPIO(x) (ZEUS_EXT0_GPIO_BASE + (x))
+#define ZEUS_EXT1_GPIO(x) (ZEUS_EXT1_GPIO_BASE + (x))
+#define ZEUS_USER_GPIO(x) (ZEUS_USER_GPIO_BASE + (x))
+
+/*
+ * CPLD registers:
+ * Only 4 registers, but spread over a 32MB address space.
+ * Be gentle, and remap that over 32kB...
+ */
+
+#define ZEUS_CPLD (0xf0000000)
+#define ZEUS_CPLD_VERSION (ZEUS_CPLD + 0x0000)
+#define ZEUS_CPLD_ISA_IRQ (ZEUS_CPLD + 0x1000)
+#define ZEUS_CPLD_CONTROL (ZEUS_CPLD + 0x2000)
+#define ZEUS_CPLD_EXTWDOG (ZEUS_CPLD + 0x3000)
+
+/* CPLD register bits */
+#define ZEUS_CPLD_CONTROL_CF_RST 0x01
+
+#define ZEUS_PC104IO (0xf1000000)
+
+#define ZEUS_SRAM_SIZE (256 * 1024)
+
+#endif
+
+
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index cf0d71b7797e..5352b4e5a7dd 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -47,6 +47,7 @@
#include <mach/pxafb.h>
#include <plat/i2c.h>
#include <mach/regs-uart.h>
+#include <mach/arcom-pcmcia.h>
#include <mach/viper.h>
#include <asm/setup.h>
@@ -76,14 +77,28 @@ static void viper_icr_clear_bit(unsigned int bit)
}
/* This function is used from the pcmcia module to reset the CF */
-void viper_cf_rst(int state)
+static void viper_cf_reset(int state)
{
if (state)
viper_icr_set_bit(VIPER_ICR_CF_RST);
else
viper_icr_clear_bit(VIPER_ICR_CF_RST);
}
-EXPORT_SYMBOL(viper_cf_rst);
+
+static struct arcom_pcmcia_pdata viper_pcmcia_info = {
+ .cd_gpio = VIPER_CF_CD_GPIO,
+ .rdy_gpio = VIPER_CF_RDY_GPIO,
+ .pwr_gpio = VIPER_CF_POWER_GPIO,
+ .reset = viper_cf_reset,
+};
+
+static struct platform_device viper_pcmcia_device = {
+ .name = "viper-pcmcia",
+ .id = -1,
+ .dev = {
+ .platform_data = &viper_pcmcia_info,
+ },
+};
/*
* The CPLD version register was not present on VIPER boards prior to
@@ -685,6 +700,7 @@ static struct platform_device *viper_devs[] __initdata = {
&viper_mtd_devices[0],
&viper_mtd_devices[1],
&viper_backlight_device,
+ &viper_pcmcia_device,
};
static mfp_cfg_t viper_pin_config[] __initdata = {
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
new file mode 100644
index 000000000000..5b986a8bd9e6
--- /dev/null
+++ b/arch/arm/mach-pxa/zeus.c
@@ -0,0 +1,820 @@
+/*
+ * Support for the Arcom ZEUS.
+ *
+ * Copyright (C) 2006 Arcom Control Systems Ltd.
+ *
+ * Loosely based on Arcom's 2.6.16.28.
+ * Maintained by Marc Zyngier <maz@misterjones.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/serial_8250.h>
+#include <linux/dm9000.h>
+#include <linux/mmc/host.h>
+#include <linux/spi/spi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/i2c.h>
+
+#include <mach/pxa2xx-regs.h>
+#include <mach/regs-uart.h>
+#include <mach/ohci.h>
+#include <mach/mmc.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/udc.h>
+#include <mach/pxafb.h>
+#include <mach/pxa2xx_spi.h>
+#include <mach/mfp-pxa27x.h>
+#include <mach/pm.h>
+#include <mach/audio.h>
+#include <mach/arcom-pcmcia.h>
+#include <mach/zeus.h>
+
+#include "generic.h"
+
+/*
+ * Interrupt handling
+ */
+
+static unsigned long zeus_irq_enabled_mask;
+static const int zeus_isa_irqs[] = { 3, 4, 5, 6, 7, 10, 11, 12, };
+static const int zeus_isa_irq_map[] = {
+ 0, /* ISA irq #0, invalid */
+ 0, /* ISA irq #1, invalid */
+ 0, /* ISA irq #2, invalid */
+ 1 << 0, /* ISA irq #3 */
+ 1 << 1, /* ISA irq #4 */
+ 1 << 2, /* ISA irq #5 */
+ 1 << 3, /* ISA irq #6 */
+ 1 << 4, /* ISA irq #7 */
+ 0, /* ISA irq #8, invalid */
+ 0, /* ISA irq #9, invalid */
+ 1 << 5, /* ISA irq #10 */
+ 1 << 6, /* ISA irq #11 */
+ 1 << 7, /* ISA irq #12 */
+};
+
+static inline int zeus_irq_to_bitmask(unsigned int irq)
+{
+ return zeus_isa_irq_map[irq - PXA_ISA_IRQ(0)];
+}
+
+static inline int zeus_bit_to_irq(int bit)
+{
+ return zeus_isa_irqs[bit] + PXA_ISA_IRQ(0);
+}
+
+static void zeus_ack_irq(unsigned int irq)
+{
+ __raw_writew(zeus_irq_to_bitmask(irq), ZEUS_CPLD_ISA_IRQ);
+}
+
+static void zeus_mask_irq(unsigned int irq)
+{
+ zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(irq));
+}
+
+static void zeus_unmask_irq(unsigned int irq)
+{
+ zeus_irq_enabled_mask |= zeus_irq_to_bitmask(irq);
+}
+
+static inline unsigned long zeus_irq_pending(void)
+{
+ return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
+}
+
+static void zeus_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long pending;
+
+ pending = zeus_irq_pending();
+ do {
+ /* we're in a chained irq handler,
+ * so ack the interrupt by hand */
+ desc->chip->ack(gpio_to_irq(ZEUS_ISA_GPIO));
+
+ if (likely(pending)) {
+ irq = zeus_bit_to_irq(__ffs(pending));
+ generic_handle_irq(irq);
+ }
+ pending = zeus_irq_pending();
+ } while (pending);
+}
+
+static struct irq_chip zeus_irq_chip = {
+ .name = "ISA",
+ .ack = zeus_ack_irq,
+ .mask = zeus_mask_irq,
+ .unmask = zeus_unmask_irq,
+};
+
+static void __init zeus_init_irq(void)
+{
+ int level;
+ int isa_irq;
+
+ pxa27x_init_irq();
+
+ /* Peripheral IRQs. It would be nice to move those inside driver
+ configuration, but it is not supported at the moment. */
+ set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), IRQ_TYPE_EDGE_FALLING);
+ set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING);
+
+ /* Setup ISA IRQs */
+ for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) {
+ isa_irq = zeus_bit_to_irq(level);
+ set_irq_chip(isa_irq, &zeus_irq_chip);
+ set_irq_handler(isa_irq, handle_edge_irq);
+ set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler);
+}
+
+
+/*
+ * Platform devices
+ */
+
+/* Flash */
+static struct resource zeus_mtd_resources[] = {
+ [0] = { /* NOR Flash (up to 64MB) */
+ .start = ZEUS_FLASH_PHYS,
+ .end = ZEUS_FLASH_PHYS + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* SRAM */
+ .start = ZEUS_SRAM_PHYS,
+ .end = ZEUS_SRAM_PHYS + SZ_512K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct physmap_flash_data zeus_flash_data[] = {
+ [0] = {
+ .width = 2,
+ .parts = NULL,
+ .nr_parts = 0,
+ },
+};
+
+static struct platform_device zeus_mtd_devices[] = {
+ [0] = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &zeus_flash_data[0],
+ },
+ .resource = &zeus_mtd_resources[0],
+ .num_resources = 1,
+ },
+};
+
+/* Serial */
+static struct resource zeus_serial_resources[] = {
+ {
+ .start = 0x10000000,
+ .end = 0x1000000f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x10800000,
+ .end = 0x1080000f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x11000000,
+ .end = 0x1100000f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x40100000,
+ .end = 0x4010001f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x40200000,
+ .end = 0x4020001f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x40700000,
+ .end = 0x4070001f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct plat_serial8250_port serial_platform_data[] = {
+ /* External UARTs */
+ /* FIXME: Shared IRQs on COM1-COM4 will not work properly on v1i1 hardware. */
+ { /* COM1 */
+ .mapbase = 0x10000000,
+ .irq = gpio_to_irq(ZEUS_UARTA_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* COM2 */
+ .mapbase = 0x10800000,
+ .irq = gpio_to_irq(ZEUS_UARTB_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* COM3 */
+ .mapbase = 0x11000000,
+ .irq = gpio_to_irq(ZEUS_UARTC_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* COM4 */
+ .mapbase = 0x11800000,
+ .irq = gpio_to_irq(ZEUS_UARTD_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ /* Internal UARTs */
+ { /* FFUART */
+ .membase = (void *)&FFUART,
+ .mapbase = __PREG(FFUART),
+ .irq = IRQ_FFUART,
+ .uartclk = 921600 * 16,
+ .regshift = 2,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* BTUART */
+ .membase = (void *)&BTUART,
+ .mapbase = __PREG(BTUART),
+ .irq = IRQ_BTUART,
+ .uartclk = 921600 * 16,
+ .regshift = 2,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* STUART */
+ .membase = (void *)&STUART,
+ .mapbase = __PREG(STUART),
+ .irq = IRQ_STUART,
+ .uartclk = 921600 * 16,
+ .regshift = 2,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { },
+};
+
+static struct platform_device zeus_serial_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+ .num_resources = ARRAY_SIZE(zeus_serial_resources),
+ .resource = zeus_serial_resources,
+};
+
+/* Ethernet */
+static struct resource zeus_dm9k0_resource[] = {
+ [0] = {
+ .start = ZEUS_ETH0_PHYS,
+ .end = ZEUS_ETH0_PHYS + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = ZEUS_ETH0_PHYS + 2,
+ .end = ZEUS_ETH0_PHYS + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = gpio_to_irq(ZEUS_ETH0_GPIO),
+ .end = gpio_to_irq(ZEUS_ETH0_GPIO),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+};
+
+static struct resource zeus_dm9k1_resource[] = {
+ [0] = {
+ .start = ZEUS_ETH1_PHYS,
+ .end = ZEUS_ETH1_PHYS + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = ZEUS_ETH1_PHYS + 2,
+ .end = ZEUS_ETH1_PHYS + 3,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = gpio_to_irq(ZEUS_ETH1_GPIO),
+ .end = gpio_to_irq(ZEUS_ETH1_GPIO),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+};
+
+static struct dm9000_plat_data zeus_dm9k_platdata = {
+ .flags = DM9000_PLATF_16BITONLY,
+};
+
+static struct platform_device zeus_dm9k0_device = {
+ .name = "dm9000",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(zeus_dm9k0_resource),
+ .resource = zeus_dm9k0_resource,
+ .dev = {
+ .platform_data = &zeus_dm9k_platdata,
+ }
+};
+
+static struct platform_device zeus_dm9k1_device = {
+ .name = "dm9000",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(zeus_dm9k1_resource),
+ .resource = zeus_dm9k1_resource,
+ .dev = {
+ .platform_data = &zeus_dm9k_platdata,
+ }
+};
+
+/* External SRAM */
+static struct resource zeus_sram_resource = {
+ .start = ZEUS_SRAM_PHYS,
+ .end = ZEUS_SRAM_PHYS + ZEUS_SRAM_SIZE * 2 - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device zeus_sram_device = {
+ .name = "pxa2xx-8bit-sram",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &zeus_sram_resource,
+};
+
+/* SPI interface on SSP3 */
+static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
+ .num_chipselect = 1,
+ .enable_dma = 1,
+};
+
+static struct platform_device pxa2xx_spi_ssp3_device = {
+ .name = "pxa2xx-spi",
+ .id = 3,
+ .dev = {
+ .platform_data = &pxa2xx_spi_ssp3_master_info,
+ },
+};
+
+/* Leds */
+static struct gpio_led zeus_leds[] = {
+ [0] = {
+ .name = "zeus:yellow:1",
+ .default_trigger = "heartbeat",
+ .gpio = ZEUS_EXT0_GPIO(3),
+ .active_low = 1,
+ },
+ [1] = {
+ .name = "zeus:yellow:2",
+ .default_trigger = "default-on",
+ .gpio = ZEUS_EXT0_GPIO(4),
+ .active_low = 1,
+ },
+ [2] = {
+ .name = "zeus:yellow:3",
+ .default_trigger = "default-on",
+ .gpio = ZEUS_EXT0_GPIO(5),
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data zeus_leds_info = {
+ .leds = zeus_leds,
+ .num_leds = ARRAY_SIZE(zeus_leds),
+};
+
+static struct platform_device zeus_leds_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &zeus_leds_info,
+ },
+};
+
+static void zeus_cf_reset(int state)
+{
+ u16 cpld_state = __raw_readw(ZEUS_CPLD_CONTROL);
+
+ if (state)
+ cpld_state |= ZEUS_CPLD_CONTROL_CF_RST;
+ else
+ cpld_state &= ~ZEUS_CPLD_CONTROL_CF_RST;
+
+ __raw_writew(cpld_state, ZEUS_CPLD_CONTROL);
+}
+
+static struct arcom_pcmcia_pdata zeus_pcmcia_info = {
+ .cd_gpio = ZEUS_CF_CD_GPIO,
+ .rdy_gpio = ZEUS_CF_RDY_GPIO,
+ .pwr_gpio = ZEUS_CF_PWEN_GPIO,
+ .reset = zeus_cf_reset,
+};
+
+static struct platform_device zeus_pcmcia_device = {
+ .name = "zeus-pcmcia",
+ .id = -1,
+ .dev = {
+ .platform_data = &zeus_pcmcia_info,
+ },
+};
+
+static struct platform_device *zeus_devices[] __initdata = {
+ &zeus_serial_device,
+ &zeus_mtd_devices[0],
+ &zeus_dm9k0_device,
+ &zeus_dm9k1_device,
+ &zeus_sram_device,
+ &pxa2xx_spi_ssp3_device,
+ &zeus_leds_device,
+ &zeus_pcmcia_device,
+};
+
+/* AC'97 */
+static pxa2xx_audio_ops_t zeus_ac97_info = {
+ .reset_gpio = 95,
+};
+
+
+/*
+ * USB host
+ */
+
+static int zeus_ohci_init(struct device *dev)
+{
+ int err;
+
+ /* Switch on port 2. */
+ if ((err = gpio_request(ZEUS_USB2_PWREN_GPIO, "USB2_PWREN"))) {
+ dev_err(dev, "Can't request USB2_PWREN\n");
+ return err;
+ }
+
+ if ((err = gpio_direction_output(ZEUS_USB2_PWREN_GPIO, 1))) {
+ gpio_free(ZEUS_USB2_PWREN_GPIO);
+ dev_err(dev, "Can't enable USB2_PWREN\n");
+ return err;
+ }
+
+ /* Port 2 is shared between host and client interface. */
+ UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
+
+ return 0;
+}
+
+static void zeus_ohci_exit(struct device *dev)
+{
+ /* Power-off port 2 */
+ gpio_direction_output(ZEUS_USB2_PWREN_GPIO, 0);
+ gpio_free(ZEUS_USB2_PWREN_GPIO);
+}
+
+static struct pxaohci_platform_data zeus_ohci_platform_data = {
+ .port_mode = PMM_NPS_MODE,
+ .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
+ .init = zeus_ohci_init,
+ .exit = zeus_ohci_exit,
+};
+
+/*
+ * Flat Panel
+ */
+
+static void zeus_lcd_power(int on, struct fb_var_screeninfo *si)
+{
+ gpio_set_value(ZEUS_LCD_EN_GPIO, on);
+}
+
+static void zeus_backlight_power(int on)
+{
+ gpio_set_value(ZEUS_BKLEN_GPIO, on);
+}
+
+static int zeus_setup_fb_gpios(void)
+{
+ int err;
+
+ if ((err = gpio_request(ZEUS_LCD_EN_GPIO, "LCD_EN")))
+ goto out_err;
+
+ if ((err = gpio_direction_output(ZEUS_LCD_EN_GPIO, 0)))
+ goto out_err_lcd;
+
+ if ((err = gpio_request(ZEUS_BKLEN_GPIO, "BKLEN")))
+ goto out_err_lcd;
+
+ if ((err = gpio_direction_output(ZEUS_BKLEN_GPIO, 0)))
+ goto out_err_bkl;
+
+ return 0;
+
+out_err_bkl:
+ gpio_free(ZEUS_BKLEN_GPIO);
+out_err_lcd:
+ gpio_free(ZEUS_LCD_EN_GPIO);
+out_err:
+ return err;
+}
+
+static struct pxafb_mode_info zeus_fb_mode_info[] = {
+ {
+ .pixclock = 39722,
+
+ .xres = 640,
+ .yres = 480,
+
+ .bpp = 16,
+
+ .hsync_len = 63,
+ .left_margin = 16,
+ .right_margin = 81,
+
+ .vsync_len = 2,
+ .upper_margin = 12,
+ .lower_margin = 31,
+
+ .sync = 0,
+ },
+};
+
+static struct pxafb_mach_info zeus_fb_info = {
+ .modes = zeus_fb_mode_info,
+ .num_modes = 1,
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+ .pxafb_lcd_power = zeus_lcd_power,
+ .pxafb_backlight_power = zeus_backlight_power,
+};
+
+/*
+ * MMC/SD Device
+ *
+ * The card detect interrupt isn't debounced so we delay it by 250ms
+ * to give the card a chance to fully insert/eject.
+ */
+
+static struct pxamci_platform_data zeus_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .detect_delay = HZ/4,
+ .gpio_card_detect = ZEUS_MMC_CD_GPIO,
+ .gpio_card_ro = ZEUS_MMC_WP_GPIO,
+ .gpio_card_ro_invert = 1,
+ .gpio_power = -1
+};
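/* Worked example (editorial note, not part of the patch): assuming HZ == 100,
 * a common ARM default, .detect_delay = HZ/4 == 25 jiffies == 250 ms, which is
 * the debounce delay described in the comment above. */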
+
+/*
+ * USB Device Controller
+ */
+static void zeus_udc_command(int cmd)
+{
+ switch (cmd) {
+ case PXA2XX_UDC_CMD_DISCONNECT:
+ pr_info("zeus: disconnecting USB client\n");
+ UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
+ break;
+
+ case PXA2XX_UDC_CMD_CONNECT:
+ pr_info("zeus: connecting USB client\n");
+ UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE;
+ break;
+ }
+}
+
+static struct pxa2xx_udc_mach_info zeus_udc_info = {
+ .udc_command = zeus_udc_command,
+};
+
+static void zeus_power_off(void)
+{
+ local_irq_disable();
+ pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP);
+}
+
+int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio,
+ unsigned ngpio, void *context)
+{
+ int i;
+ u8 pcb_info = 0;
+
+ for (i = 0; i < 8; i++) {
+ int pcb_bit = gpio + i + 8;
+
+ if (gpio_request(pcb_bit, "pcb info")) {
+ dev_err(&client->dev, "Can't request pcb info %d\n", i);
+ continue;
+ }
+
+ if (gpio_direction_input(pcb_bit)) {
+ dev_err(&client->dev, "Can't read pcb info %d\n", i);
+ gpio_free(pcb_bit);
+ continue;
+ }
+
+ pcb_info |= !!gpio_get_value(pcb_bit) << i;
+
+ gpio_free(pcb_bit);
+ }
+
+ dev_info(&client->dev, "Zeus PCB version %d issue %d\n",
+ pcb_info >> 4, pcb_info & 0xf);
+
+ return 0;
+}
+
+static struct pca953x_platform_data zeus_pca953x_pdata[] = {
+ [0] = { .gpio_base = ZEUS_EXT0_GPIO_BASE, },
+ [1] = {
+ .gpio_base = ZEUS_EXT1_GPIO_BASE,
+ .setup = zeus_get_pcb_info,
+ },
+ [2] = { .gpio_base = ZEUS_USER_GPIO_BASE, },
+};
+
+static struct i2c_board_info __initdata zeus_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("pca9535", 0x21),
+ .platform_data = &zeus_pca953x_pdata[0],
+ },
+ {
+ I2C_BOARD_INFO("pca9535", 0x22),
+ .platform_data = &zeus_pca953x_pdata[1],
+ },
+ {
+ I2C_BOARD_INFO("pca9535", 0x20),
+ .platform_data = &zeus_pca953x_pdata[2],
+ .irq = gpio_to_irq(ZEUS_EXTGPIO_GPIO),
+ },
+ { I2C_BOARD_INFO("lm75a", 0x48) },
+ { I2C_BOARD_INFO("24c01", 0x50) },
+ { I2C_BOARD_INFO("isl1208", 0x6f) },
+};
+
+static mfp_cfg_t zeus_pin_config[] __initdata = {
+ GPIO15_nCS_1,
+ GPIO78_nCS_2,
+ GPIO80_nCS_4,
+ GPIO33_nCS_5,
+
+ GPIO22_GPIO,
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+
+ GPIO88_USBH1_PWR,
+ GPIO89_USBH1_PEN,
+ GPIO119_USBH2_PWR,
+ GPIO120_USBH2_PEN,
+
+ GPIO86_LCD_LDD_16,
+ GPIO87_LCD_LDD_17,
+
+ GPIO102_GPIO,
+ GPIO104_CIF_DD_2,
+ GPIO105_CIF_DD_1,
+
+ GPIO48_nPOE,
+ GPIO49_nPWE,
+ GPIO50_nPIOR,
+ GPIO51_nPIOW,
+ GPIO85_nPCE_1,
+ GPIO54_nPCE_2,
+ GPIO79_PSKTSEL,
+ GPIO55_nPREG,
+ GPIO56_nPWAIT,
+ GPIO57_nIOIS16,
+ GPIO36_GPIO, /* CF CD */
+ GPIO97_GPIO, /* CF PWREN */
+ GPIO99_GPIO, /* CF RDY */
+};
+
+static void __init zeus_init(void)
+{
+ u16 dm9000_msc = 0xe279;
+
+ system_rev = __raw_readw(ZEUS_CPLD_VERSION);
+ pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));
+
+ /* Fix timings for dm9000s (CS1/CS2)*/
+ MSC0 = (MSC0 & 0xffff) | (dm9000_msc << 16);
+ MSC1 = (MSC1 & 0xffff0000) | dm9000_msc;
+
+ pm_power_off = zeus_power_off;
+
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config));
+
+ platform_add_devices(zeus_devices, ARRAY_SIZE(zeus_devices));
+
+ pxa_set_ohci_info(&zeus_ohci_platform_data);
+
+ if (zeus_setup_fb_gpios())
+ pr_err("Failed to setup fb gpios\n");
+ else
+ set_pxa_fb_info(&zeus_fb_info);
+
+ pxa_set_mci_info(&zeus_mci_platform_data);
+ pxa_set_udc_info(&zeus_udc_info);
+ pxa_set_ac97_info(&zeus_ac97_info);
+ pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
+}
+
+static struct map_desc zeus_io_desc[] __initdata = {
+ {
+ .virtual = ZEUS_CPLD_VERSION,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_VERSION_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_CPLD_ISA_IRQ,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_ISA_IRQ_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_CPLD_CONTROL,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_CONTROL_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_CPLD_EXTWDOG,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_EXTWDOG_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_PC104IO,
+ .pfn = __phys_to_pfn(ZEUS_PC104IO_PHYS),
+ .length = 0x00800000,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init zeus_map_io(void)
+{
+ pxa_map_io();
+
+ iotable_init(zeus_io_desc, ARRAY_SIZE(zeus_io_desc));
+
+ /* Clear PSPR to ensure a full restart on wake-up. */
+ PMCR = PSPR = 0;
+
+ /* enable internal 32.768kHz oscillator (ignore OSCC_OOK) */
+ OSCC |= OSCC_OON;
+
+ /* Some clock cycles later (from OSCC_ON), programme PCFR (OPDE...).
+ * float chip selects and PCMCIA */
+ PCFR = PCFR_OPDE | PCFR_DC_EN | PCFR_FS | PCFR_FP;
+}
+
+MACHINE_START(ARCOM_ZEUS, "Arcom ZEUS")
+ /* Maintainer: Marc Zyngier <maz@misterjones.org> */
+ .phys_io = 0x40000000,
+ .io_pg_offst = ((io_p2v(0x40000000) >> 18) & 0xfffc),
+ .boot_params = 0xa0000100,
+ .map_io = zeus_map_io,
+ .init_irq = zeus_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = zeus_init,
+MACHINE_END
+
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index c48e1f2c3349..ee5e392430e8 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -70,7 +70,7 @@ config MACH_REALVIEW_PBX
bool "Support RealView/PBX platform"
select ARM_GIC
select HAVE_PATA_PLATFORM
- select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !HIGH_PHYS_OFFSET
+ select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
select ZONE_DMA if SPARSEMEM
help
Include support for the ARM(R) RealView PBX platform.
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
index f76d6ff4aeb9..0b4a3a03071f 100644
--- a/arch/arm/mach-s3c2442/mach-gta02.c
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -268,6 +268,9 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.batteries = gta02_batteries,
.num_batteries = ARRAY_SIZE(gta02_batteries),
+
+ .charger_reference_current_ma = 1000,
+
.reg_init_data = {
[PCF50633_REGULATOR_AUTO] = {
.constraints = {
diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h
index 585211ca0187..7d74fd5c8d66 100644
--- a/arch/arm/mach-s3c24a0/include/mach/memory.h
+++ b/arch/arm/mach-s3c24a0/include/mach/memory.h
@@ -15,5 +15,7 @@
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
+#define __pfn_to_bus(x) __pfn_to_phys(x)
+#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index 03a7f3857c5e..b17d52f7cc48 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -4,6 +4,7 @@ menu "SA11x0 Implementations"
config SA1100_ASSABET
bool "Assabet"
+ select CPU_FREQ_SA1110
help
Say Y here if you are using the Intel(R) StrongARM(R) SA-1110
Microprocessor Development Board (also known as the Assabet).
@@ -19,6 +20,7 @@ config ASSABET_NEPONSET
config SA1100_CERF
bool "CerfBoard"
+ select CPU_FREQ_SA1110
help
The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued).
More information is available at:
@@ -45,6 +47,7 @@ endchoice
config SA1100_COLLIE
bool "Sharp Zaurus SL5500"
+ # FIXME: select CPU_FREQ_SA11x0
select SHARP_LOCOMO
select SHARP_SCOOP
select SHARP_PARAM
@@ -54,6 +57,7 @@ config SA1100_COLLIE
config SA1100_H3100
bool "Compaq iPAQ H3100"
select HTC_EGPIO
+ select CPU_FREQ_SA1100
help
Say Y here if you intend to run this kernel on the Compaq iPAQ
H3100 handheld computer. Information about this machine and the
@@ -64,6 +68,7 @@ config SA1100_H3100
config SA1100_H3600
bool "Compaq iPAQ H3600/H3700"
select HTC_EGPIO
+ select CPU_FREQ_SA1100
help
Say Y here if you intend to run this kernel on the Compaq iPAQ
H3600 handheld computer. Information about this machine and the
@@ -74,6 +79,7 @@ config SA1100_H3600
config SA1100_BADGE4
bool "HP Labs BadgePAD 4"
select SA1111
+ select CPU_FREQ_SA1100
help
Say Y here if you want to build a kernel for the HP Laboratories
BadgePAD 4.
@@ -81,6 +87,7 @@ config SA1100_BADGE4
config SA1100_JORNADA720
bool "HP Jornada 720"
select SA1111
+ # FIXME: select CPU_FREQ_SA11x0
help
Say Y here if you want to build a kernel for the HP Jornada 720
handheld computer. See <http://www.hp.com/jornada/products/720>
@@ -98,12 +105,14 @@ config SA1100_JORNADA720_SSP
config SA1100_HACKKIT
bool "HackKit Core CPU Board"
+ select CPU_FREQ_SA1100
help
Say Y here to support the HackKit Core CPU Board
<http://hackkit.eletztrick.de>;
config SA1100_LART
bool "LART"
+ select CPU_FREQ_SA1100
help
Say Y here if you are using the Linux Advanced Radio Terminal
(also known as the LART). See <http://www.lartmaker.nl/> for
@@ -111,6 +120,7 @@ config SA1100_LART
config SA1100_PLEB
bool "PLEB"
+ select CPU_FREQ_SA1100
help
Say Y here if you are using version 1 of the Portable Linux
Embedded Board (also known as PLEB).
@@ -119,6 +129,7 @@ config SA1100_PLEB
config SA1100_SHANNON
bool "Shannon"
+ select CPU_FREQ_SA1100
help
The Shannon (also known as a Tuxscreen, and also as a IS2630) was a
limited edition webphone produced by Philips. The Shannon is a SA1100
@@ -127,6 +138,7 @@ config SA1100_SHANNON
config SA1100_SIMPAD
bool "Simpad"
+ select CPU_FREQ_SA1110
help
The SIEMENS webpad SIMpad is based on the StrongARM 1110. There
are two different versions CL4 and SL4. CL4 has 32MB RAM and 16MB
@@ -145,3 +157,4 @@ config SA1100_SSP
endmenu
endif
+
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 9faea1511c1f..3c1fcd696714 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -58,7 +58,6 @@ static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
2802 /* 280.2 MHz */
};
-#if defined(CONFIG_CPU_FREQ_SA1100) || defined(CONFIG_CPU_FREQ_SA1110)
/* rounds up(!) */
unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
{
@@ -110,17 +109,6 @@ unsigned int sa11x0_getspeed(unsigned int cpu)
return cclk_frequency_100khz[PPCR & 0xf] * 100;
}
-#else
-/*
- * We still need to provide this so building without cpufreq works.
- */
-unsigned int cpufreq_get(unsigned int cpu)
-{
- return cclk_frequency_100khz[PPCR & 0xf] * 100;
-}
-EXPORT_SYMBOL(cpufreq_get);
-#endif
-
/*
* This is the SA11x0 sched_clock implementation. This has
* a resolution of 271ns, and a maximum value of 32025597s (370 days).
diff --git a/arch/arm/mach-w90x900/include/mach/nuc900_spi.h b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
new file mode 100644
index 000000000000..bd94819e314f
--- /dev/null
+++ b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/mach-w90x900/include/mach/nuc900_spi.h
+ *
+ * Copyright (c) 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#ifndef __ASM_ARCH_SPI_H
+#define __ASM_ARCH_SPI_H
+
+extern void mfp_set_groupg(struct device *dev);
+
+struct nuc900_spi_info {
+ unsigned int num_cs;
+ unsigned int lsb;
+ unsigned int txneg;
+ unsigned int rxneg;
+ unsigned int divider;
+ unsigned int sleep;
+ unsigned int txnum;
+ unsigned int txbitlen;
+ int bus_num;
+};
+
+struct nuc900_spi_chip {
+ unsigned char bits_per_word;
+};
+
+#endif /* __ASM_ARCH_SPI_H */
diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c
index 6c768b71ad64..53fcef7c5201 100644
--- a/arch/arm/plat-omap/debug-leds.c
+++ b/arch/arm/plat-omap/debug-leds.c
@@ -293,7 +293,7 @@ static int fpga_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops fpga_dev_pm_ops = {
+static const struct dev_pm_ops fpga_dev_pm_ops = {
.suspend_noirq = fpga_suspend_noirq,
.resume_noirq = fpga_resume_noirq,
};
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 055160e0620e..04846811d0aa 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1431,7 +1431,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops omap_mpuio_dev_pm_ops = {
+static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
.suspend_noirq = omap_mpuio_suspend_noirq,
.resume_noirq = omap_mpuio_resume_noirq,
};
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index ce5dd2d1dc21..97d6c50c3dcb 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -472,8 +472,22 @@
#endif
#define TWL4030_GPIO_IRQ_END (TWL4030_GPIO_IRQ_BASE + TWL4030_GPIO_NR_IRQS)
+#define TWL6030_IRQ_BASE (OMAP_FPGA_IRQ_END)
+#ifdef CONFIG_TWL4030_CORE
+#define TWL6030_BASE_NR_IRQS 20
+#else
+#define TWL6030_BASE_NR_IRQS 0
+#endif
+#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS)
+
/* Total number of interrupts depends on the enabled blocks above */
-#define NR_IRQS TWL4030_GPIO_IRQ_END
+#if (TWL4030_GPIO_IRQ_END > TWL6030_IRQ_END)
+#define TWL_IRQ_END TWL4030_GPIO_IRQ_END
+#else
+#define TWL_IRQ_END TWL6030_IRQ_END
+#endif
+
+#define NR_IRQS TWL_IRQ_END
#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2d7423af1197..aed05bc3c2ea 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -38,16 +38,72 @@ union vfp_state *last_VFP_context[NR_CPUS];
*/
unsigned int VFP_arch;
+/*
+ * Per-thread VFP initialization.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu;
+
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
+ /*
+ * Disable VFP to ensure we initialize it first. We must ensure
+ * that the modification of last_VFP_context[] and hardware disable
+ * are done for the same CPU and without preemption.
+ */
+ cpu = get_cpu();
+ if (last_VFP_context[cpu] == vfp)
+ last_VFP_context[cpu] = NULL;
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ put_cpu();
+}
+
+static void vfp_thread_release(struct thread_info *thread)
+{
+ /* release case: Per-thread VFP cleanup. */
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu = thread->cpu;
+
+ if (last_VFP_context[cpu] == vfp)
+ last_VFP_context[cpu] = NULL;
+}
+
+/*
+ * When this function is called with one of the following 'cmd' values, the
+ * following holds while it runs:
+ * THREAD_NOTIFY_SWITCH:
+ * - the previously running thread will not be scheduled onto another CPU.
+ * - the next thread to be run (v) will not be running on another CPU.
+ * - thread->cpu is the local CPU number
+ * - not preemptible as we're called in the middle of a thread switch
+ * THREAD_NOTIFY_FLUSH:
+ * - the thread (v) will be running on the local CPU, so
+ * v === current_thread_info()
+ * - thread->cpu is the local CPU number at the time it is accessed,
+ * but may change at any time.
+ * - we could be preempted if tree preempt rcu is enabled, so
+ * it is unsafe to use thread->cpu.
+ * THREAD_NOTIFY_RELEASE:
+ * - the thread (v) will not be running on any CPU; it is a dead thread.
+ * - thread->cpu will be the last CPU the thread ran on, which may not
+ * be the current CPU.
+ * - we could be preempted if tree preempt rcu is enabled.
+ */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
- union vfp_state *vfp;
- __u32 cpu = thread->cpu;
if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
u32 fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
+ unsigned int cpu = thread->cpu;
+
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
@@ -74,25 +130,10 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
return NOTIFY_DONE;
}
- vfp = &thread->vfpstate;
- if (cmd == THREAD_NOTIFY_FLUSH) {
- /*
- * Per-thread VFP initialisation.
- */
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
- /*
- * Disable VFP to ensure we initialise it first.
- */
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- }
-
- /* flush and release case: Per-thread VFP cleanup. */
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (cmd == THREAD_NOTIFY_FLUSH)
+ vfp_thread_flush(thread);
+ else
+ vfp_thread_release(thread);
return NOTIFY_DONE;
}
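A minimal sketch (not part of this patch) of the pattern the comment above describes: pin the task to the current CPU with get_cpu()/put_cpu() so that clearing the per-CPU owner slot and touching the hardware refer to the same CPU. The per_cpu_owner array and owner_flush() helper are illustrative names only.

#include <linux/smp.h>
#include <linux/threads.h>

static void *per_cpu_owner[NR_CPUS];

static void owner_flush(void *ctx)
{
	unsigned int cpu;

	cpu = get_cpu();		/* disables preemption */
	if (per_cpu_owner[cpu] == ctx)
		per_cpu_owner[cpu] = NULL;
	/* the hardware disable for this CPU would go here */
	put_cpu();			/* re-enables preemption */
}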
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index d856354f4272..f2b319333184 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -112,6 +112,11 @@ config CPU_AT32AP7002
bool
select CPU_AT32AP700X
+# AP700X boards
+config BOARD_ATNGW100_COMMON
+ bool
+ select CPU_AT32AP7000
+
choice
prompt "AVR32 board type"
default BOARD_ATSTK1000
@@ -119,9 +124,13 @@ choice
config BOARD_ATSTK1000
bool "ATSTK1000 evaluation board"
-config BOARD_ATNGW100
+config BOARD_ATNGW100_MKI
bool "ATNGW100 Network Gateway"
- select CPU_AT32AP7000
+ select BOARD_ATNGW100_COMMON
+
+config BOARD_ATNGW100_MKII
+ bool "ATNGW100 mkII Network Gateway"
+ select BOARD_ATNGW100_COMMON
config BOARD_HAMMERHEAD
bool "Hammerhead board"
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index c21a3290d542..ead8a75203a9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -32,7 +32,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
head-y += arch/avr32/kernel/head.o
core-y += $(machdirs)
core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
-core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/
+core-$(CONFIG_BOARD_ATNGW100_COMMON) += arch/avr32/boards/atngw100/
core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/
core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/
core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/
diff --git a/arch/avr32/boards/atngw100/Kconfig b/arch/avr32/boards/atngw100/Kconfig
index be27a0218ab4..4e55617ade2d 100644
--- a/arch/avr32/boards/atngw100/Kconfig
+++ b/arch/avr32/boards/atngw100/Kconfig
@@ -1,6 +1,17 @@
# NGW100 customization
-if BOARD_ATNGW100
+if BOARD_ATNGW100_COMMON
+
+config BOARD_ATNGW100_MKII_LCD
+ bool "Enable ATNGW100 mkII LCD interface"
+ depends on BOARD_ATNGW100_MKII
+ help
+ This enables the LCD controller (LCDC) in the AT32AP7000. Since the
+ LCDC is multiplexed with the MACB1 (LAN) Ethernet port, only one can be
+ enabled at a time.
+
+ This choice enables the LCDC and disables the MACB1 interface marked
+ LAN on the PCB.
choice
prompt "Select an NGW100 add-on board to support"
@@ -11,15 +22,11 @@ config BOARD_ATNGW100_ADDON_NONE
config BOARD_ATNGW100_EVKLCD10X
bool "EVKLCD10X addon board"
+ depends on BOARD_ATNGW100_MKI || BOARD_ATNGW100_MKII_LCD
help
This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA)
- addon board for the NGW100. By enabling this the LCD controller and
- AC97 controller is added as platform devices.
-
- This choice disables the detect pin and the write-protect pin for the
- MCI platform device, since it conflicts with the LCD platform device.
- The MCI pins can be reenabled by editing the "add device function" but
- this may break the setup for other displays that use these pins.
+ addon board for the NGW100 and NGW100 mkII. By enabling this, the LCD
+ controller and AC97 controller are added as platform devices.
config BOARD_ATNGW100_MRMT
bool "Mediama RMT1/2 add-on board"
@@ -55,4 +62,4 @@ if BOARD_ATNGW100_MRMT
source "arch/avr32/boards/atngw100/Kconfig_mrmt"
endif
-endif # BOARD_ATNGW100
+endif # BOARD_ATNGW100_COMMON
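In practice the mux described in the help text above reduces to a compile-time gate in the board setup code: when the LCD interface is selected, registration of the second MACB is skipped so its pins stay free for the LCDC. A minimal sketch of that gate (the same pattern appears in the setup.c hunk later in this patch):

#ifndef CONFIG_BOARD_ATNGW100_MKII_LCD
	/* MACB1 (LAN) is only registered when the LCDC does not claim its pins */
	set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
#endif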
diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c
index 00337112c5ac..20388750d564 100644
--- a/arch/avr32/boards/atngw100/evklcd10x.c
+++ b/arch/avr32/boards/atngw100/evklcd10x.c
@@ -164,7 +164,12 @@ static int __init atevklcd10x_init(void)
at32_add_device_lcdc(0, &atevklcd10x_lcdc_data,
fbmem_start, fbmem_size,
- ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL);
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ ATMEL_LCDC_PRI_18BIT | ATMEL_LCDC_PC_DVAL
+#else
+ ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL
+#endif
+ );
at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH);
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index bf78e516a85f..7919be311f4a 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -302,6 +302,7 @@ static int __init mrmt1_init(void)
at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ),
GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH);
set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING );
+ at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info));
spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info));
#endif
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index bc299fbbeb4e..8c6a2440e345 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -20,6 +20,7 @@
#include <linux/leds.h>
#include <linux/spi/spi.h>
#include <linux/atmel-mci.h>
+#include <linux/usb/atmel_usba_udc.h>
#include <asm/io.h>
#include <asm/setup.h>
@@ -36,6 +37,75 @@ unsigned long at32_board_osc_rates[3] = {
[2] = 12000000, /* 12 MHz on osc1 */
};
+/*
+ * The ATNGW100 mkII is very similar to the ATNGW100. Both have the AT32AP7000
+ * chip on board; the difference is that the ATNGW100 mkII has 128 MB 32-bit
+ * SDRAM (the ATNGW100 has 32 MB 16-bit SDRAM) and 256 MB 16-bit NAND flash
+ * (the ATNGW100 has none).
+ *
+ * The RAM difference is handled by the boot loader, so the only differences we
+ * end up handling here are the NAND flash, the EBI pin reservation, and whether
+ * the LCDC or MACB1 should be enabled.
+ */
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+#include <linux/mtd/partitions.h>
+#include <mach/smc.h>
+
+static struct smc_timing nand_timing __initdata = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 10,
+ .ncs_write_setup = 0,
+ .nwe_setup = 10,
+
+ .ncs_read_pulse = 30,
+ .nrd_pulse = 15,
+ .ncs_write_pulse = 30,
+ .nwe_pulse = 15,
+
+ .read_cycle = 30,
+ .write_cycle = 30,
+
+ .ncs_read_recover = 0,
+ .nrd_recover = 15,
+ .ncs_write_recover = 0,
+ /* WE# high -> RE# low min 60 ns */
+ .nwe_recover = 50,
+};
+
+static struct smc_config nand_config __initdata = {
+ .bus_width = 2,
+ .nrd_controlled = 1,
+ .nwe_controlled = 1,
+ .nwait_mode = 0,
+ .byte_write = 0,
+ .tdf_cycles = 2,
+ .tdf_mode = 0,
+};
+
+static struct mtd_partition nand_partitions[] = {
+ {
+ .name = "main",
+ .offset = 0x00000000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct mtd_partition *nand_part_info(int size, int *num_partitions)
+{
+ *num_partitions = ARRAY_SIZE(nand_partitions);
+ return nand_partitions;
+}
+
+static struct atmel_nand_data atngw100mkii_nand_data __initdata = {
+ .cle = 21,
+ .ale = 22,
+ .rdy_pin = GPIO_PIN_PB(28),
+ .enable_pin = GPIO_PIN_PE(23),
+ .bus_width_16 = true,
+ .partition_info = nand_part_info,
+};
+#endif
+
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
@@ -56,9 +126,9 @@ static struct spi_board_info spi0_board_info[] __initdata = {
static struct mci_platform_data __initdata mci0_data = {
.slot[0] = {
.bus_width = 4,
-#if defined(CONFIG_BOARD_ATNGW100_EVKLCD10X) || defined(CONFIG_BOARD_ATNGW100_MRMT1)
- .detect_pin = GPIO_PIN_NONE,
- .wp_pin = GPIO_PIN_NONE,
+#if defined(CONFIG_BOARD_ATNGW100_MKII)
+ .detect_pin = GPIO_PIN_PC(25),
+ .wp_pin = GPIO_PIN_PE(22),
#else
.detect_pin = GPIO_PIN_PC(25),
.wp_pin = GPIO_PIN_PE(0),
@@ -66,6 +136,14 @@ static struct mci_platform_data __initdata mci0_data = {
},
};
+static struct usba_platform_data atngw100_usba_data __initdata = {
+#if defined(CONFIG_BOARD_ATNGW100_MKII)
+ .vbus_pin = GPIO_PIN_PE(26),
+#else
+ .vbus_pin = -ENODEV,
+#endif
+};
+
/*
* The next two functions should go away as the boot loader is
* supposed to initialize the macb address registers with a valid
@@ -173,18 +251,27 @@ static int __init atngw100_init(void)
unsigned i;
/*
- * ATNGW100 uses 16-bit SDRAM interface, so we don't need to
- * reserve any pins for it.
+ * ATNGW100 mkII uses 32-bit SDRAM interface. Reserve the
+ * SDRAM-specific pins so that nobody messes with them.
*/
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
+
+ smc_set_timing(&nand_config, &nand_timing);
+ smc_set_configuration(3, &nand_config);
+ at32_add_device_nand(0, &atngw100mkii_nand_data);
+#endif
at32_add_device_usart(0);
set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
+#ifndef CONFIG_BOARD_ATNGW100_MKII_LCD
set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
+#endif
at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
at32_add_device_mci(0, &mci0_data);
- at32_add_device_usba(0, NULL);
+ at32_add_device_usba(0, &atngw100_usba_data);
for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) {
at32_select_gpio(ngw_leds[i].gpio,
@@ -194,10 +281,14 @@ static int __init atngw100_init(void)
/* all these i2c/smbus pins should have external pullups for
* open-drain sharing among all I2C devices. SDA and SCL do;
- * PB28/EXTINT3 doesn't; it should be SMBALERT# (for PMBus),
- * but it's not available off-board.
+ * PB28/EXTINT3 (ATNGW100) and PE21 (ATNGW100 mkII) don't; that pin should
+ * be SMBALERT# (for PMBus), but it's not available off-board.
*/
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ at32_select_periph(GPIO_PIOE_BASE, 1 << 21, 0, AT32_GPIOF_PULLUP);
+#else
at32_select_periph(GPIO_PIOB_BASE, 1 << 28, 0, AT32_GPIOF_PULLUP);
+#endif
at32_select_gpio(i2c_gpio_data.sda_pin,
AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
at32_select_gpio(i2c_gpio_data.scl_pin,
@@ -211,14 +302,22 @@ postcore_initcall(atngw100_init);
static int __init atngw100_arch_init(void)
{
- /* PB30 is the otherwise unused jumper on the mainboard, with an
- * external pullup; the jumper grounds it. Use it however you
- * like, including letting U-Boot or Linux tweak boot sequences.
+ /* PB30 (ATNGW100) and PE30 (ATNGW100 mkII) go to the otherwise unused
+ * jumper on the mainboard, with an external pullup; the jumper grounds
+ * it. Use it however you like, including letting U-Boot or Linux tweak
+ * boot sequences.
*/
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ at32_select_gpio(GPIO_PIN_PE(30), 0);
+ gpio_request(GPIO_PIN_PE(30), "j15");
+ gpio_direction_input(GPIO_PIN_PE(30));
+ gpio_export(GPIO_PIN_PE(30), false);
+#else
at32_select_gpio(GPIO_PIN_PB(30), 0);
gpio_request(GPIO_PIN_PB(30), "j15");
gpio_direction_input(GPIO_PIN_PB(30));
gpio_export(GPIO_PIN_PB(30), false);
+#endif
/* set_irq_type() after the arch_initcall for EIC has run, and
* before the I2C subsystem could try using this IRQ.
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 574aca975334..32205c9d37d4 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc1
-# Tue Aug 5 16:00:47 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 09:39:22 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -59,38 +75,40 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
-# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
-# CONFIG_HAVE_IOREMAP_PROT is not set
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-# CONFIG_USE_GENERIC_SMP_HELPERS is not set
CONFIG_HAVE_CLK=y
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -98,11 +116,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -118,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -133,8 +148,23 @@ CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
# CONFIG_BOARD_ATSTK1000 is not set
-CONFIG_BOARD_ATNGW100=y
+CONFIG_BOARD_ATNGW100_MKI=y
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATSTK1002 is not set
+# CONFIG_BOARD_ATSTK1003 is not set
+# CONFIG_BOARD_ATSTK1004 is not set
+# CONFIG_BOARD_ATSTK1006 is not set
+# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set
+# CONFIG_BOARD_ATSTK1000_J2_RGB is not set
+CONFIG_BOARD_ATNGW100_ADDON_NONE=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set
+# CONFIG_BOARD_ATNGW100_MRMT is not set
CONFIG_LOADER_U_BOOT=y
#
@@ -150,7 +180,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_QUICKLIST=y
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -162,14 +192,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
# CONFIG_HZ_100 is not set
@@ -177,7 +209,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
@@ -188,6 +220,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
@@ -219,6 +252,8 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_NET=y
@@ -271,7 +306,6 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -314,10 +348,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_IPTABLES=m
@@ -343,16 +379,18 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
@@ -364,26 +402,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -395,6 +440,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -404,6 +450,7 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -453,16 +500,17 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x80000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -478,9 +526,22 @@ CONFIG_MTD_DATAFLASH=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
-# CONFIG_MTD_UBI is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -498,10 +559,20 @@ CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ATMEL_SSC is not set
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -534,26 +605,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -603,9 +685,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -614,7 +698,9 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
#
@@ -624,6 +710,7 @@ CONFIG_I2C_ALGOBIT=m
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -644,14 +731,6 @@ CONFIG_I2C_GPIO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-CONFIG_EEPROM_AT24=m
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -666,19 +745,28 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
CONFIG_GPIO_SYSFS=y
#
+# Memory mapped GPIO expanders:
+#
+
+#
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX732X is not set
@@ -694,11 +782,15 @@ CONFIG_GPIO_SYSFS=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -707,11 +799,11 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@@ -720,22 +812,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -756,32 +843,43 @@ CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -789,12 +887,18 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
@@ -802,10 +906,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y
CONFIG_MMC_TEST=m
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -815,7 +921,11 @@ CONFIG_LEDS_CLASS=y
# LED drivers
#
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -823,7 +933,13 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -855,25 +971,33 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
# CONFIG_RTC_DRV_M41T94 is not set
# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -892,24 +1016,38 @@ CONFIG_DMA_ENGINE=y
# DMA Clients
#
# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
-CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
-CONFIG_JBD=m
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -917,6 +1055,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -940,15 +1084,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -967,7 +1109,9 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_UBIFS_FS is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -975,7 +1119,9 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
+CONFIG_UFS_FS=y
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1060,14 +1206,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1083,6 +1233,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1091,13 +1242,39 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1105,19 +1282,30 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=y
# CONFIG_CRYPTO_TEST is not set
@@ -1145,11 +1333,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1166,7 +1356,7 @@ CONFIG_CRYPTO_SHA1=y
#
# Ciphers
#
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
# CONFIG_CRYPTO_ANUBIS is not set
CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1186,15 +1376,21 @@ CONFIG_CRYPTO_DES=y
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
-# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
@@ -1204,8 +1400,9 @@ CONFIG_CRC7=m
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 86a45b5c9d0d..c732cc397ad0 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25.6
-# Wed Jun 18 16:06:32 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 09:36:39 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -124,13 +148,26 @@ CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
# CONFIG_BOARD_ATSTK1000 is not set
-CONFIG_BOARD_ATNGW100=y
+CONFIG_BOARD_ATNGW100_MKI=y
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATSTK1002 is not set
+# CONFIG_BOARD_ATSTK1003 is not set
+# CONFIG_BOARD_ATSTK1004 is not set
+# CONFIG_BOARD_ATSTK1006 is not set
+# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set
+# CONFIG_BOARD_ATSTK1000_J2_RGB is not set
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set
# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
-CONFIG_BOARD_ATNGW100_I2C_GPIO=y
CONFIG_LOADER_U_BOOT=y
#
@@ -139,14 +176,14 @@ CONFIG_LOADER_U_BOOT=y
# CONFIG_AP700X_32_BIT_SMC is not set
CONFIG_AP700X_16_BIT_SMC=y
# CONFIG_AP700X_8_BIT_SMC is not set
-CONFIG_GPIO_DEV=y
CONFIG_LOAD_ADDRESS=0x10000000
CONFIG_ENTRY_ADDRESS=0x90000000
CONFIG_PHYS_OFFSET=0x10000000
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -158,33 +195,36 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_DW_DMAC=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
# Power management options
#
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
@@ -194,6 +234,7 @@ CONFIG_CPU_FREQ_TABLE=y
# CONFIG_CPU_FREQ_DEBUG is not set
# CONFIG_CPU_FREQ_STAT is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
@@ -214,11 +255,9 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -232,6 +271,7 @@ CONFIG_XFRM_USER=y
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -269,7 +309,6 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -285,8 +324,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
CONFIG_INET6_XFRM_MODE_BEET=y
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_IPV6_TUNNEL is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -310,10 +351,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_IPTABLES=m
@@ -339,16 +382,20 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
@@ -358,26 +405,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -389,6 +443,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -398,10 +453,12 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -446,16 +503,17 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x80000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -471,6 +529,11 @@ CONFIG_MTD_DATAFLASH=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
CONFIG_MTD_UBI=y
@@ -499,10 +562,20 @@ CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ATMEL_SSC is not set
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -514,7 +587,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -536,25 +608,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -598,15 +682,30 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@@ -619,9 +718,11 @@ CONFIG_INPUT_TOUCHSCREEN=y
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -636,9 +737,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -647,45 +750,44 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
#
-# I2C Hardware Bus support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-CONFIG_I2C_ATMELTWI=m
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -695,30 +797,48 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
-CONFIG_HAVE_GPIO_LIB=y
#
-# GPIO Support
+# PPS support
#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
#
+# PCI GPIO expanders:
+#
+
+#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -731,24 +851,31 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -758,6 +885,7 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
@@ -765,8 +893,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
@@ -779,6 +907,9 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -792,119 +923,124 @@ CONFIG_FB_ATMEL=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE is not set
# CONFIG_LOGO is not set
-
-#
-# Sound
-#
CONFIG_SOUND=y
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
-CONFIG_SND_TIMER=m
+CONFIG_SND_TIMER=y
CONFIG_SND_PCM=m
# CONFIG_SND_SEQUENCER is not set
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=m
-# CONFIG_SND_DUMMY is not set
-# CONFIG_SND_MTPAV is not set
-# CONFIG_SND_SERIAL_U16550 is not set
-# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_DRIVERS is not set
#
-# AVR32 devices
-#
-CONFIG_SND_ATMEL_AC97=m
-
-#
-# SPI devices
-#
-
-#
-# System on Chip audio support
+# Atmel devices (AVR32 and AT91)
#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -913,7 +1049,13 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -921,6 +1063,14 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@@ -950,51 +1100,84 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# on-CPU RTC drivers
#
CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
#
-# Userspace I/O
+# DMA Devices
#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1002,6 +1185,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1025,15 +1214,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1059,8 +1246,10 @@ CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -1071,19 +1260,16 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_BIND34 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1151,16 +1337,24 @@ CONFIG_NLS_UTF8=m
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
@@ -1172,19 +1366,48 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1192,63 +1415,118 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
@@ -1257,8 +1535,9 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index a96b68ea5e83..5ef67da343bc 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25.6
-# Wed Jun 18 16:09:32 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 09:37:19 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -124,13 +148,20 @@ CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
# CONFIG_BOARD_ATSTK1000 is not set
-CONFIG_BOARD_ATNGW100=y
+CONFIG_BOARD_ATNGW100_MKI=y
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y
# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
-CONFIG_BOARD_ATNGW100_I2C_GPIO=y
CONFIG_LOADER_U_BOOT=y
#
@@ -139,14 +170,14 @@ CONFIG_LOADER_U_BOOT=y
# CONFIG_AP700X_32_BIT_SMC is not set
CONFIG_AP700X_16_BIT_SMC=y
# CONFIG_AP700X_8_BIT_SMC is not set
-CONFIG_GPIO_DEV=y
CONFIG_LOAD_ADDRESS=0x10000000
CONFIG_ENTRY_ADDRESS=0x90000000
CONFIG_PHYS_OFFSET=0x10000000
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -158,33 +189,36 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_DW_DMAC=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
# Power management options
#
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
@@ -194,6 +228,7 @@ CONFIG_CPU_FREQ_TABLE=y
# CONFIG_CPU_FREQ_DEBUG is not set
# CONFIG_CPU_FREQ_STAT is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
@@ -214,11 +249,9 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -232,6 +265,7 @@ CONFIG_XFRM_USER=y
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -269,7 +303,6 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -285,8 +318,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
CONFIG_INET6_XFRM_MODE_BEET=y
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_IPV6_TUNNEL is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -310,10 +345,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_IPTABLES=m
@@ -339,16 +376,20 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
@@ -358,26 +399,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -389,6 +437,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -398,10 +447,12 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -446,16 +497,17 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x80000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -471,6 +523,11 @@ CONFIG_MTD_DATAFLASH=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
CONFIG_MTD_UBI=y
@@ -499,10 +556,20 @@ CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ATMEL_SSC is not set
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -514,7 +581,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -536,25 +602,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -598,15 +676,30 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@@ -619,9 +712,11 @@ CONFIG_INPUT_TOUCHSCREEN=y
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -636,9 +731,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -647,45 +744,44 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
#
-# I2C Hardware Bus support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-CONFIG_I2C_ATMELTWI=m
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -695,30 +791,48 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
-CONFIG_HAVE_GPIO_LIB=y
#
-# GPIO Support
+# PPS support
#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
#
+# PCI GPIO expanders:
+#
+
+#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -731,24 +845,31 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -758,6 +879,7 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
@@ -765,8 +887,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
@@ -779,6 +901,9 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -792,119 +917,124 @@ CONFIG_FB_ATMEL=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE is not set
# CONFIG_LOGO is not set
-
-#
-# Sound
-#
CONFIG_SOUND=y
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
-CONFIG_SND_TIMER=m
+CONFIG_SND_TIMER=y
CONFIG_SND_PCM=m
# CONFIG_SND_SEQUENCER is not set
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=m
-# CONFIG_SND_DUMMY is not set
-# CONFIG_SND_MTPAV is not set
-# CONFIG_SND_SERIAL_U16550 is not set
-# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_DRIVERS is not set
#
-# AVR32 devices
-#
-CONFIG_SND_ATMEL_AC97=m
-
-#
-# SPI devices
-#
-
-#
-# System on Chip audio support
+# Atmel devices (AVR32 and AT91)
#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -913,7 +1043,13 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -921,6 +1057,14 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@@ -950,51 +1094,84 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# on-CPU RTC drivers
#
CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
#
-# Userspace I/O
+# DMA Devices
#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1002,6 +1179,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1025,15 +1208,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1059,8 +1240,10 @@ CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -1071,19 +1254,16 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_BIND34 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1151,16 +1331,24 @@ CONFIG_NLS_UTF8=m
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
@@ -1172,19 +1360,48 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1192,63 +1409,118 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
@@ -1257,8 +1529,9 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
new file mode 100644
index 000000000000..9b8b5b3b9c71
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -0,0 +1,1414 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Thu Nov 5 15:32:26 2009
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+CONFIG_BOARD_ATNGW100_MKII=y
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATNGW100_MKII_LCD is not set
+CONFIG_BOARD_ATNGW100_ADDON_NONE=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set
+# CONFIG_BOARD_ATNGW100_MRMT is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_HW=y
+# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+CONFIG_MMC_TEST=m
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_UBIFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+CONFIG_UFS_FS=y
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
new file mode 100644
index 000000000000..01e913d66be4
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -0,0 +1,1549 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Thu Nov 5 15:33:09 2009
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+CONFIG_BOARD_ATNGW100_MKII=y
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+CONFIG_BOARD_ATNGW100_MKII_LCD=y
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_HW=y
+# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_ATMEL=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=m
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+# CONFIG_SND_DRIVERS is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
new file mode 100644
index 000000000000..bbf6bc316ecf
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -0,0 +1,1549 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Thu Nov 5 15:33:32 2009
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+CONFIG_BOARD_ATNGW100_MKII=y
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+CONFIG_BOARD_ATNGW100_MKII_LCD=y
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_HW=y
+# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_ATMEL=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=m
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+# CONFIG_SND_DRIVERS is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 0abe90adb1a4..42dafce02389 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc1
-# Mon Aug 4 16:02:27 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 13:00:55 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,21 +35,36 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -58,38 +74,40 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
-# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
-# CONFIG_HAVE_IOREMAP_PROT is not set
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-# CONFIG_USE_GENERIC_SMP_HELPERS is not set
CONFIG_HAVE_CLK=y
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -97,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -117,7 +132,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -133,7 +148,12 @@ CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
CONFIG_BOARD_ATSTK1000=y
-# CONFIG_BOARD_ATNGW100 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
CONFIG_BOARD_ATSTK1002=y
# CONFIG_BOARD_ATSTK1003 is not set
# CONFIG_BOARD_ATSTK1004 is not set
@@ -159,7 +179,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_QUICKLIST=y
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -171,14 +191,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
# CONFIG_HZ_100 is not set
@@ -186,7 +208,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
@@ -197,6 +219,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
@@ -228,6 +251,8 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_NET=y
@@ -295,10 +320,12 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
@@ -309,26 +336,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -340,6 +374,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -349,6 +384,7 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -398,17 +434,18 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=m
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
CONFIG_MTD_M25P80=m
CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -424,9 +461,22 @@ CONFIG_M25PXX_USE_FAST_READ=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
-# CONFIG_MTD_UBI is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -444,10 +494,20 @@ CONFIG_ATMEL_PWM=m
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
CONFIG_ATMEL_SSC=m
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -469,10 +529,6 @@ CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -489,8 +545,10 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=m
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
# CONFIG_SATA_PMP is not set
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -519,26 +577,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -580,18 +649,25 @@ CONFIG_INPUT_EVDEV=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_VSXXXAA is not set
CONFIG_MOUSE_GPIO=m
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -622,9 +698,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -633,7 +711,9 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
#
@@ -643,6 +723,7 @@ CONFIG_I2C_ALGOBIT=m
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -663,14 +744,6 @@ CONFIG_I2C_GPIO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-CONFIG_EEPROM_AT24=m
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -685,19 +758,28 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
CONFIG_GPIO_SYSFS=y
#
+# Memory mapped GPIO expanders:
+#
+
+#
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX732X is not set
@@ -713,11 +795,15 @@ CONFIG_GPIO_SYSFS=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -726,11 +812,11 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@@ -739,22 +825,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -764,6 +845,7 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
@@ -785,10 +867,15 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LMS283GF05 is not set
CONFIG_LCD_LTV350QV=y
# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
# CONFIG_LCD_VGG2432A4 is not set
# CONFIG_LCD_PLATFORM is not set
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
@@ -799,6 +886,8 @@ CONFIG_LCD_LTV350QV=y
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_LOGO is not set
CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -807,12 +896,24 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
# CONFIG_SND_DRIVERS is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+# CONFIG_SND_ATMEL_AC97C is not set
CONFIG_SND_SPI=y
CONFIG_SND_AT73C213=m
CONFIG_SND_AT73C213_TARGET_BITRATE=48000
@@ -825,33 +926,43 @@ CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -859,12 +970,18 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
@@ -872,10 +989,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -887,7 +1006,11 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_ATMEL_PWM=m
# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -895,7 +1018,13 @@ CONFIG_LEDS_GPIO=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -927,25 +1056,33 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
# CONFIG_RTC_DRV_M41T94 is not set
# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -964,25 +1101,45 @@ CONFIG_DMA_ENGINE=y
# DMA Clients
#
# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -990,6 +1147,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1013,15 +1176,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1039,7 +1200,14 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
CONFIG_MINIX_FS=m
# CONFIG_OMFS_FS is not set
@@ -1122,6 +1290,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1130,6 +1299,9 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1145,6 +1317,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1153,13 +1326,39 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1167,19 +1366,30 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-CONFIG_CRYPTO_ALGAPI=m
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=m
# CONFIG_CRYPTO_TEST is not set
@@ -1207,11 +1417,13 @@ CONFIG_CRYPTO_CBC=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1228,7 +1440,7 @@ CONFIG_CRYPTO_SHA1=m
#
# Ciphers
#
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
# CONFIG_CRYPTO_ANUBIS is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1247,18 +1459,24 @@ CONFIG_CRYPTO_DES=m
#
# Compression
#
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
-# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
+CONFIG_CRC16=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
@@ -1266,8 +1484,11 @@ CONFIG_CRC7=m
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index c1603c4860e0..363e2381f32a 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc8
-# Thu Dec 18 11:22:23 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 13:00:25 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,21 +35,36 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -58,32 +74,40 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -91,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -111,7 +132,6 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
CONFIG_FREEZER=y
#
@@ -128,8 +148,11 @@ CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
CONFIG_BOARD_ATSTK1000=y
-# CONFIG_BOARD_ATNGW100 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
# CONFIG_BOARD_MIMC200 is not set
# CONFIG_BOARD_ATSTK1002 is not set
# CONFIG_BOARD_ATSTK1003 is not set
@@ -156,7 +179,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_QUICKLIST=y
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -170,12 +193,14 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
# CONFIG_HZ_100 is not set
@@ -194,6 +219,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
@@ -294,6 +320,7 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
@@ -309,20 +336,24 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -334,6 +365,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -343,6 +375,7 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -393,9 +426,7 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -406,6 +437,7 @@ CONFIG_MTD_DATAFLASH=m
CONFIG_MTD_DATAFLASH_OTP=y
CONFIG_MTD_M25P80=m
CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -432,6 +464,11 @@ CONFIG_MTD_NAND_ATMEL_ECC_HW=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
CONFIG_MTD_UBI=y
@@ -460,13 +497,22 @@ CONFIG_ATMEL_PWM=m
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ICS932S401 is not set
CONFIG_ATMEL_SSC=m
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
# CONFIG_C2PORT is not set
#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
@@ -486,10 +532,6 @@ CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -506,8 +548,10 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=m
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
# CONFIG_SATA_PMP is not set
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -536,12 +580,17 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -550,15 +599,18 @@ CONFIG_MACB=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -600,18 +652,25 @@ CONFIG_INPUT_EVDEV=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_VSXXXAA is not set
CONFIG_MOUSE_GPIO=m
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -642,9 +701,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -653,6 +714,7 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
@@ -664,6 +726,7 @@ CONFIG_I2C_ALGOBIT=m
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -684,14 +747,6 @@ CONFIG_I2C_GPIO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -706,13 +761,18 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -738,11 +798,15 @@ CONFIG_GPIO_SYSFS=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -764,26 +828,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -817,8 +872,10 @@ CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LMS283GF05 is not set
CONFIG_LCD_LTV350QV=y
# CONFIG_LCD_ILI9320 is not set
# CONFIG_LCD_TDO24M is not set
@@ -833,6 +890,7 @@ CONFIG_LCD_LTV350QV=y
# CONFIG_LOGO is not set
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -841,16 +899,28 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_DRIVERS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+# CONFIG_SND_ATMEL_AC97C is not set
CONFIG_SND_SPI=y
CONFIG_SND_AT73C213=m
CONFIG_SND_AT73C213_TARGET_BITRATE=48000
@@ -863,11 +933,10 @@ CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-# CONFIG_USB_MUSB_HDRC is not set
# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
@@ -882,18 +951,25 @@ CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_M66592 is not set
# CONFIG_USB_GADGET_AMD5536UDC is not set
# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
# CONFIG_USB_GADGET_NET2280 is not set
# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -901,6 +977,12 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
@@ -917,6 +999,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
# CONFIG_MMC_ATMELMCI_DMA is not set
CONFIG_MMC_SPI=m
@@ -930,7 +1013,11 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_ATMEL_PWM=m
# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -939,7 +1026,12 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -972,6 +1064,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -983,6 +1076,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1014,32 +1108,42 @@ CONFIG_DMA_ENGINE=y
# DMA Clients
#
# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
-CONFIG_STAGING_EXCLUDE_BUILD=y
#
# File systems
#
-CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=m
-CONFIG_EXT4DEV_COMPAT=y
+CONFIG_EXT4_FS=y
# CONFIG_EXT4_FS_XATTR is not set
-CONFIG_JBD=m
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
-CONFIG_JBD2=m
+CONFIG_JBD2=y
# CONFIG_JBD2_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1047,6 +1151,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1076,10 +1186,7 @@ CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1099,12 +1206,13 @@ CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
CONFIG_UBIFS_FS=y
-CONFIG_UBIFS_FS_XATTR=y
+# CONFIG_UBIFS_FS_XATTR is not set
# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
CONFIG_MINIX_FS=m
# CONFIG_OMFS_FS is not set
@@ -1124,7 +1232,6 @@ CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_REGISTER_V4 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@@ -1188,6 +1295,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1196,6 +1304,9 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1211,6 +1322,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1219,6 +1331,8 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
@@ -1226,17 +1340,30 @@ CONFIG_FRAME_POINTER=y
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
-
-#
-# Tracers
-#
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1262,10 +1389,12 @@ CONFIG_CRYPTO_HASH=m
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=m
CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=m
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=m
# CONFIG_CRYPTO_TEST is not set
@@ -1293,11 +1422,13 @@ CONFIG_CRYPTO_CBC=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1334,6 +1465,7 @@ CONFIG_CRYPTO_DES=m
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=y
#
@@ -1341,11 +1473,13 @@ CONFIG_CRYPTO_LZO=y
#
CONFIG_CRYPTO_ANSI_CPRNG=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=m
@@ -1357,8 +1491,9 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h
index 015bc75ea798..9e36e3ff77d2 100644
--- a/arch/avr32/include/asm/hardirq.h
+++ b/arch/avr32/include/asm/hardirq.h
@@ -1,23 +1,6 @@
#ifndef __ASM_AVR32_HARDIRQ_H
#define __ASM_AVR32_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <asm/irq.h>
-
#ifndef __ASSEMBLY__
-
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-void ack_bad_irq(unsigned int irq);
-
-/* Standard mappings for irq_cpustat_t above */
-#include <linux/irq_cpustat.h>
-
+#include <asm-generic/hardirq.h>
#endif /* __ASSEMBLY__ */
-
#endif /* __ASM_AVR32_HARDIRQ_H */
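For reference, the generic header pulled in above supplies roughly the following (an abridged paraphrase from memory, not part of this patch); this is also why the open-coded ack_bad_irq() in arch/avr32/kernel/irq.c can be deleted in the next file:

/*
 * Abridged sketch of asm-generic/hardirq.h (assumed contents): the same
 * irq_cpustat_t layout as before, plus a default ack_bad_irq() that an
 * architecture may still override.
 */
typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* standard irq_stat accessors */

#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif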
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 9f572229d318..9604f7758f9a 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -16,15 +16,6 @@
#include <linux/seq_file.h>
#include <linux/sysdev.h>
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
- printk("unexpected IRQ %u\n", irq);
-}
-
/* May be overridden by platform code */
int __weak nmi_enable(void)
{
@@ -51,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -66,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index c4b56654349a..9cd2bd91d64a 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -39,30 +39,10 @@ SECTIONS
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
- INIT_DATA
- . = ALIGN(16);
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- . = ALIGN(4);
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- __security_initcall_start = .;
- *(.security_initcall.init)
- __security_initcall_end = .;
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(32);
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
-#endif
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
}
+ INIT_DATA_SECTION(16)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_evba = .;
@@ -78,34 +58,16 @@ SECTIONS
_etext = .;
} = 0xd703d703
- . = ALIGN(4);
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
-
+ EXCEPTION_TABLE(4)
RODATA
- . = ALIGN(THREAD_SIZE);
-
.data : AT(ADDR(.data) - LOAD_OFFSET) {
_data = .;
_sdata = .;
- /*
- * First, the init task union, aligned to an 8K boundary.
- */
- *(.data.init_task)
- /* Then, the page-aligned data */
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- /* Then, the cacheline aligned data */
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- /* And the rest... */
+ INIT_TASK_DATA(THREAD_SIZE)
+ PAGE_ALIGNED_DATA(PAGE_SIZE);
+ CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
*(.data.rel*)
DATA_DATA
CONSTRUCTORS
@@ -113,16 +75,8 @@ SECTIONS
_edata = .;
}
-
- . = ALIGN(8);
- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
- __bss_start = .;
- *(.bss)
- *(COMMON)
- . = ALIGN(8);
- __bss_stop = .;
- _end = .;
- }
+ BSS_SECTION(0, 8, 8)
+ _end = .;
DWARF_DEBUG
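The hand-rolled init, exception-table and BSS output sections above are replaced by the common helpers from include/asm-generic/vmlinux.lds.h. As a reminder of what those helpers emit, here is the approximate definition of one of them (quoted from memory, not part of this patch); the __start___ex_table/__stop___ex_table symbols the kernel expects are still produced:

/* Approximate expansion of the helper used as EXCEPTION_TABLE(4) above */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}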
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index eb9d4dc2e86d..1aa1ea5e9212 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -15,6 +15,8 @@
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/usb/atmel_usba_udc.h>
+
+#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <asm/io.h>
@@ -1181,19 +1183,32 @@ static struct resource atmel_spi1_resource[] = {
DEFINE_DEV(atmel_spi, 1);
DEV_CLK(spi_clk, atmel_spi1, pba, 1);
-static void __init
-at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
- unsigned int n, const u8 *pins)
+void __init
+at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n)
{
+ /*
+ * Manage the chipselects as GPIOs, normally using the same pins
+ * the SPI controller expects; but boards can use other pins.
+ */
+ static u8 __initdata spi_pins[][4] = {
+ { GPIO_PIN_PA(3), GPIO_PIN_PA(4),
+ GPIO_PIN_PA(5), GPIO_PIN_PA(20) },
+ { GPIO_PIN_PB(2), GPIO_PIN_PB(3),
+ GPIO_PIN_PB(4), GPIO_PIN_PA(27) },
+ };
unsigned int pin, mode;
+ /* There are only 2 SPI controllers */
+ if (bus_num > 1)
+ return;
+
for (; n; n--, b++) {
b->bus_num = bus_num;
if (b->chip_select >= 4)
continue;
pin = (unsigned)b->controller_data;
if (!pin) {
- pin = pins[b->chip_select];
+ pin = spi_pins[bus_num][b->chip_select];
b->controller_data = (void *)pin;
}
mode = AT32_GPIOF_OUTPUT;
@@ -1206,16 +1221,6 @@ at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
struct platform_device *__init
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
{
- /*
- * Manage the chipselects as GPIOs, normally using the same pins
- * the SPI controller expects; but boards can use other pins.
- */
- static u8 __initdata spi0_pins[] =
- { GPIO_PIN_PA(3), GPIO_PIN_PA(4),
- GPIO_PIN_PA(5), GPIO_PIN_PA(20), };
- static u8 __initdata spi1_pins[] =
- { GPIO_PIN_PB(2), GPIO_PIN_PB(3),
- GPIO_PIN_PB(4), GPIO_PIN_PA(27), };
struct platform_device *pdev;
u32 pin_mask;
@@ -1228,7 +1233,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
select_peripheral(PIOA, (1 << 0), PERIPH_A, AT32_GPIOF_PULLUP);
select_peripheral(PIOA, pin_mask, PERIPH_A, 0);
- at32_spi_setup_slaves(0, b, n, spi0_pins);
+ at32_spi_setup_slaves(0, b, n);
break;
case 1:
@@ -1239,7 +1244,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
select_peripheral(PIOB, (1 << 0), PERIPH_B, AT32_GPIOF_PULLUP);
select_peripheral(PIOB, pin_mask, PERIPH_B, 0);
- at32_spi_setup_slaves(1, b, n, spi1_pins);
+ at32_spi_setup_slaves(1, b, n);
break;
default:
@@ -1320,7 +1325,7 @@ struct platform_device *__init
at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
struct platform_device *pdev;
- struct dw_dma_slave *dws = &data->dma_slave;
+ struct mci_dma_slave *slave;
u32 pioa_mask;
u32 piob_mask;
@@ -1339,13 +1344,17 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
ARRAY_SIZE(atmel_mci0_resource)))
goto fail;
- dws->dma_dev = &dw_dmac0_device.dev;
- dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
- dws->cfg_hi = (DWC_CFGH_SRC_PER(0)
+ slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+
+ slave->sdata.dma_dev = &dw_dmac0_device.dev;
+ slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
+ slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0)
| DWC_CFGH_DST_PER(1));
- dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL
+ slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL
| DWC_CFGL_HS_SRC_POL);
+ data->dma_slave = slave;
+
if (platform_device_add_data(pdev, data,
sizeof(struct mci_platform_data)))
goto fail;
@@ -1411,6 +1420,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
return pdev;
fail:
+ data->dma_slave = NULL;
+ kfree(slave);
platform_device_put(pdev);
return NULL;
}
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
new file mode 100644
index 000000000000..a9b38967f703
--- /dev/null
+++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
@@ -0,0 +1,24 @@
+#ifndef __MACH_ATMEL_MCI_H
+#define __MACH_ATMEL_MCI_H
+
+#include <linux/dw_dmac.h>
+
+/**
+ * struct mci_dma_data - DMA data for MCI interface
+ */
+struct mci_dma_data {
+ struct dw_dma_slave sdata;
+};
+
+/* accessor macros */
+#define slave_data_ptr(s) (&(s)->sdata)
+#define find_slave_dev(s) ((s)->sdata.dma_dev)
+
+#define setup_dma_addr(s, t, r) do { \
+ if (s) { \
+ (s)->sdata.tx_reg = (t); \
+ (s)->sdata.rx_reg = (r); \
+ } \
+} while (0)
+
+#endif /* __MACH_ATMEL_MCI_H */
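A minimal, hypothetical sketch of how a host driver could consume the accessors defined in this new header; the function name and register arguments are illustrative only and not part of this patch:

#include <linux/types.h>
#include <mach/atmel-mci.h>

/*
 * Illustrative helper (hypothetical): record the controller's TX/RX
 * FIFO addresses in the DMA slave data handed over by board code.
 */
static void example_mci_attach_dma(struct mci_dma_data *slave,
				   dma_addr_t tx_reg, dma_addr_t rx_reg)
{
	if (!slave || !find_slave_dev(slave))
		return;			/* no DMA controller wired up */

	setup_dma_addr(slave, tx_reg, rx_reg);
}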
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index ddedb471f33e..c7f25bb1d068 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -49,6 +49,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
struct spi_board_info;
struct platform_device *
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
+void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n);
struct atmel_lcdfb_info;
struct platform_device *
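With the prototype exported here, board code can fix up its own spi_board_info tables before registering them. A hypothetical usage sketch (the device table and init function names are made up for illustration, not taken from this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <mach/board.h>

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.chip_select	= 1,
	},
};

static int __init example_board_spi_init(void)
{
	/* Fill in bus number and chip-select GPIO defaults for bus 0 */
	at32_spi_setup_slaves(0, example_spi_devices,
			      ARRAY_SIZE(example_spi_devices));

	return spi_register_board_info(example_spi_devices,
				       ARRAY_SIZE(example_spi_devices));
}
arch_initcall(example_board_spi_init);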
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index b0c7f0ee4b03..1942ccfedbe0 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
-
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_read_lock_asm(&rw->lock);
+ arch_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return __raw_read_trylock_asm(&rw->lock);
+ return arch_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_read_unlock_asm(&rw->lock);
+ arch_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_write_lock_asm(&rw->lock);
+ arch_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return __raw_write_trylock_asm(&rw->lock);
+ return arch_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_write_unlock_asm(&rw->lock);
+ arch_write_unlock_asm(&rw->lock);
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index be75762c0610..1a33608c958b 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -15,14 +15,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
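For context, the __raw_* to arch_* rename in the two Blackfin files above (and in the CRIS files below) tracks the core locking rework in this merge: the generic layer now owns the __raw_*/do_raw_* names and dispatches to the per-architecture hooks, roughly as sketched below (paraphrased, not part of this patch):

/*
 * Sketch of the generic dispatch (assumed shape of include/linux/spinlock.h
 * in the !CONFIG_DEBUG_SPINLOCK case): architectures now only provide
 * arch_spin_*() operating on arch_spinlock_t.
 */
static inline void do_raw_spin_lock(raw_spinlock_t *lock)
{
	arch_spin_lock(&lock->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	arch_spin_unlock(&lock->raw_lock);
}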
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index db9f9c91f11f..64cff54a8a58 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
unsigned long flags;
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 78cb3d38f899..9636bace00e8 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
if (fp->ipend & ~0x3F) {
for (i = 0; i < (NR_IRQS - 1); i++) {
if (!in_atomic)
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
verbose_printk("\n");
unlock:
if (!in_atomic)
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
}
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5..f171a6600fbc 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
/*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
*
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
rw->lock++;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return 1;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc6..b5ce0724a88f 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index af3e824b91b3..62d1aba615dc 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
}
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 5c913d472119..c25dc2c2b1da 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_puts(p, " CPU0");
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..2d7f56a98e0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
bool
default y
-config HAVE_LEGACY_PER_CPU_AREA
- def_bool y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool y
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
- extern unsigned long vmalloc_end;
+ extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
extern int find_largest_hole(u64 start, u64 end, void *arg);
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
#include <asm/mmzone.h>
-#define NUMA_NO_NODE -1
-
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..69bf13857a9f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END vmalloc_end
- extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..7fa90f73f6be 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
#endif
};
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
/*
* The "local" data variable. It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
-#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
+#define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu))
extern void print_cpu_info (struct cpuinfo_ia64 *);
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)
/*
* Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
#define TICKET_BITS 15
#define TICKET_MASK ((1 << TICKET_BITS) - 1)
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket, serve;
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
}
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->lock);
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return 0;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket;
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
}
}
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
#endif /* !ASM_SUPPORTED */
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
register long result; \
\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#else /* !ASM_SUPPORTED */
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
} while (ia64_val); \
})
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_IA64_SPINLOCK_H */
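The ia64 spinlock above is a ticket lock; these hunks only rename it, but the queueing idea is easy to miss in the fetchadd-based code. Below is a minimal user-space sketch of a ticket lock using C11 atomics; all demo_* names are invented, and unlike the ia64 code it keeps the two counters in separate words instead of packing them into one.

/* Sketch of the ticket-lock idea behind __ticket_spin_lock() above. */
#include <stdatomic.h>

struct demo_ticket_lock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
};

static void demo_ticket_lock(struct demo_ticket_lock *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* spin; the kernel would call cpu_relax() here */
}

static void demo_ticket_unlock(struct demo_ticket_lock *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}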
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
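The ia64 rwlock packs a 31-bit reader count and a 1-bit writer flag into one word, which is why arch_read_trylock() above can use a single cmpxchg on the whole word. A rough user-space sketch of that trick follows; the bit layout, the demo_* names and the early writer check are assumptions made for the example, not the exact ia64 encoding.

/* Sketch: take a read reference with one compare-and-swap on the word. */
#include <stdint.h>
#include <stdbool.h>

typedef struct {
	volatile uint32_t word;	/* low 31 bits: readers, top bit: writer */
} demo_rwlock_t;

static bool demo_read_trylock(demo_rwlock_t *x)
{
	uint32_t old = x->word;

	if (old & 0x80000000u)		/* a writer holds the lock */
		return false;
	/* succeed only if the word is still exactly what we sampled */
	return __sync_bool_compare_and_swap(&x->word, old, old + 1);
}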
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
printk(KERN_ERR PREFIX
"Error parsing MADT - no LAPIC entries\n");
+#ifdef CONFIG_SMP
+ if (available_cpus == 0) {
+ printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+ printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+ smp_boot_data.cpu_phys_id[available_cpus] =
+ hard_smp_processor_id();
+ available_cpus = 1; /* We've got at least one of these, no? */
+ }
+ smp_boot_data.cpu_count = available_cpus;
+#endif
+ /* Make boot-up look pretty */
+ printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+ total_cpus);
+
return 0;
}
-
-
int __init acpi_boot_init(void)
{
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
printk(KERN_ERR PREFIX "Can't find FADT\n");
+#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
- printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
- smp_boot_data.cpu_phys_id[available_cpus] =
- hard_smp_processor_id();
- available_cpus = 1; /* We've got at least one of these, no? */
- }
- smp_boot_data.cpu_count = available_cpus;
-
- smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
if (srat_num_cpus == 0) {
int cpu, i = 1;
for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
node_cpuid[i++].phys_id =
smp_boot_data.cpu_phys_id[cpu];
}
-# endif
#endif
-#ifdef CONFIG_ACPI_NUMA
build_cpu_to_node_map();
#endif
- /* Make boot-up look pretty */
- printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
return 0;
}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
* intermediate precision so that we can produce a full 64-bit result.
*/
GLOBAL_ENTRY(ia64_native_sched_clock)
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;;
ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
;;
ldf8 f8=[r8]
;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
#endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
goto unlock_iosapic_lock;
}
- spin_lock(&irq_desc[irq].lock);
+ raw_spin_lock(&irq_desc[irq].lock);
dest = get_target_cpu(gsi, irq);
dmode = choose_dmode();
err = register_intr(gsi, irq, dmode, polarity, trigger);
if (err < 0) {
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
irq = err;
goto unlock_iosapic_lock;
}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..70e4bad23432 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
desc = irq_desc + irq;
cfg = irq_cfg + irq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
return IRQ_HANDLED;
}
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
// purge all TC entries
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+# ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
-#endif
+# endif
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
32 : cpus_weight(early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
# endif
-#else
-# ifdef CONFIG_SMP
- smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
-# endif
#endif /* CONFIG_APCI_BOOT */
+#ifdef CONFIG_SMP
+ smp_build_cpu_map();
+#endif
find_memory();
/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
}
/*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
- /* start_kernel() requires this... */
-}
-#endif
-
-/*
* Do the following calculations:
*
* 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
*/
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
}
#endif
+#ifdef CONFIG_SMP
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ __cpu0_per_cpu = .;
+ . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
@@ -198,11 +204,6 @@ SECTIONS
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
-#ifdef CONFIG_SMP
- . = ALIGN(PERCPU_PAGE_SIZE);
- __cpu0_per_cpu = .;
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
-#endif
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
void * __cpuinit
per_cpu_init (void)
{
- int cpu;
- static int first_time=1;
+ static bool first_time = true;
+ void *cpu0_data = __cpu0_per_cpu;
+ unsigned int cpu;
+
+ if (!first_time)
+ goto skip;
+ first_time = false;
/*
- * get_free_pages() cannot be used before cpu_init() done. BSP
- * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
- * get_zeroed_page().
+ * get_free_pages() cannot be used before cpu_init() is done.
+ * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+ * so that an AP need not call get_zeroed_page().
*/
- if (first_time) {
- void *cpu0_data = __cpu0_per_cpu;
+ for_each_possible_cpu(cpu) {
+ void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
- first_time=0;
+ memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+ per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
- per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+ /*
+ * percpu area for cpu0 is moved from the __init area
+ * which is set up by head.S and used till this point.
+ * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and its
+ * virtual address isn't insanely far from other
+ * percpu areas which is important for congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+ (unsigned long)__per_cpu_start);
- for (cpu = 1; cpu < NR_CPUS; cpu++) {
- memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
+ cpu_data += PERCPU_PAGE_SIZE;
}
+skip:
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+ cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *gi;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int rc;
+
+ ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ gi = &ai->groups[0];
+
+ /* units are assigned consecutively to possible cpus */
+ for_each_possible_cpu(cpu)
+ gi->cpu_map[gi->nr_units++] = cpu;
+
+ /* set parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmalloc_end -= map_size;
- vmem_map = (struct page *) vmalloc_end;
+ VMALLOC_END -= map_size;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
/*
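The new setup_per_cpu_areas() above hands the already-built layout to the dynamic percpu allocator, and its only real arithmetic is splitting each PERCPU_PAGE_SIZE unit into static, reserved and dynamic parts. A tiny standalone sketch of that sizing check follows; the byte counts are invented example values, not real ia64 sizes.

/* Sketch of the unit sizing done in setup_per_cpu_areas() above. */
#include <stdio.h>

int main(void)
{
	long unit_size     = 64 * 1024;	/* stands in for PERCPU_PAGE_SIZE */
	long static_size   = 20 * 1024;	/* __per_cpu_end - __per_cpu_start */
	long reserved_size =  8 * 1024;	/* stands in for PERCPU_MODULE_RESERVE */
	long dyn_size      = unit_size - static_size - reserved_size;

	if (dyn_size < 0) {
		fprintf(stderr, "percpu area overflow\n");
		return 1;
	}
	printf("static=%ld reserved=%ld dyn=%ld (unit=%ld)\n",
	       static_size, reserved_size, dyn_size, unit_size);
	return 0;
}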
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..19c4b2195dce 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
int cpu;
for_each_possible_early_cpu(cpu) {
- if (cpu == 0) {
- void *cpu0_data = __cpu0_per_cpu;
- __per_cpu_offset[cpu] = (char*)cpu0_data -
- __per_cpu_start;
- } else if (node == node_cpuid[cpu].nid) {
- memcpy(__va(cpu_data), __phys_per_cpu_start,
- __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
- __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- }
+ void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+ if (node != node_cpuid[cpu].nid)
+ continue;
+
+ memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+ __per_cpu_start;
+
+ /*
+ * percpu area for cpu0 is moved from the __init area
+ * which is set up by head.S and used till this point.
+ * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and its
+ * virtual address isn't insanely far from other
+ * percpu areas which is important for congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA,
+ (unsigned long)cpu_data -
+ (unsigned long)__per_cpu_start);
+
+ cpu_data += PERCPU_PAGE_SIZE;
}
#endif
return cpu_data;
}
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *uninitialized_var(gi);
+ unsigned int *cpu_map;
+ void *base;
+ unsigned long base_offset;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int node, prev_node, unit, nr_units, rc;
+
+ ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ cpu_map = ai->groups[0].cpu_map;
+
+ /* determine base */
+ base = (void *)ULONG_MAX;
+ for_each_possible_cpu(cpu)
+ base = min(base,
+ (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+ base_offset = (void *)__per_cpu_start - base;
+
+ /* build cpu_map, units are grouped by node */
+ unit = 0;
+ for_each_node(node)
+ for_each_possible_cpu(cpu)
+ if (node == node_cpuid[cpu].nid)
+ cpu_map[unit++] = cpu;
+ nr_units = unit;
+
+ /* set basic parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ /*
+ * CPUs are put into groups according to node. Walk cpu_map
+ * and create new groups at node boundaries.
+ */
+ prev_node = -1;
+ ai->nr_groups = 0;
+ for (unit = 0; unit < nr_units; unit++) {
+ cpu = cpu_map[unit];
+ node = node_cpuid[cpu].nid;
+
+ if (node == prev_node) {
+ gi->nr_units++;
+ continue;
+ }
+ prev_node = node;
+
+ gi = &ai->groups[ai->nr_groups++];
+ gi->nr_units = 1;
+ gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+ gi->cpu_map = &cpu_map[unit];
+ }
+
+ rc = pcpu_setup_first_chunk(ai, base);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
+#endif
+
/**
* fill_pernode - initialize pernode data.
* @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
/* Set the node_data pointer for each per-cpu struct */
for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
- per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+ per_cpu(ia64_cpu_info, cpu).node_data =
+ mem_data[node].node_data;
}
#else
{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__cpu_info - __per_cpu_start));
+ ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmem_map = (struct page *) vmalloc_end;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..b9609c69343a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2bae..e884ba4e031d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
- 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
- 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2fe..a3fb7cf9ae1d 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
}
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
if (xen_slab_ready) {
switch (vec) {
case IA64_TIMER_VECTOR:
- snprintf(per_cpu(timer_name, cpu),
- sizeof(per_cpu(timer_name, cpu)),
+ snprintf(per_cpu(xen_timer_name, cpu),
+ sizeof(per_cpu(xen_timer_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
- per_cpu(timer_name, cpu), action->dev_id);
- per_cpu(timer_irq, cpu) = irq;
+ per_cpu(xen_timer_name, cpu), action->dev_id);
+ per_cpu(xen_timer_irq, cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
- snprintf(per_cpu(resched_name, cpu),
- sizeof(per_cpu(resched_name, cpu)),
+ snprintf(per_cpu(xen_resched_name, cpu),
+ sizeof(per_cpu(xen_resched_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
- per_cpu(resched_name, cpu), action->dev_id);
- per_cpu(resched_irq, cpu) = irq;
+ per_cpu(xen_resched_name, cpu), action->dev_id);
+ per_cpu(xen_resched_irq, cpu) = irq;
break;
case IA64_IPI_VECTOR:
- snprintf(per_cpu(ipi_name, cpu),
- sizeof(per_cpu(ipi_name, cpu)),
+ snprintf(per_cpu(xen_ipi_name, cpu),
+ sizeof(per_cpu(xen_ipi_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
action->handler, action->flags,
- per_cpu(ipi_name, cpu), action->dev_id);
- per_cpu(ipi_irq, cpu) = irq;
+ per_cpu(xen_ipi_name, cpu), action->dev_id);
+ per_cpu(xen_ipi_irq, cpu) = irq;
break;
case IA64_CMC_VECTOR:
- snprintf(per_cpu(cmc_name, cpu),
- sizeof(per_cpu(cmc_name, cpu)),
+ snprintf(per_cpu(xen_cmc_name, cpu),
+ sizeof(per_cpu(xen_cmc_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
- action->handler,
- action->flags,
- per_cpu(cmc_name, cpu),
- action->dev_id);
- per_cpu(cmc_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cmc_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cmc_irq, cpu) = irq;
break;
case IA64_CMCP_VECTOR:
- snprintf(per_cpu(cmcp_name, cpu),
- sizeof(per_cpu(cmcp_name, cpu)),
+ snprintf(per_cpu(xen_cmcp_name, cpu),
+ sizeof(per_cpu(xen_cmcp_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(cmcp_name, cpu),
- action->dev_id);
- per_cpu(cmcp_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cmcp_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cmcp_irq, cpu) = irq;
break;
case IA64_CPEP_VECTOR:
- snprintf(per_cpu(cpep_name, cpu),
- sizeof(per_cpu(cpep_name, cpu)),
+ snprintf(per_cpu(xen_cpep_name, cpu),
+ sizeof(per_cpu(xen_cpep_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(cpep_name, cpu),
- action->dev_id);
- per_cpu(cpep_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cpep_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cpep_irq, cpu) = irq;
break;
case IA64_CPE_VECTOR:
case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
if (action == CPU_DEAD) {
/* Unregister evtchn. */
- if (per_cpu(cpep_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
- per_cpu(cpep_irq, cpu) = -1;
+ if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+ NULL);
+ per_cpu(xen_cpep_irq, cpu) = -1;
}
- if (per_cpu(cmcp_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
- per_cpu(cmcp_irq, cpu) = -1;
+ if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+ NULL);
+ per_cpu(xen_cmcp_irq, cpu) = -1;
}
- if (per_cpu(cmc_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
- per_cpu(cmc_irq, cpu) = -1;
+ if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+ per_cpu(xen_cmc_irq, cpu) = -1;
}
- if (per_cpu(ipi_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
- per_cpu(ipi_irq, cpu) = -1;
+ if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+ per_cpu(xen_ipi_irq, cpu) = -1;
}
- if (per_cpu(resched_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(resched_irq, cpu),
- NULL);
- per_cpu(resched_irq, cpu) = -1;
+ if (per_cpu(xen_resched_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
+ NULL);
+ per_cpu(xen_resched_irq, cpu) = -1;
}
- if (per_cpu(timer_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
- per_cpu(timer_irq, cpu) = -1;
+ if (per_cpu(xen_timer_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+ NULL);
+ per_cpu(xen_timer_irq, cpu) = -1;
}
}
return NOTIFY_OK;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20..c1c544513e8d 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
#include "../kernel/fsyscall_gtod_data.h"
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
struct vcpu_register_runstate_memory_area area;
- struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+ struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
int rc;
memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
&area);
WARN_ON(rc && rc != -ENOSYS);
- per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
- per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+ per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ runstate->time[RUNSTATE_offline];
}
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(runstate);
+ state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
* This function just checks and reject this effect.
*/
if (!time_after_eq(runstate.time[RUNSTATE_blocked],
- per_cpu(processed_blocked_time, cpu)))
+ per_cpu(xen_blocked_time, cpu)))
blocked = 0;
if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
runstate.time[RUNSTATE_offline],
- per_cpu(processed_stolen_time, cpu)))
+ per_cpu(xen_stolen_time, cpu)))
stolen = 0;
if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
} else {
local_cpu_data->itm_next = delta_itm + new_itm;
}
- per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
- per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+ per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+ per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
return delta_itm;
}
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
/**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
* }
*/
__asm__ __volatile__ (
- "# __raw_spin_trylock \n\t"
+ "# arch_spin_trylock \n\t"
"ldi %1, #0; \n\t"
"mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (oldval > 0);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
* }
*/
__asm__ __volatile__ (
- "# __raw_spin_lock \n\t"
+ "# arch_spin_lock \n\t"
".fillinsn \n"
"1: \n\t"
"mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_M32R_SPINLOCK_H */
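The m32r read/write trylocks above treat the rwlock as a single counter biased at RW_LOCK_BIAS: readers subtract one and a writer subtracts the whole bias. A user-space sketch of that scheme with C11 atomics is below; the demo_* names are invented, the bias value is copied from the header for illustration, and the sketch simply undoes the subtraction when the attempt fails.

/* Sketch of the biased-counter rwlock trylock scheme above. */
#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_RW_BIAS 0x01000000

static atomic_int demo_count = DEMO_RW_BIAS;

static bool demo_read_trylock(void)
{
	if (atomic_fetch_sub(&demo_count, 1) - 1 >= 0)
		return true;			/* took one reader slot */
	atomic_fetch_add(&demo_count, 1);	/* undo: a writer holds it */
	return false;
}

static bool demo_write_trylock(void)
{
	if (atomic_fetch_sub(&demo_count, DEMO_RW_BIAS) - DEMO_RW_BIAS == 0)
		return true;			/* no readers, no writer */
	atomic_fetch_add(&demo_count, DEMO_RW_BIAS);	/* undo */
	return false;
}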
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e4..92e27672661f 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,17 +7,17 @@
typedef struct {
volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct {
volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 8dfd31e87c4c..3c71f776872c 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index fe60e1abaee8..aca0e28581c7 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -83,9 +83,9 @@
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END KMAP_START
#else
-extern unsigned long vmalloc_end;
+extern unsigned long m68k_vmalloc_end;
#define VMALLOC_START 0x0f800000
-#define VMALLOC_END vmalloc_end
+#define VMALLOC_END m68k_vmalloc_end
#endif /* CONFIG_SUN3 */
/* zero page used for uninitialized stuff */
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 3cd19390aae5..94f81ecfe3f8 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -45,8 +45,8 @@
** Globals
*/
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long m68k_vmalloc_end;
+EXPORT_SYMBOL(m68k_vmalloc_end);
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
#endif
// the lowest mapping here is the end of our
// vmalloc region
- if(!vmalloc_end)
- vmalloc_end = seg;
+ if (!m68k_vmalloc_end)
+ m68k_vmalloc_end = seg;
// mark the segmap alloc'd, and reserve any
// of the first 0xbff pages the hardware is
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index bbd8327f1890..fd53e500be67 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,8 +6,15 @@ mainmenu "Linux/Microblaze Kernel Configuration"
config MICROBLAZE
def_bool y
select HAVE_LMB
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_OPROFILE
+ select TRACING_SUPPORT
config SWAP
def_bool n
@@ -57,12 +64,24 @@ config GENERIC_GPIO
config GENERIC_CSUM
def_bool y
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
config PCI
def_bool n
config NO_DMA
def_bool y
+config DTC
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 242cd35bdb4b..9dc708a7f700 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -3,6 +3,9 @@
menu "Kernel hacking"
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
source "lib/Kconfig.debug"
config EARLY_PRINTK
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 34187354304a..d2d6cfcb1a30 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -51,6 +51,8 @@ core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-y += arch/microblaze/platform/
+drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
+
boot := arch/microblaze/boot
# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 21f13322a4ca..902cf9846c3c 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,11 +2,13 @@
# arch/microblaze/boot/Makefile
#
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
obj-y += linked_dtb.o
targets := linux.bin linux.bin.gz simpleImage.%
-OBJCOPYFLAGS_linux.bin := -O binary
+OBJCOPYFLAGS := -O binary
# Where the DTS files live
dtstree := $(srctree)/$(src)/dts
@@ -24,6 +26,7 @@ $(obj)/linux.bin: vmlinux FORCE
[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
$(call if_changed,objcopy)
+ $(call if_changed,uimage)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
@@ -36,8 +39,16 @@ quiet_cmd_cp = CP $< $@$2
quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@
+quiet_cmd_uimage = UIMAGE $@.ub
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \
+ -C none -n 'Linux-$(KERNELRELEASE)' \
+ -a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \
+ -d $@ $@.ub
+
$(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,cp,.unstrip)
+ $(call if_changed,objcopy)
+ $(call if_changed,uimage)
$(call if_changed,strip)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -53,4 +64,4 @@ $(obj)/%.dtb: $(dtstree)/%.dts FORCE
clean-kernel += linux.bin linux.bin.gz simpleImage.*
-clean-files += *.dtb
+clean-files += *.dtb simpleImage.*.unstrip
diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
index c209c47509d5..e52210891d78 100644
--- a/arch/microblaze/include/asm/cache.h
+++ b/arch/microblaze/include/asm/cache.h
@@ -21,20 +21,4 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-void _enable_icache(void);
-void _disable_icache(void);
-void _invalidate_icache(unsigned int addr);
-
-#define __enable_icache() _enable_icache()
-#define __disable_icache() _disable_icache()
-#define __invalidate_icache(addr) _invalidate_icache(addr)
-
-void _enable_dcache(void);
-void _disable_dcache(void);
-void _invalidate_dcache(unsigned int addr);
-
-#define __enable_dcache() _enable_dcache()
-#define __disable_dcache() _disable_dcache()
-#define __invalidate_dcache(addr) _invalidate_dcache(addr)
-
#endif /* _ASM_MICROBLAZE_CACHE_H */
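The cacheflush.h diff just below replaces the direct _enable_icache()/_invalidate_dcache() style calls removed here with an ops table (struct scache, reached through the mbc pointer) filled in at runtime by microblaze_cache_init(). A generic sketch of that function-pointer pattern follows; the demo_* names are invented and only illustrate the shape, not the Microblaze fields.

/* Sketch of a runtime-selected cache-ops table, as in struct scache below. */
struct demo_cache_ops {
	void (*flush)(void);
	void (*flush_range)(unsigned long start, unsigned long end);
};

static void demo_noop_flush(void) { }
static void demo_noop_flush_range(unsigned long start, unsigned long end) { }

/* selected at init time for the detected cache type */
static const struct demo_cache_ops demo_nocache_ops = {
	.flush		= demo_noop_flush,
	.flush_range	= demo_noop_flush_range,
};

static const struct demo_cache_ops *demo_ops = &demo_nocache_ops;

#define demo_flush_icache()		demo_ops->flush()
#define demo_flush_icache_range(s, e)	demo_ops->flush_range((s), (e))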
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 088076e657b3..a6edd356cd08 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -18,6 +18,8 @@
/* Somebody depends on this; sigh... */
#include <linux/mm.h>
+/* Look at Documentation/cachetlb.txt */
+
/*
* Cache handling functions.
* Microblaze has a write-through data cache, meaning that the data cache
@@ -27,78 +29,81 @@
* instruction cache to make sure we don't fetch old, bad code.
*/
+/* struct cache, d=dcache, i=icache, fl = flush, in = invalidate,
+ * suffix r = range */
+struct scache {
+ /* icache */
+ void (*ie)(void); /* enable */
+ void (*id)(void); /* disable */
+ void (*ifl)(void); /* flush */
+ void (*iflr)(unsigned long a, unsigned long b);
+ void (*iin)(void); /* invalidate */
+ void (*iinr)(unsigned long a, unsigned long b);
+ /* dcache */
+ void (*de)(void); /* enable */
+ void (*dd)(void); /* disable */
+ void (*dfl)(void); /* flush */
+ void (*dflr)(unsigned long a, unsigned long b);
+ void (*din)(void); /* invalidate */
+ void (*dinr)(unsigned long a, unsigned long b);
+};
+
+/* microblaze cache */
+extern struct scache *mbc;
+
+void microblaze_cache_init(void);
+
+#define enable_icache() mbc->ie();
+#define disable_icache() mbc->id();
+#define flush_icache() mbc->ifl();
+#define flush_icache_range(start, end) mbc->iflr(start, end);
+#define invalidate_icache() mbc->iin();
+#define invalidate_icache_range(start, end) mbc->iinr(start, end);
+
+
+#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
+#define flush_icache_page(vma, pg) do { } while (0)
+
+#define enable_dcache() mbc->de();
+#define disable_dcache() mbc->dd();
/* FIXME for LL-temac driver */
-#define invalidate_dcache_range(start, end) \
- __invalidate_dcache_range(start, end)
-
-#define flush_cache_all() __invalidate_cache_all()
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) __invalidate_cache_all()
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define invalidate_dcache() mbc->din();
+#define invalidate_dcache_range(start, end) mbc->dinr(start, end);
+#define flush_dcache() mbc->dfl();
+#define flush_dcache_range(start, end) mbc->dflr(start, end);
-#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+/* D-cache aliasing can't happen - the cache sits between the MMU and RAM */
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
-#define flush_icache_page(vma, pg) do { } while (0)
-
-#ifndef CONFIG_MMU
-# define flush_icache_user_range(start, len) do { } while (0)
-#else
-# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
-
-# define flush_page_to_ram(page) do { } while (0)
-# define flush_icache() __invalidate_icache_all()
-# define flush_cache_sigtramp(vaddr) \
- __invalidate_icache_range(vaddr, vaddr + 8)
-
-# define flush_dcache_mmap_lock(mapping) do { } while (0)
-# define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-# define flush_cache_dup_mm(mm) do { } while (0)
+/* MS: kgdb code uses this macro, but the len is wrong with FLASH */
+#if 0
+#define flush_cache_range(vma, start, len) { \
+ flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
+ flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
+}
#endif
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-/* see arch/microblaze/kernel/cache.c */
-extern void __invalidate_icache_all(void);
-extern void __invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __invalidate_icache_page(struct vm_area_struct *vma,
- struct page *page);
-extern void __invalidate_icache_user_range(struct vm_area_struct *vma,
- struct page *page,
- unsigned long adr, int len);
-extern void __invalidate_cache_sigtramp(unsigned long addr);
-
-extern void __invalidate_dcache_all(void);
-extern void __invalidate_dcache_range(unsigned long start, unsigned long end);
-extern void __invalidate_dcache_page(struct vm_area_struct *vma,
- struct page *page);
-extern void __invalidate_dcache_user_range(struct vm_area_struct *vma,
- struct page *page,
- unsigned long adr, int len);
-
-extern inline void __invalidate_cache_all(void)
-{
- __invalidate_icache_all();
- __invalidate_dcache_all();
-}
+#define flush_cache_range(vma, start, len) do { } while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy((dst), (src), (len)); \
- flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy((dst), (src), (len)); \
+ flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy((dst), (src), (len))
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy((dst), (src), (len)); \
+} while (0)
#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
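
The new cacheflush.h routes every flush/invalidate macro through a table of function pointers (struct scache, reached via mbc), so the implementation matching the detected cache model is chosen once at init time. A minimal standalone sketch of that dispatch pattern (illustrative only, not part of the patch; names and printf stand-ins are hypothetical):

#include <stdio.h>

struct cache_ops {
	void (*flush_all)(void);
	void (*flush_range)(unsigned long start, unsigned long end);
};

static void wt_flush_all(void)
{
	printf("write-through: flush whole cache\n");
}

static void wt_flush_range(unsigned long start, unsigned long end)
{
	printf("write-through: flush %#lx-%#lx\n", start, end);
}

static const struct cache_ops wt_ops = {
	.flush_all	= wt_flush_all,
	.flush_range	= wt_flush_range,
};

static const struct cache_ops *cops;	/* plays the role of 'mbc' */

#define flush_dcache()			cops->flush_all()
#define flush_dcache_range(s, e)	cops->flush_range((s), (e))

int main(void)
{
	cops = &wt_ops;		/* selected once, as microblaze_cache_init() does */
	flush_dcache();
	flush_dcache_range(0x1000, 0x2000);
	return 0;
}
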
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h
index 52f28f6dc4eb..b4f5ca33aebf 100644
--- a/arch/microblaze/include/asm/cpuinfo.h
+++ b/arch/microblaze/include/asm/cpuinfo.h
@@ -43,7 +43,7 @@ struct cpuinfo {
u32 use_icache;
u32 icache_tagbits;
u32 icache_write;
- u32 icache_line;
+ u32 icache_line_length;
u32 icache_size;
unsigned long icache_base;
unsigned long icache_high;
@@ -51,8 +51,9 @@ struct cpuinfo {
u32 use_dcache;
u32 dcache_tagbits;
u32 dcache_write;
- u32 dcache_line;
+ u32 dcache_line_length;
u32 dcache_size;
+ u32 dcache_wb;
unsigned long dcache_base;
unsigned long dcache_high;
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 30286db27c1c..78a038452c0f 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -19,6 +19,18 @@ struct dev_archdata {
struct pdev_archdata {
};
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+ struct device_node *np)
+{
+ ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+ return ad->of_node;
+}
+
#endif /* _ASM_MICROBLAZE_DEVICE_H */
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
index 8b137891791f..fd2fa2eca62f 100644
--- a/arch/microblaze/include/asm/ftrace.h
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -1 +1,26 @@
+#ifndef _ASM_MICROBLAZE_FTRACE
+#define _ASM_MICROBLAZE_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((long)(_mcount))
+#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+extern void ftrace_call_graph(void);
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of the mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* _ASM_MICROBLAZE_FTRACE */
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 0b745828f42b..8dbb6e7a03a2 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -1 +1,126 @@
-#include <asm-generic/futex.h>
+#ifndef _ASM_MICROBLAZE_FUTEX_H
+#define _ASM_MICROBLAZE_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+({ \
+ __asm__ __volatile__ ( \
+ "1: lwx %0, %2, r0; " \
+ insn \
+ "2: swx %1, %2, r0; \
+ addic %1, r0, 0; \
+ bnei %1, 1b; \
+ 3: \
+ .section .fixup,\"ax\"; \
+ 4: brid 3b; \
+ addik %1, r0, %3; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b,2b,4b; \
+ .previous;" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ ); \
+})
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("or %1,%4,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ int prev, cmp;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ __asm__ __volatile__ ("1: lwx %0, %2, r0; \
+ cmp %1, %0, %3; \
+ beqi %1, 3f; \
+ 2: swx %4, %2, r0; \
+ addic %1, r0, 0; \
+ bnei %1, 1b; \
+ 3: \
+ .section .fixup,\"ax\"; \
+ 4: brid 3b; \
+ addik %0, r0, %5; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b,2b,4b; \
+ .previous;" \
+ : "=&r" (prev), "=&r"(cmp) \
+ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
+
+ return prev;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
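
futex_atomic_op_inuser() above first unpacks the operation, the comparison and two 12-bit signed arguments from the single encoded_op word before entering the atomic sequence. A small user-space sketch of just that unpacking (illustrative only, not part of the patch; the input value is hypothetical, the shift arithmetic mirrors the code above):

#include <stdio.h>

int main(void)
{
	/* hypothetical encoded_op: op=1 (ADD), cmp=3 (GE), oparg=10, cmparg=-1 */
	int encoded_op = (1 << 28) | (3 << 24) | (10 << 12) | 0xFFF;

	int op     = (encoded_op >> 28) & 7;	/* operation to perform */
	int cmp    = (encoded_op >> 24) & 15;	/* comparison on the old value */
	int oparg  = (encoded_op << 8) >> 20;	/* 12-bit signed operand */
	int cmparg = (encoded_op << 20) >> 20;	/* 12-bit signed compare value */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
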
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index dea65645a4f8..2c38c6d80176 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -10,78 +10,73 @@
#define _ASM_MICROBLAZE_IRQFLAGS_H
#include <linux/irqflags.h>
+#include <asm/registers.h>
# if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-# define local_irq_save(flags) \
+# define raw_local_irq_save(flags) \
do { \
- asm volatile ("# local_irq_save \n\t" \
- "msrclr %0, %1 \n\t" \
- "nop \n\t" \
+ asm volatile (" msrclr %0, %1; \
+ nop;" \
: "=r"(flags) \
: "i"(MSR_IE) \
: "memory"); \
} while (0)
-# define local_irq_disable() \
- do { \
- asm volatile ("# local_irq_disable \n\t" \
- "msrclr r0, %0 \n\t" \
- "nop \n\t" \
- : \
- : "i"(MSR_IE) \
- : "memory"); \
+# define raw_local_irq_disable() \
+ do { \
+ asm volatile (" msrclr r0, %0; \
+ nop;" \
+ : \
+ : "i"(MSR_IE) \
+ : "memory"); \
} while (0)
-# define local_irq_enable() \
- do { \
- asm volatile ("# local_irq_enable \n\t" \
- "msrset r0, %0 \n\t" \
- "nop \n\t" \
- : \
- : "i"(MSR_IE) \
- : "memory"); \
+# define raw_local_irq_enable() \
+ do { \
+ asm volatile (" msrset r0, %0; \
+ nop;" \
+ : \
+ : "i"(MSR_IE) \
+ : "memory"); \
} while (0)
# else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */
-# define local_irq_save(flags) \
+# define raw_local_irq_save(flags) \
do { \
register unsigned tmp; \
- asm volatile ("# local_irq_save \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- "andi %1, %0, %2 \n\t" \
- "mts rmsr, %1 \n\t" \
- "nop \n\t" \
+ asm volatile (" mfs %0, rmsr; \
+ nop; \
+ andi %1, %0, %2; \
+ mts rmsr, %1; \
+ nop;" \
: "=r"(flags), "=r" (tmp) \
: "i"(~MSR_IE) \
: "memory"); \
} while (0)
-# define local_irq_disable() \
+# define raw_local_irq_disable() \
do { \
register unsigned tmp; \
- asm volatile ("# local_irq_disable \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- "andi %0, %0, %1 \n\t" \
- "mts rmsr, %0 \n\t" \
- "nop \n\t" \
+ asm volatile (" mfs %0, rmsr; \
+ nop; \
+ andi %0, %0, %1; \
+ mts rmsr, %0; \
+ nop;" \
: "=r"(tmp) \
: "i"(~MSR_IE) \
: "memory"); \
} while (0)
-# define local_irq_enable() \
+# define raw_local_irq_enable() \
do { \
register unsigned tmp; \
- asm volatile ("# local_irq_enable \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- "ori %0, %0, %1 \n\t" \
- "mts rmsr, %0 \n\t" \
- "nop \n\t" \
+ asm volatile (" mfs %0, rmsr; \
+ nop; \
+ ori %0, %0, %1; \
+ mts rmsr, %0; \
+ nop;" \
: "=r"(tmp) \
: "i"(MSR_IE) \
: "memory"); \
@@ -89,35 +84,28 @@
# endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
-#define local_save_flags(flags) \
+#define raw_local_irq_restore(flags) \
do { \
- asm volatile ("# local_save_flags \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- : "=r"(flags) \
+ asm volatile (" mts rmsr, %0; \
+ nop;" \
: \
+ : "r"(flags) \
: "memory"); \
} while (0)
-#define local_irq_restore(flags) \
- do { \
- asm volatile ("# local_irq_restore \n\t"\
- "mts rmsr, %0 \n\t" \
- "nop \n\t" \
- : \
- : "r"(flags) \
- : "memory"); \
- } while (0)
-
-static inline int irqs_disabled(void)
+static inline unsigned long get_msr(void)
{
unsigned long flags;
-
- local_save_flags(flags);
- return ((flags & MSR_IE) == 0);
+ asm volatile (" mfs %0, rmsr; \
+ nop;" \
+ : "=r"(flags) \
+ : \
+ : "memory"); \
+ return flags;
}
-#define raw_irqs_disabled irqs_disabled
-#define raw_irqs_disabled_flags(flags) ((flags) == 0)
+#define raw_local_save_flags(flags) ((flags) = get_msr())
+#define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0)
+#define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0)
#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 880c988c2237..9b66c0fa9a32 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -164,7 +164,8 @@ extern int page_is_ram(unsigned long pfn);
# endif /* CONFIG_MMU */
# ifndef CONFIG_MMU
-# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
+# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && \
+ ((pfn) <= (min_low_pfn + max_mapnr)))
# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
# else /* CONFIG_MMU */
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index b0131da1387b..7547f5064560 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -106,9 +106,6 @@ extern inline void free_pgd_slow(pgd_t *pgd)
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
-/* FIXME two definition - look below */
-#define pmd_free(mm, x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
@@ -192,14 +189,14 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
* the pgd will always be present..
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
-/*#define pmd_free(mm, x) do { } while (0)*/
-#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);
#endif /* CONFIG_MMU */
-#define check_pgt_cache() do {} while (0)
+#define check_pgt_cache() do { } while (0)
#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 66f1b30dd097..e38abc7714b6 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -76,20 +76,23 @@ struct pvr_s {
#define PVR3_FSL_LINKS_MASK 0x00000380
/* ICache config PVR masks */
-#define PVR4_USE_ICACHE_MASK 0x80000000
-#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000
-#define PVR4_ICACHE_USE_FSL_MASK 0x02000000
-#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000
-#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000
-#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000
+#define PVR4_USE_ICACHE_MASK 0x80000000 /* ICU */
+#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* ICTS */
+#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000 /* ICW */
+#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000 /* ICLL */
+#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000 /* ICBS */
+#define PVR4_ICACHE_ALWAYS_USED 0x00008000 /* IAU */
+#define PVR4_ICACHE_INTERFACE 0x00002000 /* ICI */
/* DCache config PVR masks */
-#define PVR5_USE_DCACHE_MASK 0x80000000
-#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000
-#define PVR5_DCACHE_USE_FSL_MASK 0x02000000
-#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000
-#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000
-#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000
+#define PVR5_USE_DCACHE_MASK 0x80000000 /* DCU */
+#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* DCTS */
+#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000 /* DCW */
+#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000 /* DCLL */
+#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000 /* DCBS */
+#define PVR5_DCACHE_ALWAYS_USED 0x00008000 /* DAU */
+#define PVR5_DCACHE_USE_WRITEBACK 0x00004000 /* DWB */
+#define PVR5_DCACHE_INTERFACE 0x00002000 /* DCI */
/* ICache base address PVR mask */
#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF
@@ -178,11 +181,14 @@ struct pvr_s {
((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
#define PVR_DCACHE_USE_FSL(pvr) (pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
#define PVR_DCACHE_ALLOW_WR(pvr) (pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+/* FIXME the two shifts on one line need a comment */
#define PVR_DCACHE_LINE_LEN(pvr) \
(1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
#define PVR_DCACHE_BYTE_SIZE(pvr) \
(1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_DCACHE_USE_WRITEBACK(pvr) \
+ ((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
#define PVR_ICACHE_BASEADDR(pvr) (pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
#define PVR_ICACHE_HIGHADDR(pvr) (pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index ed67c9ed15b8..7f31394985e0 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -35,6 +35,8 @@ extern void mmu_reset(void);
extern void early_console_reg_tlb_alloc(unsigned int addr);
# endif /* CONFIG_MMU */
+extern void of_platform_reset_gpio_probe(void);
+
void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index b1ed61590660..157970688b2a 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -16,6 +16,8 @@
#include <asm-generic/cmpxchg.h>
#include <asm-generic/cmpxchg-local.h>
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+
struct task_struct;
struct thread_info;
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 5431b4631a7a..371bd6e56d9a 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -272,8 +272,9 @@ static inline int clear_user(char *to, int size)
return size;
}
-extern unsigned long __copy_tofrom_user(void __user *to,
- const void __user *from, unsigned long size);
+#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
+#define __copy_from_user_inatomic(to, from, n) \
+ copy_from_user((to), (from), (n))
#define copy_to_user(to, from, n) \
(access_ok(VERIFY_WRITE, (to), (n)) ? \
@@ -290,10 +291,6 @@ extern unsigned long __copy_tofrom_user(void __user *to,
(void __user *)(from), (n)) \
: -EFAULT)
-#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
- copy_from_user((to), (from), (n))
-
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);
@@ -305,6 +302,9 @@ extern int __strnlen_user(const char __user *sstr, int len);
#endif /* CONFIG_MMU */
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index d487729683de..b07594eccf9b 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -2,12 +2,22 @@
# Makefile
#
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early boot code and low level code
+CFLAGS_REMOVE_timer.o = -pg
+CFLAGS_REMOVE_intc.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_selfmod.o = -pg
+CFLAGS_REMOVE_heartbeat.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
extra-y := head.o vmlinux.lds
obj-y += exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
of_platform.o process.o prom.o prom_parse.o ptrace.o \
- setup.o signal.o sys_microblaze.o timer.o traps.o
+ setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
obj-y += cpu/
@@ -16,5 +26,7 @@ obj-$(CONFIG_SELFMOD) += selfmod.o
obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile
index 20646e549271..59cc7bceaf8c 100644
--- a/arch/microblaze/kernel/cpu/Makefile
+++ b/arch/microblaze/kernel/cpu/Makefile
@@ -2,6 +2,10 @@
# Build the appropriate CPU version support
#
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cache.o = -pg
+endif
+
EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \
-DCPU_REV=$(CPU_REV)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index af866a450125..d9d63831cc2f 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
@@ -13,243 +13,534 @@
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
-/* Exported functions */
+static inline void __invalidate_flush_icache(unsigned int addr)
+{
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (addr));
+}
+
+static inline void __flush_dcache(unsigned int addr)
+{
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (addr));
+}
+
+static inline void __invalidate_dcache(unsigned int baseaddr,
+ unsigned int offset)
+{
+ __asm__ __volatile__ ("wdc.clear %0, %1;" \
+ : : "r" (baseaddr), "r" (offset));
+}
-void _enable_icache(void)
+static inline void __enable_icache_msr(void)
{
- if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrset r0, %0; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop; " \
+ : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop; " \
+ : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
- : "memory", "r12");
-#endif
- }
}
-void _disable_icache(void)
+static inline void __disable_dcache_msr(void)
{
- if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
: "memory", "r12");
-#endif
- }
}
-void _invalidate_icache(unsigned int addr)
+static inline void __disable_icache_nomsr(void)
{
- if (cpuinfo.use_icache) {
- __asm__ __volatile__ (" \
- wic %0, r0" \
- : \
- : "r" (addr));
- }
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory", "r12");
}
-void _enable_dcache(void)
+static inline void __enable_dcache_nomsr(void)
{
- if (cpuinfo.use_dcache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrset r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory", "r12");
-#endif
- }
}
-void _disable_dcache(void)
+static inline void __disable_dcache_nomsr(void)
{
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory", "r12");
-#endif
}
-void _invalidate_dcache(unsigned int addr)
+
+/* Helper macro for computing the limits of cache range loops */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
+do { \
+ int align = ~(cache_line_length - 1); \
+ end = min(start + cache_size, end); \
+ start &= align; \
+ end = ((end & align) + cache_line_length); \
+} while (0);
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on that cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op) \
+do { \
+ unsigned int len = cache_size; \
+ int step = -line_length; \
+ BUG_ON(step >= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %0, r0; \
+ bgtid %0, 1b; \
+ addk %0, %0, %1; \
+ " : : "r" (len), "r" (step) \
+ : "memory"); \
+} while (0);
+
+
+#define CACHE_ALL_LOOP2(cache_size, line_length, op) \
+do { \
+ unsigned int len = cache_size; \
+ int step = -line_length; \
+ BUG_ON(step >= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " r0, %0; \
+ bgtid %0, 1b; \
+ addk %0, %0, %1; \
+ " : : "r" (len), "r" (step) \
+ : "memory"); \
+} while (0);
+
+/* for wdc.flush/clear */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
+do { \
+ int step = -line_length; \
+ int count = end - start; \
+ BUG_ON(count <= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %0, %1; \
+ bgtid %1, 1b; \
+ addk %1, %1, %2; \
+ " : : "r" (start), "r" (count), \
+ "r" (step) : "memory"); \
+} while (0);
+
+/* Only the first parameter is used by OP - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
+do { \
+ int step = -line_length; \
+ int count = end - start; \
+ BUG_ON(count <= 0); \
+ \
+ __asm__ __volatile__ (" 1: addk %0, %0, %1; \
+ " #op " %0, r0; \
+ bgtid %1, 1b; \
+ addk %1, %1, %2; \
+ " : : "r" (start), "r" (count), \
+ "r" (step) : "memory"); \
+} while (0);
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
- __asm__ __volatile__ (" \
- wdc %0, r0" \
- : \
- : "r" (addr));
+ unsigned long flags;
+
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+ local_irq_save(flags);
+ __disable_icache_msr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_msr();
+ local_irq_restore(flags);
}
-void __invalidate_icache_all(void)
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+ unsigned long end)
{
- unsigned int i;
- unsigned flags;
+ unsigned long flags;
- if (cpuinfo.use_icache) {
- local_irq_save(flags);
- __disable_icache();
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
- /* Just loop through cache size and invalidate, no need to add
- CACHE_BASE address */
- for (i = 0; i < cpuinfo.icache_size;
- i += cpuinfo.icache_line)
- __invalidate_icache(i);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
- __enable_icache();
- local_irq_restore(flags);
- }
+ local_irq_save(flags);
+ __disable_icache_nomsr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_nomsr();
+ local_irq_restore(flags);
}
-void __invalidate_icache_range(unsigned long start, unsigned long end)
+static void __flush_icache_range_noirq(unsigned long start,
+ unsigned long end)
{
- unsigned int i;
- unsigned flags;
- unsigned int align;
-
- if (cpuinfo.use_icache) {
- /*
- * No need to cover entire cache range,
- * just cover cache footprint
- */
- end = min(start + cpuinfo.icache_size, end);
- align = ~(cpuinfo.icache_line - 1);
- start &= align; /* Make sure we are aligned */
- /* Push end up to the next cache line */
- end = ((end & align) + cpuinfo.icache_line);
-
- local_irq_save(flags);
- __disable_icache();
-
- for (i = start; i < end; i += cpuinfo.icache_line)
- __invalidate_icache(i);
-
- __enable_icache();
- local_irq_restore(flags);
- }
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_icache_msr();
+
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_msr();
+ local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_icache_nomsr();
+
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_nomsr();
+ local_irq_restore(flags);
}
-void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+static void __flush_icache_all_noirq(void)
{
- __invalidate_icache_all();
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
}
-void __invalidate_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long adr,
- int len)
+static void __invalidate_dcache_all_msr_irq(void)
{
- __invalidate_icache_all();
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_dcache_msr();
+
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_msr();
+ local_irq_restore(flags);
}
-void __invalidate_cache_sigtramp(unsigned long addr)
+static void __invalidate_dcache_all_nomsr_irq(void)
{
- __invalidate_icache_range(addr, addr + 8);
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_dcache_nomsr();
+
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_nomsr();
+ local_irq_restore(flags);
}
-void __invalidate_dcache_all(void)
+static void __invalidate_dcache_all_noirq_wt(void)
{
- unsigned int i;
- unsigned flags;
-
- if (cpuinfo.use_dcache) {
- local_irq_save(flags);
- __disable_dcache();
-
- /*
- * Just loop through cache size and invalidate,
- * no need to add CACHE_BASE address
- */
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line)
- __invalidate_dcache(i);
-
- __enable_dcache();
- local_irq_restore(flags);
- }
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
}
-void __invalidate_dcache_range(unsigned long start, unsigned long end)
+/* FIXME this is odd - plain wdc should be enough but does not work;
+ * MS: I am getting bus errors and other weird things */
+static void __invalidate_dcache_all_wb(void)
{
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc.clear)
+
+#if 0
unsigned int i;
- unsigned flags;
- unsigned int align;
-
- if (cpuinfo.use_dcache) {
- /*
- * No need to cover entire cache range,
- * just cover cache footprint
- */
- end = min(start + cpuinfo.dcache_size, end);
- align = ~(cpuinfo.dcache_line - 1);
- start &= align; /* Make sure we are aligned */
- /* Push end up to the next cache line */
- end = ((end & align) + cpuinfo.dcache_line);
- local_irq_save(flags);
- __disable_dcache();
-
- for (i = start; i < end; i += cpuinfo.dcache_line)
- __invalidate_dcache(i);
-
- __enable_dcache();
- local_irq_restore(flags);
- }
+
+ pr_debug("%s\n", __func__);
+
+ /* Just loop through cache size and invalidate it */
+ for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
+ __invalidate_dcache(0, i);
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+ unsigned long end)
+{
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+ unsigned long end)
+{
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
}
-void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+ unsigned long end)
{
- __invalidate_dcache_all();
+ unsigned long flags;
+
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ local_irq_save(flags);
+ __disable_dcache_msr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_msr();
+ local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ local_irq_save(flags);
+ __disable_dcache_nomsr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_nomsr();
+ local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc.flush);
}
-void __invalidate_dcache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long adr,
- int len)
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
- __invalidate_dcache_all();
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+}
+
+/* struct for wb caches and for wt caches */
+struct scache *mbc;
+
+/* new wb cache model */
+const struct scache wb_msr = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __flush_dcache_all_wb,
+ .dflr = __flush_dcache_range_wb,
+ .din = __invalidate_dcache_all_wb,
+ .dinr = __invalidate_dcache_range_wb,
+};
+
+/* The only difference is in the ie, id, de, dd functions */
+const struct scache wb_nomsr = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __flush_dcache_all_wb,
+ .dflr = __flush_dcache_range_wb,
+ .din = __invalidate_dcache_all_wb,
+ .dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old wt cache model which disables irqs and turns the cache off */
+const struct scache wt_msr = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_msr_irq,
+ .iflr = __flush_icache_range_msr_irq,
+ .iin = __flush_icache_all_msr_irq,
+ .iinr = __flush_icache_range_msr_irq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __invalidate_dcache_all_msr_irq,
+ .dflr = __invalidate_dcache_range_msr_irq_wt,
+ .din = __invalidate_dcache_all_msr_irq,
+ .dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+const struct scache wt_nomsr = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_nomsr_irq,
+ .iflr = __flush_icache_range_nomsr_irq,
+ .iin = __flush_icache_all_nomsr_irq,
+ .iinr = __flush_icache_range_nomsr_irq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __invalidate_dcache_all_nomsr_irq,
+ .dflr = __invalidate_dcache_range_nomsr_irq,
+ .din = __invalidate_dcache_all_nomsr_irq,
+ .dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt cache model for newer Microblaze versions */
+const struct scache wt_msr_noirq = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __invalidate_dcache_all_noirq_wt,
+ .dflr = __invalidate_dcache_range_nomsr_wt,
+ .din = __invalidate_dcache_all_noirq_wt,
+ .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+const struct scache wt_nomsr_noirq = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __invalidate_dcache_all_noirq_wt,
+ .dflr = __invalidate_dcache_range_nomsr_wt,
+ .din = __invalidate_dcache_all_noirq_wt,
+ .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version codes for 7.20.a/7.20.d - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A 0x0c
+#define CPUVER_7_20_D 0x0f
+
+#define INFO(s) printk(KERN_INFO "cache: " s " \n");
+
+void microblaze_cache_init(void)
+{
+ if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+ if (cpuinfo.dcache_wb) {
+ INFO("wb_msr");
+ mbc = (struct scache *)&wb_msr;
+ if (cpuinfo.ver_code < CPUVER_7_20_D) {
+ /* MS: problem with signal handling - hw bug */
+ INFO("WB won't work properly");
+ }
+ } else {
+ if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ INFO("wt_msr_noirq");
+ mbc = (struct scache *)&wt_msr_noirq;
+ } else {
+ INFO("wt_msr");
+ mbc = (struct scache *)&wt_msr;
+ }
+ }
+ } else {
+ if (cpuinfo.dcache_wb) {
+ INFO("wb_nomsr");
+ mbc = (struct scache *)&wb_nomsr;
+ if (cpuinfo.ver_code < CPUVER_7_20_D) {
+ /* MS: problem with signal handling - hw bug */
+ INFO("WB won't work properly");
+ }
+ } else {
+ if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ INFO("wt_nomsr_noirq");
+ mbc = (struct scache *)&wt_nomsr_noirq;
+ } else {
+ INFO("wt_nomsr");
+ mbc = (struct scache *)&wt_nomsr;
+ }
+ }
+ }
}
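
Before any of the range loops above run, CACHE_LOOP_LIMITS clamps the range to at most one cache size and aligns it to whole cache lines. The same arithmetic as a standalone function, illustrative only and using hypothetical numbers:

#include <stdio.h>

static void cache_loop_limits(unsigned long *start, unsigned long *end,
			      unsigned long line_len, unsigned long cache_size)
{
	unsigned long align = ~(line_len - 1);

	if (*end > *start + cache_size)	/* never walk more than one cache size */
		*end = *start + cache_size;
	*start &= align;			/* round start down to a line boundary */
	*end = (*end & align) + line_len;	/* push end up past the last line */
}

int main(void)
{
	unsigned long start = 0x1005, end = 0x10f3;

	cache_loop_limits(&start, &end, 16, 8192);
	printf("start=%#lx end=%#lx\n", start, end);	/* start=0x1000 end=0x1100 */
	return 0;
}
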
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index c259786e7faa..f72dbd66c844 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -21,8 +21,14 @@
*/
#define CI(c, p) { ci->c = PVR_##p(pvr); }
+
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
#define err_printk(x) \
early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
+#else
+#define err_printk(x) \
+ printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n");
+#endif
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{
@@ -70,7 +76,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
CI(use_icache, USE_ICACHE);
CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
CI(icache_write, ICACHE_ALLOW_WR);
- CI(icache_line, ICACHE_LINE_LEN);
+ ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
CI(icache_size, ICACHE_BYTE_SIZE);
CI(icache_base, ICACHE_BASEADDR);
CI(icache_high, ICACHE_HIGHADDR);
@@ -78,11 +84,16 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
CI(use_dcache, USE_DCACHE);
CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
CI(dcache_write, DCACHE_ALLOW_WR);
- CI(dcache_line, DCACHE_LINE_LEN);
+ ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
CI(dcache_size, DCACHE_BYTE_SIZE);
CI(dcache_base, DCACHE_BASEADDR);
CI(dcache_high, DCACHE_HIGHADDR);
+ temp = PVR_DCACHE_USE_WRITEBACK(pvr);
+ if (ci->dcache_wb != temp)
+ err_printk("DCACHE WB");
+ ci->dcache_wb = temp;
+
CI(use_dopb, D_OPB);
CI(use_iopb, I_OPB);
CI(use_dlmb, D_LMB);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index adb448f93d5f..6095aa6b5c88 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -72,12 +72,12 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
ci->use_icache = fcpu(cpu, "xlnx,use-icache");
ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
- ci->icache_line = fcpu(cpu, "xlnx,icache-line-len") << 2;
- if (!ci->icache_line) {
+ ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
+ if (!ci->icache_line_length) {
if (fcpu(cpu, "xlnx,icache-use-fsl"))
- ci->icache_line = 4 << 2;
+ ci->icache_line_length = 4 << 2;
else
- ci->icache_line = 1 << 2;
+ ci->icache_line_length = 1 << 2;
}
ci->icache_size = fcpu(cpu, "i-cache-size");
ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
@@ -86,16 +86,17 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
- ci->dcache_line = fcpu(cpu, "xlnx,dcache-line-len") << 2;
- if (!ci->dcache_line) {
+ ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
+ if (!ci->dcache_line_length) {
if (fcpu(cpu, "xlnx,dcache-use-fsl"))
- ci->dcache_line = 4 << 2;
+ ci->dcache_line_length = 4 << 2;
else
- ci->dcache_line = 1 << 2;
+ ci->dcache_line_length = 1 << 2;
}
ci->dcache_size = fcpu(cpu, "d-cache-size");
ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
+ ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 3539babc1c18..991d71311b0e 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -29,11 +29,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.20.a", 0x0c},
{"7.20.b", 0x0d},
{"7.20.c", 0x0e},
- /* FIXME There is no keycode defined in MBV for these versions */
- {"2.10.a", 0x10},
- {"3.00.a", 0x20},
- {"4.00.a", 0x30},
- {"4.00.b", 0x40},
+ {"7.20.d", 0x0f},
+ {"7.30.a", 0x10},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 4dcfccdbc364..0c912b2a8e03 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -103,11 +103,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else
count += seq_printf(m, "Icache:\t\tno\n");
- if (cpuinfo.use_dcache)
+ if (cpuinfo.use_dcache) {
count += seq_printf(m,
"Dcache:\t\t%ukB\n",
cpuinfo.dcache_size >> 10);
- else
+ if (cpuinfo.dcache_wb)
+ count += seq_printf(m, "\t\twrite-back\n");
+ else
+ count += seq_printf(m, "\t\twrite-through\n");
+ } else
count += seq_printf(m, "Dcache:\t\tno\n");
count += seq_printf(m,
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index c9a4340ddd53..9bee9382bf74 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -45,7 +45,7 @@
int cpu_has_pvr(void)
{
- unsigned flags;
+ unsigned long flags;
unsigned pvr0;
local_save_flags(flags);
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 9083d85376a4..95b0855802df 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -208,8 +208,6 @@ ENTRY(_user_exception)
lwi r1, r1, TS_THREAD_INFO /* get the thread info */
/* calculate kernel stack pointer */
addik r1, r1, THREAD_SIZE - PT_SIZE
- swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
- lwi r11, r0, PER_CPU(KM) /* load mode indicator */
2:
swi r11, r1, PT_MODE /* store the mode */
lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index e3ecb36dd554..3bad4ff49471 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -31,6 +31,8 @@
#include <linux/errno.h>
#include <asm/signal.h>
+#undef DEBUG
+
/* The size of a state save frame. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
@@ -352,10 +354,12 @@ C_ENTRY(_user_exception):
add r12, r12, r12; /* convert num -> ptr */
add r12, r12, r12;
+#ifdef DEBUG
/* Trac syscalls and stored them to r0_ram */
lwi r3, r12, 0x400 + r0_ram
addi r3, r3, 1
swi r3, r12, 0x400 + r0_ram
+#endif
# Find and jump into the syscall handler.
lwi r12, r12, sys_call_table
@@ -496,17 +500,6 @@ C_ENTRY(sys_execve):
brid microblaze_execve; /* Do real work (tail-call).*/
nop;
-C_ENTRY(sys_rt_sigsuspend_wrapper):
- swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- swi r4, r1, PTO+PT_R4;
- la r7, r1, PTO; /* add user context as 3rd arg */
- brlid r15, sys_rt_sigsuspend; /* Do real work.*/
- nop;
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
- bri ret_from_trap /* fall through will not work here due to align */
- nop;
-
C_ENTRY(sys_rt_sigreturn_wrapper):
swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
swi r4, r1, PTO+PT_R4;
@@ -711,15 +704,11 @@ C_ENTRY(ret_from_exc):
* (in a possibly modified form) after do_signal returns.
* store return registers separately because this macros is use
* for others exceptions */
- swi r3, r1, PTO + PT_R3;
- swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
new file mode 100644
index 000000000000..388b31ca65a1
--- /dev/null
+++ b/arch/microblaze/kernel/ftrace.c
@@ -0,0 +1,237 @@
+/*
+ * Ftrace support for Microblaze.
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * Based on MIPS and PowerPC ftrace code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against fault, even if it shouldn't
+ * happen. This tool is too intrusive to
+ * ignore such a protection.
+ */
+ asm volatile(" 1: lwi %0, %2, 0; \
+ 2: swi %3, %2, 0; \
+ addik %1, r0, 0; \
+ 3: \
+ .section .fixup, \"ax\"; \
+ 4: brid 3b; \
+ addik %1, r0, 1; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b; \
+ .word 2b,4b; \
+ .previous;" \
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* save value to addr - it is safe to do it in asm */
+static int ftrace_modify_code(unsigned long addr, unsigned int value)
+{
+ int faulted = 0;
+
+ __asm__ __volatile__(" 1: swi %2, %1, 0; \
+ addik %0, r0, 0; \
+ 2: \
+ .section .fixup, \"ax\"; \
+ 3: brid 2b; \
+ addik %0, r0, 1; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,3b; \
+ .previous;" \
+ : "=r" (faulted)
+ : "r" (addr), "r" (value)
+ );
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define MICROBLAZE_NOP 0x80000000
+#define MICROBLAZE_BRI 0xb800000C
+
+static unsigned int recorded; /* whether the original code was saved */
+static unsigned int imm; /* saving whole imm instruction */
+
+/* There are two approaches to implementing the ftrace_make_nop function - see below */
+#undef USE_FTRACE_NOP
+
+#ifdef USE_FTRACE_NOP
+static unsigned int bralid; /* saving whole bralid instruction */
+#endif
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ /* this is the code sequence we are working with
+ * b000c000 imm -16384
+ * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
+ * 80000000 or r0, r0, r0
+ *
+ * The first solution (!USE_FTRACE_NOP-could be called branch solution)
+ * b000c000 bri 12 (0xC - jump to any other instruction)
+ * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
+ * 80000000 or r0, r0, r0
+ * any other instruction
+ *
+ * The second solution (USE_FTRACE_NOP) - no jump just nops
+ * 80000000 or r0, r0, r0
+ * 80000000 or r0, r0, r0
+ * 80000000 or r0, r0, r0
+ */
+ int ret = 0;
+
+ if (recorded == 0) {
+ recorded = 1;
+ imm = *(unsigned int *)rec->ip;
+ pr_debug("%s: imm:0x%x\n", __func__, imm);
+#ifdef USE_FTRACE_NOP
+ bralid = *(unsigned int *)(rec->ip + 4);
+ pr_debug("%s: bralid 0x%x\n", __func__, bralid);
+#endif /* USE_FTRACE_NOP */
+ }
+
+#ifdef USE_FTRACE_NOP
+ ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
+ ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
+#else /* USE_FTRACE_NOP */
+ ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
+#endif /* USE_FTRACE_NOP */
+ return ret;
+}
+
+static int ret_addr; /* initialized as 0 by default */
+
+/* I believe ftrace_make_nop is called before this function */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ int ret;
+ ret_addr = addr; /* saving where the barrier jump is */
+ pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
+ __func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
+ ret = ftrace_modify_code(rec->ip, imm);
+#ifdef USE_FTRACE_NOP
+ pr_debug("%s: bralid:0x%x\n", __func__, bralid);
+ ret += ftrace_modify_code(rec->ip + 4, bralid);
+#endif /* USE_FTRACE_NOP */
+ return ret;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ /* The return code is returned via data */
+ *(unsigned long *)data = 0;
+
+ return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned int upper = (unsigned int)func;
+ unsigned int lower = (unsigned int)func;
+ int ret = 0;
+
+ /* create proper saving to ftrace_call poll */
+ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
+ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */
+
+ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
+ __func__, (unsigned int)func, (unsigned int)ip, upper, lower);
+
+ /* save upper and lower code */
+ ret = ftrace_modify_code(ip, upper);
+ ret += ftrace_modify_code(ip + 4, lower);
+
+ /* We just need to replace the rtsd r15, 8 with a NOP */
+ BUG_ON(!ret_addr);
+ if (ret_addr)
+ ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP);
+ else
+ ret = 1; /* fault */
+
+ /* All changes are done - make the caches consistent */
+ flush_icache();
+ return ret;
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+unsigned int old_jump; /* saving place for jump instruction */
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned int ret;
+ unsigned long ip = (unsigned long)(&ftrace_call_graph);
+
+ old_jump = *(unsigned int *)ip; /* save jump over instruction */
+ ret = ftrace_modify_code(ip, MICROBLAZE_NOP);
+ flush_icache();
+
+ pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
+ return ret;
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned int ret;
+ unsigned long ip = (unsigned long)(&ftrace_call_graph);
+
+ ret = ftrace_modify_code(ip, old_jump);
+ flush_icache();
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE */
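
ftrace_update_ftrace_func() above patches the ftrace_call site with an imm/addik pair that together load the 32-bit handler address into r20. How the two instruction words are built from an address, as a standalone sketch (illustrative only, not part of the patch; the address is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int func = 0xC0008E30;		/* hypothetical handler address */

	/* imm carries the upper half, addik r20, r0, ... the lower half */
	unsigned int upper = 0xb0000000 + (func >> 16);
	unsigned int lower = 0x32800000 + (func & 0xFFFF);

	printf("imm   word: 0x%08x\n", upper);	/* 0xb000c000 */
	printf("addik word: 0x%08x\n", lower);	/* 0x32808e30 */
	return 0;
}
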
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 1bdf20222b92..522751737cfa 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -45,6 +45,7 @@ void heartbeat(void)
void setup_heartbeat(void)
{
struct device_node *gpio = NULL;
+ int *prop;
int j;
char *gpio_list[] = {
"xlnx,xps-gpio-1.00.a",
@@ -58,10 +59,14 @@ void setup_heartbeat(void)
break;
}
- base_addr = *(int *) of_get_property(gpio, "reg", NULL);
- base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
- printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
+ if (gpio) {
+ base_addr = *(int *) of_get_property(gpio, "reg", NULL);
+ base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
+ printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
- if (*(int *) of_get_property(gpio, "xlnx,is-bidir", NULL))
- out_be32(base_addr + 4, 0); /* GPIO is configured as output */
+ /* GPIO is configured as output */
+ prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
+ if (prop)
+ out_be32(base_addr + 4, 0);
+ }
}
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 6eea6f92b84e..03172c1da770 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -42,8 +42,16 @@ unsigned int nr_irq;
static void intc_enable_or_unmask(unsigned int irq)
{
+ unsigned long mask = 1 << irq;
pr_debug("enable_or_unmask: %d\n", irq);
- out_be32(INTC_BASE + SIE, 1 << irq);
+ out_be32(INTC_BASE + SIE, mask);
+
+ /* ack level irqs because they can't be acked during
+ * ack function since the handle_level_irq function
+ * acks the irq before calling the interrupt handler
+ */
+ if (irq_desc[irq].status & IRQ_LEVEL)
+ out_be32(INTC_BASE + IAR, mask);
}
static void intc_disable_or_mask(unsigned int irq)
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 7d5ddd62d4d2..0f06034d1fe0 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < nr_irq) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
new file mode 100644
index 000000000000..e7eaa7a8cbd3
--- /dev/null
+++ b/arch/microblaze/kernel/mcount.S
@@ -0,0 +1,170 @@
+/*
+ * Low-level ftrace handling
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/linkage.h>
+
+#define NOALIGN_ENTRY(name) .globl name; name:
+
+/* FIXME MS: I think that I don't need to save all regs */
+#define SAVE_REGS \
+ addik r1, r1, -120; \
+ swi r2, r1, 4; \
+ swi r3, r1, 8; \
+ swi r4, r1, 12; \
+ swi r5, r1, 116; \
+ swi r6, r1, 16; \
+ swi r7, r1, 20; \
+ swi r8, r1, 24; \
+ swi r9, r1, 28; \
+ swi r10, r1, 32; \
+ swi r11, r1, 36; \
+ swi r12, r1, 40; \
+ swi r13, r1, 44; \
+ swi r14, r1, 48; \
+ swi r16, r1, 52; \
+ swi r17, r1, 56; \
+ swi r18, r1, 60; \
+ swi r19, r1, 64; \
+ swi r20, r1, 68; \
+ swi r21, r1, 72; \
+ swi r22, r1, 76; \
+ swi r23, r1, 80; \
+ swi r24, r1, 84; \
+ swi r25, r1, 88; \
+ swi r26, r1, 92; \
+ swi r27, r1, 96; \
+ swi r28, r1, 100; \
+ swi r29, r1, 104; \
+ swi r30, r1, 108; \
+ swi r31, r1, 112;
+
+#define RESTORE_REGS \
+ lwi r2, r1, 4; \
+ lwi r3, r1, 8; \
+ lwi r4, r1, 12; \
+ lwi r5, r1, 116; \
+ lwi r6, r1, 16; \
+ lwi r7, r1, 20; \
+ lwi r8, r1, 24; \
+ lwi r9, r1, 28; \
+ lwi r10, r1, 32; \
+ lwi r11, r1, 36; \
+ lwi r12, r1, 40; \
+ lwi r13, r1, 44; \
+ lwi r14, r1, 48; \
+ lwi r16, r1, 52; \
+ lwi r17, r1, 56; \
+ lwi r18, r1, 60; \
+ lwi r19, r1, 64; \
+ lwi r20, r1, 68; \
+ lwi r21, r1, 72; \
+ lwi r22, r1, 76; \
+ lwi r23, r1, 80; \
+ lwi r24, r1, 84; \
+ lwi r25, r1, 88; \
+ lwi r26, r1, 92; \
+ lwi r27, r1, 96; \
+ lwi r28, r1, 100; \
+ lwi r29, r1, 104; \
+ lwi r30, r1, 108; \
+ lwi r31, r1, 112; \
+ addik r1, r1, 120;
+
+ENTRY(ftrace_stub)
+ rtsd r15, 8;
+ nop;
+
+ENTRY(_mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+ /* MS: It is just barrier which is removed from C code */
+ rtsd r15, 8
+ nop
+#endif /* CONFIG_DYNAMIC_FTRACE */
+ SAVE_REGS
+ swi r15, r1, 0;
+ /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */
+ lwi r5, r0, function_trace_stop;
+ bneid r5, end;
+ nop;
+ /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef CONFIG_DYNAMIC_FTRACE
+ lwi r5, r0, ftrace_graph_return;
+ addik r6, r0, ftrace_stub; /* asm implementation */
+ cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+
+ lwi r6, r0, ftrace_graph_entry;
+ addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */
+ cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+#else /* CONFIG_DYNAMIC_FTRACE */
+NOALIGN_ENTRY(ftrace_call_graph)
+ /* MS: jump over graph function - replaced from C code */
+ bri end_graph_tracer
+#endif /* CONFIG_DYNAMIC_FTRACE */
+ addik r5, r1, 120; /* MS: load parent addr */
+ addik r6, r15, 0; /* MS: load current function addr */
+ bralid r15, prepare_ftrace_return;
+ nop;
+ /* MS: graph was taken that's why - can jump over function trace */
+ brid end;
+ nop;
+end_graph_tracer:
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifndef CONFIG_DYNAMIC_FTRACE
+ /* MS: test function trace if is taken or not */
+ lwi r20, r0, ftrace_trace_function;
+ addik r6, r0, ftrace_stub;
+ cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */
+ beqid r5, end; /* MS: not taken -> jump over */
+ nop;
+#else /* CONFIG_DYNAMIC_FTRACE */
+NOALIGN_ENTRY(ftrace_call)
+/* instruction for setup imm FUNC_part1, addik r20, r0, FUNC_part2 */
+ nop
+ nop
+#endif /* CONFIG_DYNAMIC_FTRACE */
+/* static normal trace */
+ lwi r6, r1, 120; /* MS: load parent addr */
+ addik r5, r15, 0; /* MS: load current function addr */
+ /* MS: here is dependency on previous code */
+ brald r15, r20; /* MS: jump to ftrace handler */
+ nop;
+end:
+ lwi r15, r1, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(return_to_handler)
+ nop; /* MS: just barrier for rtsd r15, 8 */
+ nop;
+ SAVE_REGS
+ swi r15, r1, 0;
+
+ /* MS: find out returning address */
+ bralid r15, ftrace_return_to_handler;
+ nop;
+
+ /* MS: the return value of ftrace_return_to_handler is the address to
+ * return to; grab it before RESTORE_REGS because r3 gets restored */
+ addik r15, r3, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
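Editor's note: a C-level model (illustrative only, not the Microblaze symbols) of what the assembly above wires up. Compiling with -pg makes every function call _mcount(), which forwards the call site and its parent to whatever callback the tracing core has installed, defaulting to a do-nothing stub.

typedef void (*trace_func_t)(unsigned long ip, unsigned long parent_ip);

static void trace_stub(unsigned long ip, unsigned long parent_ip)
{
	/* default: tracing disabled */
}

static trace_func_t trace_function = trace_stub;

void mcount_c_model(unsigned long ip, unsigned long parent_ip)
{
	if (trace_function != trace_stub)	/* same check the asm performs */
		trace_function(ip, parent_ip);
}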
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 59ff20e33e0c..bc4dcb7d3861 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <asm/page.h>
#include <asm/system.h>
+#include <linux/ftrace.h>
#include <linux/uaccess.h>
/*
@@ -47,3 +48,7 @@ extern void __umodsi3(void);
EXPORT_SYMBOL(__umodsi3);
extern char *_ebss;
EXPORT_SYMBOL_GPL(_ebss);
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index c592d475b3d8..812f1bf06c9e 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
{
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
new file mode 100644
index 000000000000..a1721a33042e
--- /dev/null
+++ b/arch/microblaze/kernel/reset.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+
+/* Trigger specific functions */
+#ifdef CONFIG_GPIOLIB
+
+#include <linux/of_gpio.h>
+
+static int handle; /* reset pin handle */
+static unsigned int reset_val;
+
+static int of_reset_gpio_handle(void)
+{
+ int ret; /* stores the reset gpio pin handle */
+ struct device_node *root; /* root node */
+ struct device_node *gpio; /* gpio node */
+ struct of_gpio_chip *of_gc = NULL;
+ enum of_gpio_flags flags ;
+ const void *gpio_spec;
+
+ /* find out root node */
+ root = of_find_node_by_path("/");
+
+ /* get a handle to the gpio node so that the pin can be allocated */
+ ret = of_parse_phandles_with_args(root, "hard-reset-gpios",
+ "#gpio-cells", 0, &gpio, &gpio_spec);
+ if (ret) {
+ pr_debug("%s: can't parse gpios property\n", __func__);
+ goto err0;
+ }
+
+ of_gc = gpio->data;
+ if (!of_gc) {
+ pr_debug("%s: gpio controller %s isn't registered\n",
+ root->full_name, gpio->full_name);
+ ret = -ENODEV;
+ goto err1;
+ }
+
+ ret = of_gc->xlate(of_gc, root, gpio_spec, &flags);
+ if (ret < 0)
+ goto err1;
+
+ ret += of_gc->gc.base;
+err1:
+ of_node_put(gpio);
+err0:
+ pr_debug("%s exited with status %d\n", __func__, ret);
+ return ret;
+}
+
+void of_platform_reset_gpio_probe(void)
+{
+ int ret;
+ handle = of_reset_gpio_handle();
+
+ if (!gpio_is_valid(handle)) {
+ printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n",
+ handle, "reset");
+ }
+
+ ret = gpio_request(handle, "reset");
+ if (ret < 0) {
+ printk(KERN_INFO "GPIO pin is already allocated\n");
+ return;
+ }
+
+ /* get current setup value */
+ reset_val = gpio_get_value(handle);
+ /* FIXME maybe worth to perform any action */
+ pr_debug("Reset: Gpio output state: 0x%x\n", reset_val);
+
+ /* Setup GPIO as output */
+ ret = gpio_direction_output(handle, 0);
+ if (ret < 0)
+ goto err;
+
+ /* Setup output direction */
+ gpio_set_value(handle, 0);
+
+ printk(KERN_INFO "RESET: Registered gpio device: %d, current val: %d\n",
+ handle, reset_val);
+ return;
+err:
+ gpio_free(handle);
+ return;
+}
+
+
+static void gpio_system_reset(void)
+{
+ gpio_set_value(handle, 1 - reset_val);
+}
+#else
+#define gpio_system_reset() do {} while (0)
+void of_platform_reset_gpio_probe(void)
+{
+ return;
+}
+#endif
+
+void machine_restart(char *cmd)
+{
+ printk(KERN_NOTICE "Machine restart...\n");
+ gpio_system_reset();
+ dump_stack();
+ while (1)
+ ;
+}
+
+void machine_shutdown(void)
+{
+ printk(KERN_NOTICE "Machine shutdown...\n");
+ while (1)
+ ;
+}
+
+void machine_halt(void)
+{
+ printk(KERN_NOTICE "Machine halt...\n");
+ while (1)
+ ;
+}
+
+void machine_power_off(void)
+{
+ printk(KERN_NOTICE "Machine power off...\n");
+ while (1)
+ ;
+}
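Editor's note: for reference, the gpiolib calls that of_platform_reset_gpio_probe() and gpio_system_reset() string together reduce to the sequence below. RESET_GPIO is a made-up number; the real pin comes from the device-tree lookup above.

#include <linux/gpio.h>

#define RESET_GPIO 240				/* hypothetical global GPIO number */

static int drive_reset_line(void)
{
	int err = gpio_request(RESET_GPIO, "reset");

	if (err)
		return err;			/* pin already claimed elsewhere */

	gpio_direction_output(RESET_GPIO, 0);	/* park the line inactive */
	gpio_set_value(RESET_GPIO, 1);		/* assert it to reset the board */
	return 0;
}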
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8c1e0f4dcf18..5372b24ad049 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -52,13 +52,12 @@ void __init setup_arch(char **cmdline_p)
/* irq_early_init(); */
setup_cpuinfo();
- __invalidate_icache_all();
- __enable_icache();
+ microblaze_cache_init();
- __invalidate_dcache_all();
- __enable_dcache();
+ enable_dcache();
- panic_timeout = 120;
+ invalidate_icache();
+ enable_icache();
setup_memory();
@@ -131,6 +130,8 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
#endif
+ lockdep_init();
+
/* initialize device tree for usage in early_printk */
early_init_devtree((void *)_fdt_start);
@@ -186,32 +187,3 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
#endif
-
-void machine_restart(char *cmd)
-{
- printk(KERN_NOTICE "Machine restart...\n");
- dump_stack();
- while (1)
- ;
-}
-
-void machine_shutdown(void)
-{
- printk(KERN_NOTICE "Machine shutdown...\n");
- while (1)
- ;
-}
-
-void machine_halt(void)
-{
- printk(KERN_NOTICE "Machine halt...\n");
- while (1)
- ;
-}
-
-void machine_power_off(void)
-{
- printk(KERN_NOTICE "Machine power off...\n");
- while (1)
- ;
-}
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 1c80e4fc40ce..d8d3bb396cd6 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -44,7 +44,6 @@
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall);
-
asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs)
@@ -176,6 +175,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
int err = 0;
int signal;
+ unsigned long address = 0;
+#ifdef CONFIG_MMU
+ pmd_t *pmdp;
+ pte_t *ptep;
+#endif
frame = get_sigframe(ka, regs, sizeof(*frame));
@@ -216,8 +220,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
Negative 8 offset because return is rtsd r15, 8 */
regs->r15 = ((unsigned long)frame->tramp)-8;
- __invalidate_cache_sigtramp((unsigned long)frame->tramp);
-
+ address = ((unsigned long)frame->tramp);
+#ifdef CONFIG_MMU
+ pmdp = pmd_offset(pud_offset(
+ pgd_offset(current->mm, address),
+ address), address);
+
+ preempt_disable();
+ ptep = pte_offset_map(pmdp, address);
+ if (pte_present(*ptep)) {
+ address = (unsigned long) page_address(pte_page(*ptep));
+ /* MS: add the offset within the page */
+ address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
+ /* MS address is virtual */
+ address = virt_to_phys(address);
+ invalidate_icache_range(address, address + 8);
+ flush_dcache_range(address, address + 8);
+ }
+ pte_unmap(ptep);
+ preempt_enable();
+#else
+ flush_icache_range(address, address + 8);
+ flush_dcache_range(address, address + 8);
+#endif
if (err)
goto give_sigsegv;
@@ -233,6 +258,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
set_fs(USER_DS);
+ /* the tracer may want to single-step inside the handler */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
#ifdef DEBUG_SIG
printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
current->comm, current->pid, frame, regs->pc);
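Editor's note: the MMU branch added above walks the page tables by hand to find the physical page backing the signal trampoline before flushing the caches. A generic sketch of that walk, not the exact Microblaze code; checks for missing pgd/pud/pmd entries are omitted for brevity.

#include <linux/mm.h>
#include <asm/pgtable.h>

static unsigned long user_va_to_pa(struct mm_struct *mm, unsigned long va)
{
	pgd_t *pgd = pgd_offset(mm, va);
	pud_t *pud = pud_offset(pgd, va);
	pmd_t *pmd = pmd_offset(pud, va);
	pte_t *pte;
	unsigned long pa = 0;

	pte = pte_offset_map(pmd, va);
	if (pte_present(*pte))
		pa = (pte_pfn(*pte) << PAGE_SHIFT) | (va & ~PAGE_MASK);
	pte_unmap(pte);

	return pa;
}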
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
new file mode 100644
index 000000000000..123692f22647
--- /dev/null
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -0,0 +1,65 @@
+/*
+ * Stack trace support for Microblaze.
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+
+/* FIXME initial support */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long *sp;
+ unsigned long addr;
+ asm("addik %0, r1, 0" : "=r" (sp));
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned int *sp;
+ unsigned long addr;
+
+ struct thread_info *ti = task_thread_info(tsk);
+
+ if (tsk == current)
+ asm("addik %0, r1, 0" : "=r" (sp));
+ else
+ sp = (unsigned int *)ti->cpu_context.r1;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
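Editor's note: a usage sketch of the interface this new file implements -- the caller supplies the entry buffer and a skip count for uninteresting frames. dump_current_backtrace is a made-up helper name.

#include <linux/stacktrace.h>
#include <linux/kernel.h>

static void dump_current_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 1,		/* drop this helper itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);		/* 0 = no extra indentation */
}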
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index b96f365ea6b1..4088be7d4e29 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -183,7 +183,7 @@ ENTRY(sys_call_table)
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend_wrapper
+ .long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown
@@ -303,7 +303,7 @@ ENTRY(sys_call_table)
.long sys_mkdirat
.long sys_mknodat
.long sys_fchownat
- .long sys_ni_syscall
+ .long sys_futimesat
.long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 5499deae7fa6..ed61b2f17719 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -183,6 +183,31 @@ static cycle_t microblaze_read(struct clocksource *cs)
return (cycle_t) (in_be32(TIMER_BASE + TCR1));
}
+static struct timecounter microblaze_tc = {
+ .cc = NULL,
+};
+
+static cycle_t microblaze_cc_read(const struct cyclecounter *cc)
+{
+ return microblaze_read(NULL);
+}
+
+static struct cyclecounter microblaze_cc = {
+ .read = microblaze_cc_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 24,
+};
+
+int __init init_microblaze_timecounter(void)
+{
+ microblaze_cc.mult = div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC,
+ microblaze_cc.shift);
+
+ timecounter_init(&microblaze_tc, &microblaze_cc, sched_clock());
+
+ return 0;
+}
+
static struct clocksource clocksource_microblaze = {
.name = "microblaze_clocksource",
.rating = 300,
@@ -204,6 +229,9 @@ static int __init microblaze_clocksource_init(void)
out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT);
/* start timer1 - up counting without interrupt */
out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT);
+
+ /* register timecounter - for ftrace support */
+ init_microblaze_timecounter();
return 0;
}
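Editor's note: the cyclecounter's mult/shift pair is a fixed-point scale factor used by the timecounter (here registered for ftrace timestamps) to turn raw timer ticks into nanoseconds. A stand-alone sketch of that arithmetic with the shift of 24 used above; the 100 MHz clock is an assumption, and mult is derived with the generic cycles-to-ns formula rather than the exact expression in the hunk.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int shift = 24;			/* shift used above       */
	const uint64_t freq = 100000000ULL;		/* assumed 100 MHz timer  */
	/* fixed-point cycles->ns factor: mult / 2^shift == ns per cycle */
	const uint64_t mult = (1000000000ULL << shift) / freq;
	const uint64_t cycles = 1000000ULL;		/* 1e6 ticks == 10 ms here */

	printf("mult=%llu, %llu cycles -> %llu ns\n",
	       (unsigned long long)mult,
	       (unsigned long long)cycles,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}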
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index e704188d7855..5ef619aad634 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -26,11 +26,12 @@ SECTIONS {
_stext = . ;
*(.text .text.*)
*(.fixup)
- EXIT_TEXT
- EXIT_CALL
+ EXIT_TEXT
+ EXIT_CALL
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
. = ALIGN (4) ;
_etext = . ;
}
@@ -86,6 +87,7 @@ SECTIONS {
_KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ;
}
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
index 8eb9df5a26c9..a853fe089c44 100644
--- a/arch/microblaze/lib/uaccess.c
+++ b/arch/microblaze/lib/uaccess.c
@@ -39,3 +39,10 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
+
+unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size)
+{
+ memcpy(to, from, size);
+ return 0;
+}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a44892e7cd5b..a57cedf36715 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -41,6 +41,7 @@ char *klimit = _end;
* have available.
*/
unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 46c4ca5d15c5..2820081b21ab 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -144,7 +144,6 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
- /* spin_lock(&init_mm.page_table_lock); */
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */
@@ -158,9 +157,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
/* flush_HPTE(0, va, pg); */
-
}
- /* spin_unlock(&init_mm.page_table_lock); */
return err;
}
@@ -182,12 +179,6 @@ void __init adjust_total_lowmem(void)
#endif
}
-static void show_tmem(unsigned long tmem)
-{
- volatile unsigned long a;
- a = a + tmem;
-}
-
/*
* Map in all of physical memory starting at CONFIG_KERNEL_START.
*/
@@ -197,7 +188,6 @@ void __init mapin_ram(void)
v = CONFIG_KERNEL_START;
p = memory_start;
- show_tmem(memory_size);
for (s = 0; s < memory_size; s += PAGE_SIZE) {
f = _PAGE_PRESENT | _PAGE_ACCESSED |
_PAGE_SHARED | _PAGE_HWEXEC;
diff --git a/arch/microblaze/oprofile/Makefile b/arch/microblaze/oprofile/Makefile
new file mode 100644
index 000000000000..0d0348c8af97
--- /dev/null
+++ b/arch/microblaze/oprofile/Makefile
@@ -0,0 +1,13 @@
+#
+# arch/microblaze/oprofile/Makefile
+#
+
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) microblaze_oprofile.o
diff --git a/arch/microblaze/oprofile/microblaze_oprofile.c b/arch/microblaze/oprofile/microblaze_oprofile.c
new file mode 100644
index 000000000000..def17e59888e
--- /dev/null
+++ b/arch/microblaze/oprofile/microblaze_oprofile.c
@@ -0,0 +1,22 @@
+/*
+ * Microblaze oprofile code
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ return -1;
+}
+
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
index 8e9b4752d3ff..669c7eec293e 100644
--- a/arch/microblaze/platform/Kconfig.platform
+++ b/arch/microblaze/platform/Kconfig.platform
@@ -53,31 +53,12 @@ config OPT_LIB_FUNCTION
config OPT_LIB_ASM
bool "Optimalized lib function ASM"
- depends on OPT_LIB_FUNCTION
+ depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
default n
help
Allows turn on optimalized library function (memcpy and memmove).
Function are written in asm code.
-# This is still a bit broken - disabling for now JW 20070504
-config ALLOW_EDIT_AUTO
- bool "Permit Display/edit of Kconfig.auto platform settings"
- default n
- help
- Allows the editing of auto-generated platform settings from
- the Kconfig.auto file. Obviously this does not change the
- underlying hardware, so be very careful if you go editing
- these settings.
-
- Also, if you enable this, and edit various Kconfig.auto
- settings, YOUR CHANGES WILL BE LOST if you then disable it
- again. You have been warned!
-
- If unsure, say no.
-
-comment "Automatic platform settings from Kconfig.auto"
- depends on ALLOW_EDIT_AUTO
-
if PLATFORM_GENERIC=y
source "arch/microblaze/platform/generic/Kconfig.auto"
endif
diff --git a/arch/microblaze/platform/generic/Kconfig.auto b/arch/microblaze/platform/generic/Kconfig.auto
index fbca22d9c8b9..5d86fc19029d 100644
--- a/arch/microblaze/platform/generic/Kconfig.auto
+++ b/arch/microblaze/platform/generic/Kconfig.auto
@@ -21,7 +21,6 @@
# Definitions for MICROBLAZE0
comment "Definitions for MICROBLAZE0"
- depends on ALLOW_EDIT_AUTO
config KERNEL_BASE_ADDR
hex "Physical address where Linux Kernel is"
@@ -30,33 +29,33 @@ config KERNEL_BASE_ADDR
BASE Address for kernel
config XILINX_MICROBLAZE0_FAMILY
- string "Targetted FPGA family" if ALLOW_EDIT_AUTO
+ string "Targetted FPGA family"
default "virtex5"
config XILINX_MICROBLAZE0_USE_MSR_INSTR
- int "USE_MSR_INSTR range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_MSR_INSTR range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_PCMP_INSTR
- int "USE_PCMP_INSTR range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_PCMP_INSTR range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_BARREL
- int "USE_BARREL range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_BARREL range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_DIV
- int "USE_DIV range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_DIV range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_HW_MUL
- int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)" if ALLOW_EDIT_AUTO
- default 2
+ int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
+ default 0
config XILINX_MICROBLAZE0_USE_FPU
- int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)" if ALLOW_EDIT_AUTO
- default 2
+ int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
+ default 0
config XILINX_MICROBLAZE0_HW_VER
- string "Core version number" if ALLOW_EDIT_AUTO
+ string "Core version number"
default 7.10.d
diff --git a/arch/microblaze/platform/generic/system.dts b/arch/microblaze/platform/generic/system.dts
index 29993f62b30a..2d5c41767cd0 100644
--- a/arch/microblaze/platform/generic/system.dts
+++ b/arch/microblaze/platform/generic/system.dts
@@ -32,11 +32,16 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,microblaze";
+ hard-reset-gpios = <&LEDs_8Bit 2 1>;
model = "testing";
DDR2_SDRAM: memory@90000000 {
device_type = "memory";
reg = < 0x90000000 0x10000000 >;
} ;
+ aliases {
+ ethernet0 = &Hard_Ethernet_MAC;
+ serial0 = &RS232_Uart_1;
+ } ;
chosen {
bootargs = "console=ttyUL0,115200 highres=on";
linux,stdout-path = "/plb@0/serial@84000000";
@@ -127,7 +132,7 @@
mb_plb: plb@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
+ compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
ranges ;
FLASH: flash@a0000000 {
bank-width = <2>;
@@ -214,12 +219,12 @@
#size-cells = <1>;
compatible = "xlnx,compound";
ethernet@81c00000 {
- compatible = "xlnx,xps-ll-temac-1.01.b";
+ compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
device_type = "network";
interrupt-parent = <&xps_intc_0>;
interrupts = < 5 2 >;
llink-connected = <&PIM3>;
- local-mac-address = [ 02 00 00 00 00 00 ];
+ local-mac-address = [ 00 0a 35 00 00 00 ];
reg = < 0x81c00000 0x40 >;
xlnx,bus2core-clk-ratio = <0x1>;
xlnx,phy-type = <0x1>;
@@ -261,6 +266,33 @@
xlnx,is-dual = <0x0>;
xlnx,tri-default = <0xffffffff>;
xlnx,tri-default-2 = <0xffffffff>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ } ;
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&LEDs_8Bit 4 1>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ yellow {
+ label = "Yellow";
+ gpios = <&LEDs_8Bit 5 1>;
+ };
+
+ red {
+ label = "Red";
+ gpios = <&LEDs_8Bit 6 1>;
+ };
+
+ green {
+ label = "Green";
+ gpios = <&LEDs_8Bit 7 1>;
+ };
} ;
RS232_Uart_1: serial@84000000 {
clock-frequency = <125000000>;
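Editor's note: the hard-reset-gpios property added above (controller phandle, pin 2, flag 1 = active low) is what reset.c parses by hand. On kernels that provide of_get_named_gpio() the lookup collapses to the sketch below; this is shown only for illustration and is not part of this patch.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

static int lookup_reset_gpio(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int gpio;

	if (!root)
		return -ENODEV;

	gpio = of_get_named_gpio(root, "hard-reset-gpios", 0);
	of_node_put(root);

	return gpio;	/* global GPIO number, or a negative errno */
}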
diff --git a/arch/microblaze/platform/platform.c b/arch/microblaze/platform/platform.c
index 56e0234fa34b..5b89b58c5aed 100644
--- a/arch/microblaze/platform/platform.c
+++ b/arch/microblaze/platform/platform.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/prom.h>
+#include <asm/setup.h>
static struct of_device_id xilinx_of_bus_ids[] __initdata = {
{ .compatible = "simple-bus", },
@@ -26,6 +27,7 @@ static struct of_device_id xilinx_of_bus_ids[] __initdata = {
static int __init microblaze_device_probe(void)
{
of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+ of_platform_reset_gpio_probe();
return 0;
}
device_initcall(microblaze_device_probe);
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f08..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
 * becomes equal to the initial value of the tail.
*/
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return ((counters >> 14) ^ counters) & 0x1fff;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- while (__raw_spin_is_locked(x)) { cpu_relax(); }
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ while (arch_spin_is_locked(x)) { cpu_relax(); }
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
[my_ticket] "=&r" (my_ticket));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_llsc_mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " # __raw_spin_unlock \n"
+ " # arch_spin_unlock \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addiu %[ticket], %[ticket], 1 \n"
" ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
[ticket] "=&r" (tmp));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_unlock \n"
+ " .set push # arch_spin_unlock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
}
}
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
[now_serving] "=&r" (tmp3));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- "1: ll %1, %2 # __raw_read_unlock \n"
+ "1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_unlock \n"
+ " .set noreorder # arch_read_unlock \n"
"1: ll %1, %2 \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_llsc_mb();
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
__asm__ __volatile__(
- " # __raw_write_unlock \n"
+ " # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return ret;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_SPINLOCK_H */
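Editor's note: the MIPS conversion above keeps its ticket-lock design -- one word holds both a "now serving" field and a "next ticket" field, a locker atomically takes a ticket and spins until it is served, and unlock advances the serving counter. A portable sketch of the same idea using C11 atomics instead of ll/sc and the packed bitfields.

#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;		/* next ticket to hand out   */
	atomic_uint serving;		/* ticket currently admitted */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	unsigned int my_ticket = atomic_fetch_add(&l->next, 1);

	while (atomic_load(&l->serving) != my_ticket)
		;			/* spin; real code adds cpu_relax() */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	atomic_fetch_add(&l->serving, 1);
}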
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c1..ee197c2f9c98 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,14 +12,14 @@ typedef struct {
* bits 15..28: ticket
*/
unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
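Editor's note: the raw_spinlock_t -> arch_spinlock_t rename frees the raw_ prefix for the new lockdep-aware raw_spinlock_t layer; the arch-level type stays directly usable in the few places below that machinery (the PA-RISC atomic hash and the powerpc RTAS lock later in this patch do exactly that). A hedged usage sketch; low_level_lock is a made-up name.

#include <linux/spinlock.h>
#include <linux/irqflags.h>

static arch_spinlock_t low_level_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void low_level_section(void)
{
	unsigned long flags;

	local_irq_save(flags);			/* no irq handling at this layer */
	arch_spin_lock(&low_level_lock);
	/* minimal critical section: no sleeping, no lockdep */
	arch_spin_unlock(&low_level_lock);
	local_irq_restore(flags);
}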
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff4..8b0b4181219f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_putc(p, '\n');
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6d39e222b170..6153b6a05ccf 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, PCIINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, SCUINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, BCUINTR);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
pin = SYSINT1_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign0 = icu1_read(INTASSIGN0);
intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
intassign1 |= (uint16_t)assign << 9;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
icu1_write(INTASSIGN0, intassign0);
icu1_write(INTASSIGN1, intassign1);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
pin = SYSINT2_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign2 = icu1_read(INTASSIGN2);
intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
intassign3 |= (uint16_t)assign << 12;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
icu1_write(INTASSIGN2, intassign2);
icu1_write(INTASSIGN3, intassign3);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 4c3c58ef5cda..e2d5ed891f37 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v)
/* display information rows, one per active CPU */
case 1 ... NR_IRQS - 1:
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
break;
/* polish off with NMI and error counters */
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index dacafab00eb2..67e6389d625a 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
#define KPROBE_HIT_ACTIVE 0x00000001
#define KPROBE_HIT_SS 0x00000002
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long nextpc;
- current_kprobe_orig_pc = regs->pc;
- memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
- regs->pc = (unsigned long) current_kprobe_ss_buf;
+ cur_kprobe_orig_pc = regs->pc;
+ memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+ regs->pc = (unsigned long) cur_kprobe_ss_buf;
- nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
- if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
- current_kprobe_next_pc =
- current_kprobe_orig_pc + (nextpc - regs->pc);
+ nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+ if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+ cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
else
- current_kprobe_next_pc = nextpc;
+ cur_kprobe_next_pc = nextpc;
/* branching instructions need special handling */
- if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+ if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
nextpc = singlestep_branch_setup(regs);
- current_kprobe_bp_addr = nextpc;
+ cur_kprobe_bp_addr = nextpc;
*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
- mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
- sizeof(current_kprobe_ss_buf));
+ mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+ sizeof(cur_kprobe_ss_buf));
mn10300_icache_inv();
}
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
disarm_kprobe(p, regs);
ret = 1;
} else {
- p = current_kprobe;
+ p = cur_kprobe;
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
}
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
+ cur_kprobe = p;
if (p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
return 1;
@@ -491,8 +490,8 @@ no_kprobe:
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
/* we may need to fixup regs/stack after singlestepping a call insn */
- if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
- regs->pc = current_kprobe_orig_pc;
+ if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+ regs->pc = cur_kprobe_orig_pc;
switch (p->ainsn.insn[0]) {
case 0xcd: /* CALL (d16,PC) */
*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
}
}
- regs->pc = current_kprobe_next_pc;
- current_kprobe_bp_addr = 0;
+ regs->pc = cur_kprobe_next_pc;
+ cur_kprobe_bp_addr = 0;
}
static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if (cur_kprobe->post_handler)
+ cur_kprobe->post_handler(cur_kprobe, regs, 0);
- resume_execution(current_kprobe, regs);
+ resume_execution(cur_kprobe, regs);
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (current_kprobe->fault_handler &&
- current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ if (cur_kprobe->fault_handler &&
+ cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
return 1;
if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
+ resume_execution(cur_kprobe, regs);
reset_current_kprobe();
preempt_enable_no_resched();
}
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
switch (val) {
case DIE_BREAKPOINT:
- if (current_kprobe_bp_addr != args->regs->pc) {
+ if (cur_kprobe_bp_addr != args->regs->pc) {
if (kprobe_handler(args->regs))
return NOTIFY_STOP;
} else {
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b2..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
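Editor's note: the _atomic_spin_lock_irqsave() machinery above exists because PA-RISC lacks atomic read-modify-write instructions -- every atomic_t update is serialized by one of a small array of spinlocks, selected by hashing the variable's address. A user-space C11 sketch of the hashing idea; the 16-byte line size and four-entry array are assumptions for the example.

#include <stdatomic.h>
#include <stdint.h>

#define ATOMIC_HASH_SIZE	4
#define L1_CACHE_SHIFT		4	/* assume 16-byte lines for the hash */

static atomic_uint hash_locks[ATOMIC_HASH_SIZE];	/* zero = unlocked */

static atomic_uint *atomic_hash(const void *addr)
{
	return &hash_locks[((uintptr_t)addr >> L1_CACHE_SHIFT) &
			   (ATOMIC_HASH_SIZE - 1)];
}

static void hashed_atomic_add(int *counter, int v)
{
	atomic_uint *lock = atomic_hash(counter);

	while (atomic_exchange(lock, 1u))
		;			/* spin until the bucket lock is free */
	*counter += v;			/* the "atomic" op, now serialized    */
	atomic_store(lock, 0u);
}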
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter--;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
return 0;
/* Wait until we have a realistic chance at the lock */
- while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
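Editor's note: the parisc rwlock above is built from parts -- a plain arch spinlock guards a counter, readers bump it while holding the guard only briefly, and a writer waits for the count to drain to zero, marks it -1 and keeps the guard held for the whole write section. A compact user-space C11 sketch of that construction, illustrative rather than the kernel code.

#include <stdatomic.h>

struct counter_rwlock {
	atomic_uint guard;	/* 0 = free, 1 = held; protects 'counter'   */
	int counter;		/* >0: that many readers, -1: write-locked  */
};

static void guard_acquire(struct counter_rwlock *l)
{
	while (atomic_exchange(&l->guard, 1u))
		;				/* spin */
}

static void guard_release(struct counter_rwlock *l)
{
	atomic_store(&l->guard, 0u);
}

static void crw_read_lock(struct counter_rwlock *l)
{
	guard_acquire(l);			/* blocks while a writer holds the guard */
	l->counter++;
	guard_release(l);
}

static void crw_read_unlock(struct counter_rwlock *l)
{
	guard_acquire(l);
	l->counter--;
	guard_release(l);
}

static void crw_write_lock(struct counter_rwlock *l)
{
	for (;;) {
		guard_acquire(l);
		if (l->counter == 0) {		/* no readers left */
			l->counter = -1;	/* mark write-locked ...          */
			return;			/* ... and keep the guard held    */
		}
		guard_release(l);		/* readers active: back off, retry */
	}
}

static void crw_write_unlock(struct counter_rwlock *l)
{
	l->counter = 0;
	guard_release(l);
}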
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b2..8c373aa28a86 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
-} raw_spinlock_t;
+} arch_spinlock_t;
typedef struct {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e7610cb33d5..f47465e8d040 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab19..353963d42059 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,8 +12,8 @@
#include <asm/atomic.h>
#ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 168fce726201..20de73c36682 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d9ea8d39c342..1d3b270d3083 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -37,7 +37,7 @@ extern void cpu_die(void);
extern void smp_send_debugger_break(int cpu);
extern void smp_message_recv(int);
-DECLARE_PER_CPU(unsigned int, pvr);
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(cpumask_t map);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 198266cf9e2d..764094cff681 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
- return arch_spin_trylock(lock) == 0;
+ return __arch_spin_trylock(lock) == 0;
}
/*
@@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
#define SHARED_PROCESSOR 0
#endif
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
- if (likely(arch_spin_trylock(lock) == 0))
+ if (likely(__arch_spin_trylock(lock) == 0))
break;
do {
HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
}
static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
CLEAR_IO_SYNC;
while (1) {
- if (likely(arch_spin_trylock(lock) == 0))
+ if (likely(__arch_spin_trylock(lock) == 0))
break;
local_save_flags(flags_dis);
local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
SYNC_IO;
- __asm__ __volatile__("# __raw_spin_unlock\n\t"
+ __asm__ __volatile__("# arch_spin_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
lock->slock = 0;
}
#ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
/*
@@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
* read-locks.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_write_can_lock(rw) (!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
return tmp;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_read_trylock(rw) > 0))
+ if (likely(__arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_write_trylock(rw) == 0))
+ if (likely(__arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
}
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return arch_read_trylock(rw) > 0;
+ return __arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return arch_write_trylock(rw) == 0;
+ return __arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) __spin_yield(lock)
-#define _raw_read_relax(lock) __rw_yield(lock)
-#define _raw_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) __spin_yield(lock)
+#define arch_read_relax(lock) __rw_yield(lock)
+#define arch_write_relax(lock) __rw_yield(lock)
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
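The powerpc hunks keep the ll/sc primitive as a double-underscored helper (__arch_spin_trylock) and build arch_spin_lock() as a trylock-in-a-loop with a relax/yield step while the lock word stays non-zero. A rough sketch of that structure, assuming a C11 atomic exchange stands in for the lwarx/stwcx. sequence and an empty demo_relax() stands in for HMT_low()/__spin_yield():
/* Illustrative sketch: the "trylock primitive + spin loop" structure used
 * above.  demo_relax() is where priority lowering and the hypervisor yield
 * would go in the real implementation. */
#include <stdatomic.h>

typedef struct {
	atomic_uint slock;	/* 0 == unlocked, non-zero == lock token */
} demo_spinlock_t;

/* Returns 0 on success, like __arch_spin_trylock() returning the old value. */
static unsigned int demo_spin_trylock(demo_spinlock_t *lock)
{
	return atomic_exchange_explicit(&lock->slock, 1, memory_order_acquire);
}

static void demo_relax(demo_spinlock_t *lock)
{
	(void)lock;	/* placeholder for HMT_low()/__spin_yield() */
}

static void demo_spin_lock(demo_spinlock_t *lock)
{
	while (1) {
		if (demo_spin_trylock(lock) == 0)
			break;
		do {
			demo_relax(lock);
		} while (atomic_load_explicit(&lock->slock,
					      memory_order_relaxed) != 0);
	}
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
	atomic_store_explicit(&lock->slock, 0, memory_order_release);
}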
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 74236c9f05b1..2351adc4fdc4 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f6dca4f4b295..9040330b0530 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action || !action->handler)
@@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
if (!desc)
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
seq_printf(m, "%5d ", i);
@@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
seq_printf(m, "%s\n", p);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 936f04dbfc6f..a3c11cac3d71 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
* Since we can't get PMU interrupts inside a PMU interrupt handler,
* we don't need separate irq and nmi entries here.
*/
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
- struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+ struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
entry->nr = 0;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index bf90361bb70f..fd0d29493fd6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
#include <asm/mmu.h>
struct rtas_t rtas = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
local_irq_save(flags);
preempt_disable();
- __raw_spin_lock_flags(&rtas.lock, flags);
+ arch_spin_lock_flags(&rtas.lock, flags);
return flags;
}
static void unlock_rtas(unsigned long flags)
{
- __raw_spin_unlock(&rtas.lock);
+ arch_spin_unlock(&rtas.lock);
local_irq_restore(flags);
preempt_enable();
}
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
return 1;
}
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static u64 timebase = 0;
void __cpuinit rtas_give_timebase(void)
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
local_irq_save(flags);
hard_irq_disable();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
timebase = get_tb();
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
{
while (!timebase)
barrier();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
}
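rtas_give_timebase()/rtas_take_timebase() implement a one-shot hand-off: the giver publishes the timebase under the arch spinlock and busy-waits until the taker installs it and clears the shared variable. A hedged sketch of the same handshake with C11 atomics; read_timebase()/set_timebase() are placeholders for get_tb()/set_tb(), and the freeze-time-base RTAS call is omitted:
/* Illustrative sketch: one CPU publishes a value under a raw/arch-level lock
 * and waits for it to be consumed; the other waits for it to appear,
 * installs it, then clears it to acknowledge the hand-off. */
#include <stdatomic.h>
#include <stdint.h>

static atomic_flag timebase_lock = ATOMIC_FLAG_INIT;
static _Atomic uint64_t shared_timebase;

static uint64_t read_timebase(void) { return 12345; }	/* placeholder */
static void set_timebase(uint64_t tb) { (void)tb; }	/* placeholder */

static void demo_give_timebase(void)
{
	while (atomic_flag_test_and_set(&timebase_lock))
		;					/* arch_spin_lock() */
	atomic_store(&shared_timebase, read_timebase());
	atomic_flag_clear(&timebase_lock);		/* arch_spin_unlock() */

	while (atomic_load(&shared_timebase) != 0)
		;	/* wait for the taker to consume the value */
}

static void demo_take_timebase(void)
{
	while (atomic_load(&shared_timebase) == 0)
		;	/* wait for the giver to publish a value */

	while (atomic_flag_test_and_set(&timebase_lock))
		;
	set_timebase(atomic_load(&shared_timebase));
	atomic_store(&shared_timebase, 0);		/* ack the hand-off */
	atomic_flag_clear(&timebase_lock);
}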
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 845c72ab7357..03dd6a248198 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */
#ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned int, pvr);
+DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
static int show_cpuinfo(struct seq_file *m, void *v)
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
#ifdef CONFIG_SMP
- pvr = per_cpu(pvr, cpu_id);
+ pvr = per_cpu(cpu_pvr, cpu_id);
#else
pvr = mfspr(SPRN_PVR);
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 97196eefef3e..a521fb8a40ee 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];
static void __devinit smp_store_cpu_info(int id)
{
- per_cpu(pvr, id) = mfspr(SPRN_PVR);
+ per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}
static void __init smp_create_idle(unsigned int cpu)
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 79d0fa3a470d..58e14fba11b1 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -25,7 +25,7 @@
#include <asm/smp.h>
#include <asm/firmware.h>
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
}
#endif
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
HMT_medium();
}
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index cc0c854291d7..0bac3a3dbecf 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
u32 status, enable;
/* Mask off the cascaded IRQ */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->mask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
* are pending. 'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
}
/* Processing done; can reenable the cascade now */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED))
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index f9dbf76a763f..6829cf7e2bda 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -54,7 +54,7 @@ struct iic {
struct device_node *node;
};
-static DEFINE_PER_CPU(struct iic, iic);
+static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT 2
static struct irq_host *iic_host;
@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq)
static void iic_eoi(unsigned int irq)
{
- struct iic *iic = &__get_cpu_var(iic);
+ struct iic *iic = &__get_cpu_var(cpu_iic);
out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
BUG_ON(iic->eoi_ptr < 0);
}
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
struct iic *iic;
unsigned int virq;
- iic = &__get_cpu_var(iic);
+ iic = &__get_cpu_var(cpu_iic);
*(unsigned long *) &pending =
in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)
void iic_setup_cpu(void)
{
- out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+ out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
}
u8 iic_get_target_id(int cpu)
{
- return per_cpu(iic, cpu).target_id;
+ return per_cpu(cpu_iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)
void iic_cause_IPI(int cpu, int mesg)
{
- out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
+ out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
}
struct irq_host *iic_get_irq_host(int node)
@@ -237,7 +237,7 @@ extern int noirqdebug;
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
goto out_eoi;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_eoi:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
/* XXX FIXME: should locate the linux CPU number from the HW cpu
* number properly. We are lucky for now
*/
- struct iic *iic = &per_cpu(iic, hw_cpu);
+ struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
BUG_ON(iic->regs == NULL);
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 07762259c60a..86c4b29eea89 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
struct irq_desc *desc = irq_to_desc(irq);
if (desc && desc->chip && desc->chip->startup) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip->startup(irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index a4619347aa7e..242f8095c2df 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
}
#ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
local_irq_save(flags);
hard_irq_disable();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
while (!timebase)
smp_rmb();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
}
struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 937a544a236d..c5f3116b6ca5 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -54,7 +54,7 @@ struct dtl {
int buf_entries;
u64 last_idx;
};
-static DEFINE_PER_CPU(struct dtl, dtl);
+static DEFINE_PER_CPU(struct dtl, cpu_dtl);
/*
* Dispatch trace log event mask:
@@ -261,7 +261,7 @@ static int dtl_init(void)
/* set up the per-cpu log structures */
for_each_possible_cpu(i) {
- struct dtl *dtl = &per_cpu(dtl, i);
+ struct dtl *dtl = &per_cpu(cpu_dtl, i);
dtl->cpu = i;
rc = dtl_setup_file(dtl);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 7d01b58f3989..b9b9e11609ec 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void)
|| desc->chip->set_affinity == NULL)
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
@@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void)
cpumask_setall(irq_to_desc(virq)->affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 62e50258cdef..c6e11b077108 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 intr_index;
u32 have_shift = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (desc->chip->mask_ack)
desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
break;
}
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int __devinit fsl_of_msi_probe(struct of_device *dev,
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 7d10074b3304..6f220a913e42 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
int src;
int subvirq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->mask(virq);
else
desc->chip->mask_ack(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
generic_handle_irq(subvirq);
uic_irq_ret:
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static struct uic * __init uic_init_one(struct device_node *node)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 495589950dc7..5c91995b74e4 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev)
return appldata_restore(dev);
}
-static struct dev_pm_ops appldata_pm_ops = {
+static const struct dev_pm_ops appldata_pm_ops = {
.freeze = appldata_freeze,
.thaw = appldata_thaw,
.restore = appldata_restore,
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..a587907d77f3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
* (the type definitions are in asm/spinlock_types.h)
*/
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) \
- _raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) \
+ arch_spin_relax(lock); } while (0)
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait(lp);
+ arch_spin_lock_wait(lp);
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait_flags(lp, flags);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return 1;
- return _raw_spin_trylock_retry(lp);
+ return arch_spin_trylock_retry(lp);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
} while (cmp != old);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
return _raw_write_trylock_retry(rw);
}
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de04..9c76656a0af0 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 071c81f179ef..0168472b2fdf 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -1178,7 +1179,7 @@ debug_get_uint(char *buf)
{
int rc;
- for(; isspace(*buf); buf++);
+ buf = skip_spaces(buf);
rc = simple_strtoul(buf, &buf, 10);
if(*buf){
rc = -EINVAL;
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..10754a375668 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
_raw_yield();
}
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return;
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
local_irq_restore(flags);
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
unsigned int cpu = ~smp_processor_id();
int count;
for (count = spin_retry; count > 0; count--) {
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return 1;
}
return 0;
}
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
-void _raw_spin_relax(raw_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
{
unsigned int cpu = lock->owner_cpu;
if (cpu != 0)
_raw_yield_cpu(~cpu);
}
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
local_irq_disable();
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
int count = spin_retry;
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
int count = spin_retry;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return 1;
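The s390 slow paths above are bounded compare-and-swap retry loops on a lock word holding the owner CPU, with a yield directed at the holder once the retry budget runs out. A portable sketch of that structure, assuming C11 compare-exchange in place of _raw_compare_and_swap(), a stubbed-out yield, and 0 meaning "unlocked" in the lock word:
/* Illustrative sketch: the bounded compare-and-swap retry loop used by the
 * s390 slow paths.  After DEMO_SPIN_RETRY failed attempts the real code
 * yields the CPU to the current lock holder and resets the budget. */
#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_SPIN_RETRY 1000

typedef struct {
	atomic_uint owner_cpu;	/* 0 == unlocked, otherwise holder id */
} demo_spinlock_t;

static void demo_yield_to(unsigned int owner)
{
	(void)owner;	/* real code directs a yield hint at the holder */
}

static bool demo_cas(demo_spinlock_t *lp, unsigned int old, unsigned int newval)
{
	return atomic_compare_exchange_strong(&lp->owner_cpu, &old, newval);
}

static void demo_spin_lock_wait(demo_spinlock_t *lp, unsigned int cpu)
{
	int count = DEMO_SPIN_RETRY;

	for (;;) {
		unsigned int owner = atomic_load(&lp->owner_cpu);

		if (count-- <= 0) {
			if (owner != 0)
				demo_yield_to(owner);
			count = DEMO_SPIN_RETRY;
		}
		if (owner != 0)
			continue;	/* still locked: keep polling */
		if (demo_cas(lp, 0, cpu))
			return;		/* acquired the lock */
	}
}

static int demo_spin_trylock_retry(demo_spinlock_t *lp, unsigned int cpu)
{
	int count;

	for (count = DEMO_SPIN_RETRY; count > 0; count--) {
		if (atomic_load(&lp->owner_cpu) != 0)
			continue;
		if (demo_cas(lp, 0, cpu))
			return 1;
	}
	return 0;
}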
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
-#define __raw_spin_is_locked(x) ((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
*
* We make no fairness assumptions. They have a cost.
*/
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_spin_lock \n\t"
+ "movli.l @%2, %0 ! arch_spin_lock \n\t"
"mov %0, %1 \n\t"
"mov #0, %0 \n\t"
"movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__ (
- "mov #1, %0 ! __raw_spin_unlock \n\t"
+ "mov #1, %0 ! arch_spin_unlock \n\t"
"mov.l %0, @%1 \n\t"
: "=&z" (tmp)
: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_spin_trylock \n\t"
+ "movli.l @%2, %0 ! arch_spin_trylock \n\t"
"mov %0, %1 \n\t"
"mov #0, %0 \n\t"
"movco.l %0, @%2 \n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x) ((x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_lock \n\t"
+ "movli.l @%1, %0 ! arch_read_lock \n\t"
"cmp/pl %0 \n\t"
"bf 1b \n\t"
"add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_unlock \n\t"
+ "movli.l @%1, %0 ! arch_read_unlock \n\t"
"add #1, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_write_lock \n\t"
+ "movli.l @%1, %0 ! arch_write_lock \n\t"
"cmp/hs %2, %0 \n\t"
"bf 1b \n\t"
"sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
- "mov.l %1, @%0 ! __raw_write_unlock \n\t"
+ "mov.l %1, @%0 ! arch_write_unlock \n\t"
:
: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
: "t", "memory"
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_read_trylock \n\t"
+ "movli.l @%2, %0 ! arch_read_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/pl %0 \n\t"
"bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return (oldval > 0);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_write_trylock \n\t"
+ "movli.l @%2, %0 ! arch_write_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/hs %3, %0 \n\t"
"bf 2f \n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (oldval > (RW_LOCK_BIAS - 1));
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SH_SPINLOCK_H */
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60c..9b7560db06ca 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index e1913f28f418..d2d41d046657 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 33ac1a9ac881..108197ac0d56 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC64
select HAVE_SYSCALL_WRAPPERS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_SYSCALL_TRACEPOINTS
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 90d5fe223a74..9d3c889718ac 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -33,4 +33,18 @@ config FRAME_POINTER
depends on MCOUNT
default y
+config DEBUG_STRICT_USER_COPY_CHECKS
+ bool "Strict copy size checks"
+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+ ---help---
+ Enabling this option turns a certain set of sanity checks for user
+ copy operations into compile time failures.
+
+ The copy_from_user() etc checks are there to help test if there
+ are sufficient security checks on the length argument of
+ the copy operation, by having gcc prove that the argument is
+ within bounds.
+
+ If unsure, or if you run an older (pre 4.4) gcc, say N.
+
endmenu
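The help text explains the idea: when gcc can prove the destination object's size at compile time, an over-long copy_from_user() is redirected to a specially annotated function so the build fails (or warns) instead of silently overflowing. A small userspace sketch of the same mechanism, assuming gcc's __builtin_object_size() and the error function attribute; the demo_* names are made up, and the check only folds away in the safe cases when optimization is enabled:
/* Illustrative sketch (gcc): turn a provably over-sized copy into a
 * compile-time diagnostic, the same mechanism the uaccess hunks below use
 * via __compiletime_object_size()/__compiletime_error(). */
#include <stddef.h>
#include <string.h>

extern void demo_copy_overflow(void)
	__attribute__((error("copy size is not provably within the destination")));

static inline size_t demo_copy(void *to, const void *from, size_t n)
{
	size_t sz = __builtin_object_size(to, 0);	/* (size_t)-1 if unknown */

	if (sz != (size_t)-1 && sz < n) {
		demo_copy_overflow();	/* survives only for provable overflows */
		return n;		/* report that nothing was copied */
	}
	memcpy(to, from, n);
	return 0;
}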
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
#include <asm/psr.h>
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "g2", "memory", "cc");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
* ------------------------------------
* 31 8 7 0
*
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_lock(lock); \
+ __arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_unlock(lock); \
+ __arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
return res;
}
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
- res = arch_read_trylock(lock); \
+ res = __arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
* the spinner sections must be pre-V9 branches.
*/
-#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
+#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-#define __raw_spin_unlock_wait(lp) \
+#define arch_spin_unlock_wait(lp) \
do { rmb(); \
} while((lp)->lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0UL);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
return result;
}
-#define __raw_read_lock(p) arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p) arch_read_trylock(p)
-#define __raw_read_unlock(p) arch_read_unlock(p)
-#define __raw_write_lock(p) arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p) arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
-
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
-
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_lock(p) arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p) arch_read_trylock(p)
+#define arch_read_unlock(p) arch_read_unlock(p)
+#define arch_write_lock(p) arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p) arch_write_unlock(p)
+#define arch_write_trylock(p) arch_write_trylock(p)
+
+#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b5..edf196ee4ef8 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
#ifdef __KERNEL__
extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
})
#define __HAVE_ARCH_MEMCPY
-
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- extern void __copy_1page(void *, const void *);
-
- if(n <= 32) {
- __builtin_memcpy(to, from, n);
- } else if (((unsigned int) to & 7) != 0) {
- /* Destination is not aligned on the double-word boundary */
- __memcpy(to, from, n);
- } else {
- switch(n) {
- case PAGE_SIZE:
- __copy_1page(to, from);
- break;
- default:
- __memcpy(to, from, n);
- break;
- }
- }
- return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- __memcpy(to, from, n);
- return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
-
-static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
-{
- extern void bzero_1page(void *);
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if(!c) {
- if(count == PAGE_SIZE)
- bzero_1page(s);
- else
- __bzero(s, count);
- } else {
- __memset(s, c, count);
- }
- return s;
-}
-
-static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
-{
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if(!c)
- __bzero(s, count);
- else
- __memset(s, c, count);
- return s;
-}
-
-static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
-{
- __memset(s, c, count);
- return s;
-}
-
-#undef memset
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
- __constant_c_and_count_memset((s), (c), (count)) : \
- __constant_c_memset((s), (c), (count))) \
- : __nonconstant_memset((s), (c), (count)))
+#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17eb..9623bc213158 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
#include <asm/asi.h>
-extern void *__memset(void *,int,__kernel_size_t);
-
#ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *, const void *, __kernel_size_t);
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
-extern void *__builtin_memset(void *,int,__kernel_size_t);
-
-static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
-{
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if (!c) {
- __bzero(s, count);
- return s;
- } else
- return __memset(s, c, count);
-}
-
-#undef memset
-#define memset(s, c, count) \
-((__builtin_constant_p(count) && (count) <= 32) ? \
- __builtin_memset((s), (c), (count)) : \
- (__builtin_constant_p(c) ? \
- __constant_memset((s), (c), (count)) : \
- __memset((s), (c), (count))))
+#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
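Both sparc string.h hunks drop the hand-rolled constant-size dispatch because __builtin_memcpy()/__builtin_memset() already expand small constant-size calls inline and fall back to the library routine otherwise. A tiny sketch of what the builtins provide, with demo_* names as illustrative assumptions:
/* Illustrative sketch: the compiler open-codes the small constant-size copy
 * (typically as a couple of stores) and emits a normal memcpy/memset call
 * for the variable-size case -- the dispatch the removed helpers did by hand. */
#include <stddef.h>

struct demo_header {
	unsigned int magic;
	unsigned int len;
};

static void demo_init(struct demo_header *h, const struct demo_header *tmpl,
		      void *payload, size_t payload_len)
{
	__builtin_memcpy(h, tmpl, sizeof(*h));		/* constant size: inlined */
	__builtin_memset(payload, 0, payload_len);	/* variable size: call */
}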
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe407..7257ebb8f394 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac481034..489d2ba92bcb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
return __copy_user(to, (__force void __user *) from, n);
}
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ int sz = __compiletime_object_size(to);
+
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return -EFAULT;
+ }
+
if (n && __access_ok((unsigned long) from, n))
return __copy_user((__force void __user *) to, from, n);
else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c70..dbc141660994 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
*/
#ifdef __KERNEL__
+#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
extern int __get_user_bad(void);
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
extern unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_from_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
+ unsigned long ret = (unsigned long) -EFAULT;
+ int sz = __compiletime_object_size(to);
+
+ if (likely(sz == -1 || sz >= size)) {
+ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+ } else {
+ copy_from_user_overflow();
+ }
return ret;
}
#define __copy_from_user copy_from_user
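
Both uaccess headers now gate copy_from_user() on __compiletime_object_size(): when the compiler can prove the destination buffer's size and it is smaller than the requested length, the build warns (or fails outright under CONFIG_DEBUG_STRICT_USER_COPY_CHECKS) and the copy is refused at run time. The kernel macro is built on GCC's __builtin_object_size(); a rough user-space analogue of the check, assuming a GCC/Clang build with optimization enabled so the object size is visible to the compiler:

#include <stdio.h>
#include <string.h>

/* __builtin_object_size(ptr, 0) yields the object's size when the compiler
 * can prove it, or (size_t)-1 when it cannot -- the same convention the
 * kernel's __compiletime_object_size() uses. */
static void check_copy(void *dst, const void *src, size_t n, size_t dst_sz)
{
    if (dst_sz != (size_t)-1 && dst_sz < n) {
        fprintf(stderr, "copy of %zu bytes into %zu-byte buffer rejected\n",
                n, dst_sz);
        return;
    }
    memcpy(dst, src, n);
}

#define checked_copy(dst, src, n) \
    check_copy((dst), (src), (n), __builtin_object_size((dst), 0))

int main(void)
{
    char small[8];
    const char msg[] = "0123456789ABCDEF";

    checked_copy(small, msg, 4);            /* fits: copied */
    checked_copy(small, msg, sizeof(msg));  /* too large: rejected */
    return 0;
}
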
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d8d25bd97121..cb4b9bfd0d87 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -398,7 +398,7 @@
#define __NR_perf_event_open 327
#define __NR_recvmmsg 328
-#define NR_SYSCALLS 329
+#define NR_syscalls 329
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index ec9c7bc67d21..1504df8ddf70 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1294,7 +1294,7 @@ linux_sparc_syscall:
sethi %hi(PSR_SYSCALL), %l4
or %l0, %l4, %l0
/* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS
+ cmp %g1, NR_syscalls
bgeu linux_sparc_ni_syscall
sll %g1, 2, %l4
ld [%l7 + %l4], %l7
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d3b1a3076569..29973daa9930 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <trace/syscall.h>
#include <asm/ftrace.h>
@@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data)
}
#endif
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned int sys_call_table[];
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr];
+}
+
+#endif
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index ce996f97855f..8d6882bb480a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
unsigned long flags;
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
irq_desc[irq].affinity);
}
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
tick_ops->disable_irq();
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 3bc6527c95af..6716584e48ab 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
+ if ((unsigned long) p->addr & 0x3UL)
+ return -EILSEQ;
+
p->ainsn.insn[0] = *p->addr;
flushi(&p->ainsn.insn[0]);
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index cb3c72c45aab..e0ba898e30cf 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 938da19dc065..cdc91d919e93 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
+#include <linux/bootmem.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
@@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
static void mdesc_lmb_free(struct mdesc_handle *hp)
{
- unsigned int alloc_size, handle_size = hp->handle_size;
- unsigned long start, end;
+ unsigned int alloc_size;
+ unsigned long start;
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
- alloc_size = PAGE_ALIGN(handle_size);
-
- start = (unsigned long) hp;
- end = start + alloc_size;
-
- while (start < end) {
- struct page *p;
-
- p = virt_to_page(start);
- ClearPageReserved(p);
- __free_page(p);
- start += PAGE_SIZE;
- }
+ alloc_size = PAGE_ALIGN(hp->handle_size);
+ start = __pa(hp);
+ free_bootmem_late(start, alloc_size);
}
static struct mdesc_mem_ops lmb_mdesc_ops = {
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b129611590a4..f30f4a1ead23 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
- local_inc(&__get_cpu_var(alert_counter));
- if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
+ __this_cpu_inc(per_cpu_var(alert_counter));
+ if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- local_set(&__get_cpu_var(alert_counter), 0);
+ __this_cpu_write(per_cpu_var(alert_counter), 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
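
The watchdog hunk above replaces a local_t counter with a plain per-CPU long updated through the this_cpu operations. A hedged kernel-module sketch of the same pattern, written in the later spelling that no longer needs the per_cpu_var() wrapper shown in this tree:

#include <linux/module.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(long, alert_counter);   /* hypothetical demo counter */

static int __init percpu_demo_init(void)
{
    /* Bump and read the counter of whichever CPU we happen to run on;
     * this_cpu ops fold the per-CPU offset into a single instruction
     * where the architecture supports it. */
    this_cpu_inc(alert_counter);
    pr_info("alert_counter on this cpu: %ld\n", this_cpu_read(alert_counter));
    return 0;
}

static void __exit percpu_demo_exit(void)
{
    this_cpu_write(alert_counter, 0);
}

module_init(percpu_demo_init);
module_exit(percpu_demo_exit);
MODULE_LICENSE("GPL");
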
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 881947e59e95..0a6f2d1798d1 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -104,9 +104,19 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
int i;
/* Check address type match */
- if ((addr[0] ^ range[0]) & 0x03000000)
- return -EINVAL;
+ if (!((addr[0] ^ range[0]) & 0x03000000))
+ goto type_match;
+
+ /* Special exception, we can map a 64-bit address into
+ * a 32-bit range.
+ */
+ if ((addr[0] & 0x03000000) == 0x03000000 &&
+ (range[0] & 0x03000000) == 0x02000000)
+ goto type_match;
+
+ return -EINVAL;
+type_match:
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 4ae91dc2feb9..2f6524d1a817 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
+#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
@@ -37,6 +38,9 @@
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
#include "entry.h"
/* #define ALLOW_INIT_TRACING */
@@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = tracehook_report_syscall_entry(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->u_regs[UREG_G1]);
+
if (unlikely(current->audit_context) && !ret)
audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
AUDIT_ARCH_SPARC :
@@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->u_regs[UREG_G1]);
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d150c2aa98d2..dc4a458f74dc 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
#endif
.align 32
1: ldx [%g6 + TI_FLAGS], %l5
- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
be,pt %icc, rtrap
nop
call syscall_trace_leave
@@ -187,7 +187,7 @@ linux_syscall_trace:
.globl linux_sparc_syscall32
linux_sparc_syscall32:
/* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS ! IEU1 Group
+ cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
srl %i0, 0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
@@ -198,7 +198,7 @@ linux_sparc_syscall32:
srl %i5, 0, %o5 ! IEU1
srl %i2, 0, %o2 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
call %l7 ! CTI Group brk forced
@@ -210,7 +210,7 @@ linux_sparc_syscall32:
.globl linux_sparc_syscall
linux_sparc_syscall:
/* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS ! IEU1 Group
+ cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
mov %i0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
@@ -221,7 +221,7 @@ linux_sparc_syscall:
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
@@ -245,7 +245,7 @@ ret_sys_call:
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
80:
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
@@ -260,7 +260,7 @@ ret_sys_call:
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
sub %g0, %o0, %o0
or %g3, %g2, %g3
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 63f73ae8a892..67e165102885 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void)
static struct clocksource clocksource_tick = {
.rating = 100,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init setup_clockevent_multiplier(unsigned long hz)
-{
- unsigned long mult, shift = 32;
-
- while (1) {
- mult = div_sc(hz, NSEC_PER_SEC, shift);
- if (mult && (mult >> 32UL) == 0UL)
- break;
-
- shift--;
- }
-
- sparc64_clockevent.shift = shift;
- sparc64_clockevent.mult = mult;
-}
-
static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
@@ -828,9 +811,7 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
- clocksource_tick.mult =
- clocksource_hz2mult(freq,
- clocksource_tick.shift);
+ clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
printk("clocksource: mult[%x] shift[%d]\n",
@@ -839,15 +820,14 @@ void __init time_init(void)
clocksource_register(&clocksource_tick);
sparc64_clockevent.name = tick_ops->name;
-
- setup_clockevent_multiplier(freq);
+ clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
- printk("clockevent: mult[%ux] shift[%d]\n",
+ printk("clockevent: mult[%x] shift[%d]\n",
sparc64_clockevent.mult, sparc64_clockevent.shift);
setup_sparc64_timer();
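
time_64.c stops computing the clocksource and clockevent scaling factors by hand and lets the generic helpers choose them. The underlying arithmetic is a fixed-point conversion, ns ≈ (cycles * mult) >> shift, with mult/shift picked so the multiplier fits in 32 bits. A rough stand-alone illustration of that selection (not the kernel helper, which additionally bounds the shift by the longest interval it must convert without overflow):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Pick the largest shift for which mult = (NSEC_PER_SEC << shift) / freq
 * still fits in 32 bits, then convert with ns = (cycles * mult) >> shift. */
static void calc_mult_shift(uint64_t freq_hz, uint32_t *mult, uint32_t *shift)
{
    uint32_t sft;

    for (sft = 32; sft > 0; sft--) {
        uint64_t m = (NSEC_PER_SEC << sft) / freq_hz;
        if (m <= 0xffffffffULL) {       /* fits in a 32-bit multiplier */
            *mult = (uint32_t)m;
            *shift = sft;
            return;
        }
    }
    *mult = (uint32_t)(NSEC_PER_SEC / freq_hz);
    *shift = 0;
}

int main(void)
{
    uint32_t mult, shift;
    uint64_t cycles = 1500000;          /* ~1 ms worth of 1.5 GHz cycles */

    calc_mult_shift(1500000000ULL, &mult, &shift);
    printf("mult=%#x shift=%u\n", mult, shift);
    printf("%llu cycles -> %llu ns\n",
           (unsigned long long)cycles,
           (unsigned long long)((cycles * mult) >> shift));
    return 0;
}
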
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 6b1e6cde6fff..f8514e291e15 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -17,8 +17,7 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
-
-/* #define DEBUG_MNA */
+#include <linux/perf_event.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
@@ -29,12 +28,6 @@ enum direction {
invalid,
};
-#ifdef DEBUG_MNA
-static char *dirstrings[] = {
- "load", "store", "both", "fpload", "fpstore", "invalid"
-};
-#endif
-
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
@@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
unsigned long addr = compute_effective_address(regs, insn);
int err;
-#ifdef DEBUG_MNA
- printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
- regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
-#endif
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
addr = compute_effective_address(regs, insn);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 379209982a07..378ca82b9ccc 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -20,10 +20,9 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
+#include <linux/perf_event.h>
#include <asm/fpumacro.h>
-/* #define DEBUG_MNA */
-
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
@@ -33,12 +32,6 @@ enum direction {
invalid,
};
-#ifdef DEBUG_MNA
-static char *dirstrings[] = {
- "load", "store", "both", "fpload", "fpstore", "invalid"
-};
-#endif
-
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
@@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
-#ifdef DEBUG_MNA
- printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
- "retpc[%016lx]\n",
- regs->tpc, dirstrings[dir], addr, size,
- regs->u_regs[UREG_RETPC]);
-#endif
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
@@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
@@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
@@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
@@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index d231cbd5c526..9dfd2ebcb157 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
+#include <linux/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
@@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
BUG_ON(regs->tstate & TSTATE_PRIV);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
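
The perf_sw_event() calls added to the unaligned-access and emulation traps make these faults visible as the generic software events PERF_COUNT_SW_ALIGNMENT_FAULTS and PERF_COUNT_SW_EMULATION_FAULTS, so they can be counted from user space like any other perf event. A minimal sketch of reading one such counter (perf_event_open() has no libc wrapper, so it goes through syscall()):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    struct perf_event_attr attr;
    uint64_t count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size   = sizeof(attr);
    attr.type   = PERF_TYPE_SOFTWARE;
    attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

    /* Count alignment faults for the calling thread on any CPU. */
    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    /* ... run the workload of interest here ... */

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("alignment faults: %llu\n", (unsigned long long)count);
    close(fd);
    return 0;
}
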
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index e75faf0e59ae..c4b5e03af115 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -44,3 +44,4 @@ obj-y += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
+obj-y += usercopy.o
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index b6557297440f..615f401edf69 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -6,10 +6,6 @@
.text
- .globl __memset
- .type __memset, #function
-__memset: /* %o0=buf, %o1=pat, %o2=len */
-
.globl memset
.type memset, #function
memset: /* %o0=buf, %o1=pat, %o2=len */
@@ -83,7 +79,6 @@ __bzero_done:
retl
mov %o3, %o0
.size __bzero, .-__bzero
- .size __memset, .-__memset
.size memset, .-memset
#define EX_ST(x,y) \
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 77f228533d47..3632cb34e914 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -560,7 +560,7 @@ __csum_partial_copy_end:
mov %i0, %o1
mov %i1, %o0
5:
- call __memcpy
+ call memcpy
mov %i2, %o2
tst %o0
bne,a 2f
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 704b12668388..1b30bb3bfdb1 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);
@@ -81,7 +80,6 @@ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
/* Special internal versions of library functions. */
EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(bzero_1page);
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 7ce9c65f3592..24b8b12deed2 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -64,8 +64,9 @@ mcount:
2: sethi %hi(softirq_stack), %g3
or %g3, %lo(softirq_stack), %g3
ldx [%g3 + %g1], %g7
+ sub %g7, STACK_BIAS, %g7
cmp %sp, %g7
- bleu,pt %xcc, 2f
+ bleu,pt %xcc, 3f
sethi %hi(THREAD_SIZE), %g3
add %g7, %g3, %g7
cmp %sp, %g7
@@ -75,7 +76,7 @@ mcount:
* again, we are already trying to output the stack overflow
* message.
*/
- sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
+3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
or %g7, %lo(ovstack), %g7
add %g7, OVSTACKSIZE, %g3
sub %g3, STACK_BIAS + 192, %g3
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index ce10bc869af9..34fe65751737 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -543,9 +543,6 @@ FUNC(memmove)
b 3f
add %o0, 2, %o0
-#ifdef __KERNEL__
-FUNC(__memcpy)
-#endif
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
sub %o0, %o1, %o4
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index 1c37ea892deb..99c017be8719 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -60,11 +60,10 @@
.globl __bzero_begin
__bzero_begin:
- .globl __bzero, __memset,
+ .globl __bzero
.globl memset
.globl __memset_start, __memset_end
__memset_start:
-__memset:
memset:
and %o1, 0xff, %g3
sll %g3, 8, %g2
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/sparc/lib/usercopy.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <linux/bug.h>
+
+void copy_from_user_overflow(void)
+{
+ WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index e13f65da17df..a3fccde894ec 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -67,6 +67,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include "sfp-util_32.h"
@@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
int retcode = 0; /* assume all succeed */
unsigned long insn;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 6863c9bde25c..56d2c44747b8 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/perf_event.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
@@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 43b0da96a4fb..6081936bf03b 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -31,13 +31,12 @@
#include <asm/sections.h>
#include <asm/mmu_context.h>
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs)
+static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
- if (!user_mode(regs)) {
+ if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
@@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
}
return ret;
}
-#else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
- return 0;
-}
-#endif
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
@@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address,
die_if_kernel("Oops", regs);
}
-static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
regs->tpc);
@@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
return insn;
}
-static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
- unsigned int insn, unsigned long address)
+static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
+ int fault_code, unsigned int insn,
+ unsigned long address)
{
unsigned char asi = ASI_P;
@@ -225,7 +219,7 @@ cannot_handle:
unhandled_fault (address, current, regs);
}
-static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
+static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
static int times;
@@ -237,8 +231,8 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
show_regs(regs);
}
-static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
- unsigned long addr)
+static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+ unsigned long addr)
{
static int times;
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e14629c87de4..51069245b79a 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -6,6 +6,7 @@
#include <linux/console.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
@@ -131,7 +132,7 @@ void mconsole_proc(struct mc_request *req)
char *ptr = req->request.data, *buf;
ptr += strlen("proc");
- while (isspace(*ptr)) ptr++;
+ ptr = skip_spaces(ptr);
proc = get_fs_type("proc");
if (proc == NULL) {
@@ -212,8 +213,7 @@ void mconsole_proc(struct mc_request *req)
char *ptr = req->request.data;
ptr += strlen("proc");
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
snprintf(path, sizeof(path), "/proc/%s", ptr);
fd = sys_open(path, 0, 0);
@@ -560,8 +560,7 @@ void mconsole_config(struct mc_request *req)
int err;
ptr += strlen("config");
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
dev = mconsole_find_dev(ptr);
if (dev == NULL) {
mconsole_reply(req, "Bad configuration option", 1, 0);
@@ -588,7 +587,7 @@ void mconsole_remove(struct mc_request *req)
int err, start, end, n;
ptr += strlen("remove");
- while (isspace(*ptr)) ptr++;
+ ptr = skip_spaces(ptr);
dev = mconsole_find_dev(ptr);
if (dev == NULL) {
mconsole_reply(req, "Bad remove option", 1, 0);
@@ -712,7 +711,7 @@ void mconsole_sysrq(struct mc_request *req)
char *ptr = req->request.data;
ptr += strlen("sysrq");
- while (isspace(*ptr)) ptr++;
+ ptr = skip_spaces(ptr);
/*
* With 'b', the system will shut down without a chance to reply,
@@ -757,8 +756,7 @@ void mconsole_stack(struct mc_request *req)
*/
ptr += strlen("stack");
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
/*
* Should really check for multiple pids or reject bad args here
@@ -833,8 +831,8 @@ static int __init mconsole_init(void)
__initcall(mconsole_init);
-static int write_proc_mconsole(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t mconsole_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char *buf;
@@ -855,6 +853,11 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations mconsole_proc_fops = {
+ .owner = THIS_MODULE,
+ .write = mconsole_proc_write,
+};
+
static int create_proc_mconsole(void)
{
struct proc_dir_entry *ent;
@@ -862,15 +865,12 @@ static int create_proc_mconsole(void)
if (notify_socket == NULL)
return 0;
- ent = create_proc_entry("mconsole", S_IFREG | 0200, NULL);
+ ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops);
if (ent == NULL) {
printk(KERN_INFO "create_proc_mconsole : create_proc_entry "
"failed\n");
return 0;
}
-
- ent->read_proc = NULL;
- ent->write_proc = write_proc_mconsole;
return 0;
}
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 635d16d90a80..5ff554677f40 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -27,6 +27,7 @@
#include "linux/init.h"
#include "linux/cdrom.h"
#include "linux/proc_fs.h"
+#include "linux/seq_file.h"
#include "linux/ctype.h"
#include "linux/capability.h"
#include "linux/mm.h"
@@ -200,23 +201,25 @@ static void make_proc_ide(void)
proc_ide = proc_mkdir("ide0", proc_ide_root);
}
-static int proc_ide_read_media(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int fake_ide_media_proc_show(struct seq_file *m, void *v)
{
- int len;
-
- strcpy(page, "disk\n");
- len = strlen("disk\n");
- len -= off;
- if (len < count){
- *eof = 1;
- if (len <= 0) return 0;
- }
- else len = count;
- *start = page + off;
- return len;
+ seq_puts(m, "disk\n");
+ return 0;
+}
+
+static int fake_ide_media_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fake_ide_media_proc_show, NULL);
}
+static const struct file_operations fake_ide_media_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = fake_ide_media_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void make_ide_entries(const char *dev_name)
{
struct proc_dir_entry *dir, *ent;
@@ -227,11 +230,8 @@ static void make_ide_entries(const char *dev_name)
dir = proc_mkdir(dev_name, proc_ide);
if(!dir) return;
- ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir);
+ ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops);
if(!ent) return;
- ent->data = NULL;
- ent->read_proc = proc_ide_read_media;
- ent->write_proc = NULL;
snprintf(name, sizeof(name), "ide0/%s", dev_name);
proc_symlink(dev_name, proc_ide_root, name);
}
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 6540d2c9fbb7..829df49dee99 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -6,7 +6,9 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/uaccess.h>
@@ -16,30 +18,26 @@
*/
int uml_exitcode = 0;
-static int read_proc_exitcode(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int exitcode_proc_show(struct seq_file *m, void *v)
{
- int len, val;
+ int val;
/*
* Save uml_exitcode in a local so that we don't need to guarantee
* that sprintf accesses it atomically.
*/
val = uml_exitcode;
- len = sprintf(page, "%d\n", val);
- len -= off;
- if (len <= off+count)
- *eof = 1;
- *start = page + off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
+ seq_printf(m, "%d\n", val);
+ return 0;
+}
+
+static int exitcode_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, exitcode_proc_show, NULL);
}
-static int write_proc_exitcode(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t exitcode_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
int tmp;
@@ -55,20 +53,25 @@ static int write_proc_exitcode(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations exitcode_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = exitcode_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = exitcode_proc_write,
+};
+
static int make_proc_exitcode(void)
{
struct proc_dir_entry *ent;
- ent = create_proc_entry("exitcode", 0600, NULL);
+ ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
if (ent == NULL) {
printk(KERN_WARNING "make_proc_exitcode : Failed to register "
"/proc/exitcode\n");
return 0;
}
-
- ent->read_proc = read_proc_exitcode;
- ent->write_proc = write_proc_exitcode;
-
return 0;
}
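
The UML conversions above (mconsole, ubd, exitcode, and sysemu below) all follow the same recipe for retiring the legacy read_proc/write_proc hooks: implement a seq_file "show" routine, wrap it with single_open(), and register the file with proc_create(). A minimal sketch of that recipe, assuming the 2.6.32-era API in which proc_create() still takes a struct file_operations (later kernels switched to struct proc_ops); the file name and exported value are hypothetical:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_value = 42;     /* hypothetical state exported via /proc/demo */

static int demo_proc_show(struct seq_file *m, void *v)
{
    seq_printf(m, "%d\n", demo_value);
    return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
    .owner   = THIS_MODULE,
    .open    = demo_proc_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static int __init demo_init(void)
{
    if (!proc_create("demo", 0444, NULL, &demo_proc_fops))
        return -ENOMEM;
    return 0;
}

static void __exit demo_exit(void)
{
    remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
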
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 039270b9b73b..89474ba0741e 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_putc(p, '\n');
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 4a28a1568d85..2f910a1b7454 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -9,11 +9,13 @@
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
@@ -336,16 +338,19 @@ int get_using_sysemu(void)
return atomic_read(&using_sysemu);
}
-static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
+static int sysemu_proc_show(struct seq_file *m, void *v)
{
- if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
- /* No overflow */
- *eof = 1;
+ seq_printf(m, "%d\n", get_using_sysemu());
+ return 0;
+}
- return strlen(buf);
+static int sysemu_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sysemu_proc_show, NULL);
}
-static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
+static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
char tmp[2];
@@ -358,13 +363,22 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned
return count;
}
+static const struct file_operations sysemu_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sysemu_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = sysemu_proc_write,
+};
+
int __init make_proc_sysemu(void)
{
struct proc_dir_entry *ent;
if (!sysemu_supported)
return 0;
- ent = create_proc_entry("sysemu", 0600, NULL);
+ ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
if (ent == NULL)
{
@@ -372,9 +386,6 @@ int __init make_proc_sysemu(void)
return 0;
}
- ent->read_proc = proc_read_sysemu;
- ent->write_proc = proc_write_sysemu;
-
return 0;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 32a1918e1b88..3b2a5aca4edb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2012,18 +2012,9 @@ config SCx200HR_TIMER
processor goes idle (as is done by the scheduler). The
other workaround is idle=poll boot option.
-config GEODE_MFGPT_TIMER
- def_bool y
- prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
- depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
- ---help---
- This driver provides a clock event source based on the MFGPT
- timer(s) in the CS5535 and CS5536 companion chip for the geode.
- MFGPTs have a better resolution and max interval than the
- generic PIT, and are suitable for use as high-res timers.
-
config OLPC
bool "One Laptop Per Child support"
+ select GPIOLIB
default n
---help---
Add support for detecting the unique features of the OLPC
diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h
index ad3c2ed75481..7cd73552a4e8 100644
--- a/arch/x86/include/asm/geode.h
+++ b/arch/x86/include/asm/geode.h
@@ -12,160 +12,7 @@
#include <asm/processor.h>
#include <linux/io.h>
-
-/* Generic southbridge functions */
-
-#define GEODE_DEV_PMS 0
-#define GEODE_DEV_ACPI 1
-#define GEODE_DEV_GPIO 2
-#define GEODE_DEV_MFGPT 3
-
-extern int geode_get_dev_base(unsigned int dev);
-
-/* Useful macros */
-#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
-#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
-#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
-#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
-
-/* MSRS */
-
-#define MSR_GLIU_P2D_RO0 0x10000029
-
-#define MSR_LX_GLD_MSR_CONFIG 0x48002001
-#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
- * sheet has the wrong value */
-#define MSR_GLCP_SYS_RSTPLL 0x4C000014
-#define MSR_GLCP_DOTPLL 0x4C000015
-
-#define MSR_LBAR_SMB 0x5140000B
-#define MSR_LBAR_GPIO 0x5140000C
-#define MSR_LBAR_MFGPT 0x5140000D
-#define MSR_LBAR_ACPI 0x5140000E
-#define MSR_LBAR_PMS 0x5140000F
-
-#define MSR_DIVIL_SOFT_RESET 0x51400017
-
-#define MSR_PIC_YSEL_LOW 0x51400020
-#define MSR_PIC_YSEL_HIGH 0x51400021
-#define MSR_PIC_ZSEL_LOW 0x51400022
-#define MSR_PIC_ZSEL_HIGH 0x51400023
-#define MSR_PIC_IRQM_LPC 0x51400025
-
-#define MSR_MFGPT_IRQ 0x51400028
-#define MSR_MFGPT_NR 0x51400029
-#define MSR_MFGPT_SETUP 0x5140002B
-
-#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
-
-#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
-#define MSR_GX_MSR_PADSEL 0xC0002011
-
-/* Resource Sizes */
-
-#define LBAR_GPIO_SIZE 0xFF
-#define LBAR_MFGPT_SIZE 0x40
-#define LBAR_ACPI_SIZE 0x40
-#define LBAR_PMS_SIZE 0x80
-
-/* ACPI registers (PMS block) */
-
-/*
- * PM1_EN is only valid when VSA is enabled for 16 bit reads.
- * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
- * with a 32 bit read at offset 0x0
- */
-
-#define PM1_STS 0x00
-#define PM1_EN 0x02
-#define PM1_CNT 0x08
-#define PM2_CNT 0x0C
-#define PM_TMR 0x10
-#define PM_GPE0_STS 0x18
-#define PM_GPE0_EN 0x1C
-
-/* PMC registers (PMS block) */
-
-#define PM_SSD 0x00
-#define PM_SCXA 0x04
-#define PM_SCYA 0x08
-#define PM_OUT_SLPCTL 0x0C
-#define PM_SCLK 0x10
-#define PM_SED 0x1
-#define PM_SCXD 0x18
-#define PM_SCYD 0x1C
-#define PM_IN_SLPCTL 0x20
-#define PM_WKD 0x30
-#define PM_WKXD 0x34
-#define PM_RD 0x38
-#define PM_WKXA 0x3C
-#define PM_FSD 0x40
-#define PM_TSD 0x44
-#define PM_PSD 0x48
-#define PM_NWKD 0x4C
-#define PM_AWKD 0x50
-#define PM_SSC 0x54
-
-/* VSA2 magic values */
-
-#define VSA_VRC_INDEX 0xAC1C
-#define VSA_VRC_DATA 0xAC1E
-#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
-#define VSA_VR_SIGNATURE 0x0003
-#define VSA_VR_MEM_SIZE 0x0200
-#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
-#define GSW_VSA_SIG 0x534d /* General Software signature */
-/* GPIO */
-
-#define GPIO_OUTPUT_VAL 0x00
-#define GPIO_OUTPUT_ENABLE 0x04
-#define GPIO_OUTPUT_OPEN_DRAIN 0x08
-#define GPIO_OUTPUT_INVERT 0x0C
-#define GPIO_OUTPUT_AUX1 0x10
-#define GPIO_OUTPUT_AUX2 0x14
-#define GPIO_PULL_UP 0x18
-#define GPIO_PULL_DOWN 0x1C
-#define GPIO_INPUT_ENABLE 0x20
-#define GPIO_INPUT_INVERT 0x24
-#define GPIO_INPUT_FILTER 0x28
-#define GPIO_INPUT_EVENT_COUNT 0x2C
-#define GPIO_READ_BACK 0x30
-#define GPIO_INPUT_AUX1 0x34
-#define GPIO_EVENTS_ENABLE 0x38
-#define GPIO_LOCK_ENABLE 0x3C
-#define GPIO_POSITIVE_EDGE_EN 0x40
-#define GPIO_NEGATIVE_EDGE_EN 0x44
-#define GPIO_POSITIVE_EDGE_STS 0x48
-#define GPIO_NEGATIVE_EDGE_STS 0x4C
-
-#define GPIO_MAP_X 0xE0
-#define GPIO_MAP_Y 0xE4
-#define GPIO_MAP_Z 0xE8
-#define GPIO_MAP_W 0xEC
-
-static inline u32 geode_gpio(unsigned int nr)
-{
- BUG_ON(nr > 28);
- return 1 << nr;
-}
-
-extern void geode_gpio_set(u32, unsigned int);
-extern void geode_gpio_clear(u32, unsigned int);
-extern int geode_gpio_isset(u32, unsigned int);
-extern void geode_gpio_setup_event(unsigned int, int, int);
-extern void geode_gpio_set_irq(unsigned int, unsigned int);
-
-static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
-{
- geode_gpio_setup_event(gpio, pair, 0);
-}
-
-static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
-{
- geode_gpio_setup_event(gpio, pair, 1);
-}
-
-/* Specific geode tests */
+#include <linux/cs5535.h>
static inline int is_geode_gx(void)
{
@@ -186,68 +33,4 @@ static inline int is_geode(void)
return (is_geode_gx() || is_geode_lx());
}
-#ifdef CONFIG_MGEODE_LX
-extern int geode_has_vsa2(void);
-#else
-static inline int geode_has_vsa2(void)
-{
- return 0;
-}
-#endif
-
-/* MFGPTs */
-
-#define MFGPT_MAX_TIMERS 8
-#define MFGPT_TIMER_ANY (-1)
-
-#define MFGPT_DOMAIN_WORKING 1
-#define MFGPT_DOMAIN_STANDBY 2
-#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
-
-#define MFGPT_CMP1 0
-#define MFGPT_CMP2 1
-
-#define MFGPT_EVENT_IRQ 0
-#define MFGPT_EVENT_NMI 1
-#define MFGPT_EVENT_RESET 3
-
-#define MFGPT_REG_CMP1 0
-#define MFGPT_REG_CMP2 2
-#define MFGPT_REG_COUNTER 4
-#define MFGPT_REG_SETUP 6
-
-#define MFGPT_SETUP_CNTEN (1 << 15)
-#define MFGPT_SETUP_CMP2 (1 << 14)
-#define MFGPT_SETUP_CMP1 (1 << 13)
-#define MFGPT_SETUP_SETUP (1 << 12)
-#define MFGPT_SETUP_STOPEN (1 << 11)
-#define MFGPT_SETUP_EXTEN (1 << 10)
-#define MFGPT_SETUP_REVEN (1 << 5)
-#define MFGPT_SETUP_CLKSEL (1 << 4)
-
-static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
- outw(value, base + reg + (timer * 8));
-}
-
-static inline u16 geode_mfgpt_read(int timer, u16 reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
- return inw(base + reg + (timer * 8));
-}
-
-extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
-extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
-extern int geode_mfgpt_alloc_timer(int timer, int domain);
-
-#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
-#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
-
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-extern int __init mfgpt_timer_setup(void);
-#else
-static inline int mfgpt_timer_setup(void) { return 0; }
-#endif
-
#endif /* _ASM_X86_GEODE_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6a635bd39867..4611f085cd43 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -113,7 +113,7 @@
*/
#define LOCAL_PENDING_VECTOR 0xec
-#define UV_BAU_MESSAGE 0xec
+#define UV_BAU_MESSAGE 0xea
/*
* Self IPI vector for machine checks
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 5bef931f8b14..2d228fc9b4b7 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -244,6 +244,9 @@ do { \
#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 834a30295fab..3a57385d9fa7 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -120,7 +120,7 @@ extern int olpc_ec_mask_unset(uint8_t bits);
/* GPIO assignments */
-#define OLPC_GPIO_MIC_AC geode_gpio(1)
+#define OLPC_GPIO_MIC_AC 1
#define OLPC_GPIO_DCON_IRQ geode_gpio(7)
#define OLPC_GPIO_THRM_ALRM geode_gpio(10)
#define OLPC_GPIO_SMB_CLK geode_gpio(14)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..dd59a85a918f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
unsigned long flags)
{
PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da0..b1e70d51e40c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
phys_addr_t phys, pgprot_t flags);
};
-struct raw_spinlock;
+struct arch_spinlock;
struct pv_lock_ops {
- int (*spin_is_locked)(struct raw_spinlock *lock);
- int (*spin_is_contended)(struct raw_spinlock *lock);
- void (*spin_lock)(struct raw_spinlock *lock);
- void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct raw_spinlock *lock);
- void (*spin_unlock)(struct raw_spinlock *lock);
+ int (*spin_is_locked)(struct arch_spinlock *lock);
+ int (*spin_is_contended)(struct arch_spinlock *lock);
+ void (*spin_lock)(struct arch_spinlock *lock);
+ void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct arch_spinlock *lock);
+ void (*spin_unlock)(struct arch_spinlock *lock);
};
/* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36defeb7..0c44196b78ac 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
#define percpu_to_op(op, var, val) \
do { \
- typedef typeof(var) T__; \
+ typedef typeof(var) pto_T__; \
if (0) { \
- T__ tmp__; \
- tmp__ = (val); \
+ pto_T__ pto_tmp__; \
+ pto_tmp__ = (val); \
} \
switch (sizeof(var)) { \
case 1: \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
- : "qi" ((T__)(val))); \
+ : "qi" ((pto_T__)(val))); \
break; \
case 2: \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)(val))); \
+ : "ri" ((pto_T__)(val))); \
break; \
case 4: \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)(val))); \
+ : "ri" ((pto_T__)(val))); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
- : "re" ((T__)(val))); \
+ : "re" ((pto_T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
@@ -106,31 +106,31 @@ do { \
#define percpu_from_op(op, var, constraint) \
({ \
- typeof(var) ret__; \
+ typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
- : "=q" (ret__) \
+ : "=q" (pfo_ret__) \
: constraint); \
break; \
case 2: \
asm(op "w "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
case 4: \
asm(op "l "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
case 8: \
asm(op "q "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
default: __bad_percpu_size(); \
} \
- ret__; \
+ pfo_ret__; \
})
/*
@@ -153,6 +153,84 @@ do { \
#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#endif
+
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321db..3089f70c0c52 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
short inc = 0x0100;
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
: "memory", "cc");
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp, new;
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
: "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
#else
#define TICKET_SHIFT 16
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int inc = 0x00010000;
int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
: "memory", "cc");
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp;
int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
: "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
}
#endif
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
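The assembly above implements a ticket (FIFO) spinlock: lock xadd hands out tickets and incb/incw retires them, with both halves packed into lock->slock. Stripped of the packing and the x86 instructions, the algorithm is roughly the following C11 sketch (illustrative only, not the kernel implementation):

    #include <stdatomic.h>

    struct ticket_lock {
        atomic_uint next;	/* next ticket to hand out */
        atomic_uint owner;	/* ticket currently being served */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
        unsigned int me = atomic_fetch_add(&l->next, 1);	/* take a ticket */

        while (atomic_load(&l->owner) != me)
            ;	/* spin; the kernel uses cpu_relax() here */
    }

    static int ticket_trylock(struct ticket_lock *l)
    {
        unsigned int cur = atomic_load(&l->owner);

        /* succeed only if nobody is queued behind the current owner */
        return atomic_compare_exchange_strong(&l->next, &cur, cur + 1);
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
        atomic_fetch_add(&l->owner, 1);	/* serve the next waiter */
    }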
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c87091..dcb48b2edc11 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,16 +5,16 @@
# error "please don't include this file directly"
#endif
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
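Static users of the arch-level lock adopt the renamed type and initializer in the same way; the die_lock and sync_lock conversions further down in this patch follow this pattern. A minimal sketch with an illustrative lock name:

    #include <linux/spinlock.h>

    static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

    static void demo_critical(void)
    {
        unsigned long flags;

        /* arch_spin_lock() bypasses lockdep; the caller handles irqs itself */
        raw_local_irq_save(flags);
        arch_spin_lock(&demo_lock);
        /* ... tiny critical section, as in oops_begin()/check_tsc_warp() ... */
        arch_spin_unlock(&demo_lock);
        raw_local_irq_restore(flags);
    }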
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 40e37b10c6c0..c5087d796587 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -35,11 +35,16 @@
# endif
#endif
-/* Node not present */
-#define NUMA_NO_NODE (-1)
+/*
+ * NUMA_NO_NODE is now defined in <linux/numa.h>, so it stays visible
+ * here and may be used independently of CONFIG_NUMA.
+ */
+#include <linux/numa.h>
#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
+
#include <asm/mpspec.h>
#ifdef CONFIG_X86_32
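Because NUMA_NO_NODE now comes from <linux/numa.h>, code built with or without CONFIG_NUMA can test for it uniformly. A small illustrative helper (not from this patch):

    #include <linux/numa.h>
    #include <linux/topology.h>

    static int demo_pick_node(int requested_node)
    {
        /* NUMA_NO_NODE (-1) means "no preference": fall back to the local node */
        if (requested_node == NUMA_NO_NODE)
            return numa_node_id();
        return requested_node;
    }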
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 90f06c25221d..cb507bb05d79 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -16,7 +16,6 @@ extern unsigned long initial_code;
extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
-#define TRAMPOLINE_BASE 0x6000
extern unsigned long setup_trampoline(void);
extern void __init reserve_trampoline_memory(void);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4f2e66e29ecc..d87f09bc5a52 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_K8_NB) += k8.o
-obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index e0dfb6856aa2..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -280,7 +280,8 @@ void __init early_gart_iommu_check(void)
* or the BIOS forgot to put that region in the reserved map.
* try to update e820 to mark that region as reserved.
*/
- int i, fix, slot;
+ u32 agp_aper_base = 0, agp_aper_order = 0;
+ int i, fix, slot, valid_agp = 0;
u32 ctl;
u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base = 0, last_aper_base = 0;
@@ -290,6 +291,8 @@ void __init early_gart_iommu_check(void)
return;
/* This is mostly a duplicate of iommu_hole_init */
+ agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
+
fix = 0;
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
@@ -342,10 +345,10 @@ void __init early_gart_iommu_check(void)
}
}
- if (!fix)
+ if (valid_agp)
return;
- /* different nodes have different setting, disable them all at first*/
+ /* disable them all at first */
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
int dev_base, dev_limit;
@@ -458,8 +461,6 @@ out:
if (aper_alloc) {
/* Got the aperture from the AGP bridge */
- } else if (!valid_agp) {
- /* Do nothing */
} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index efb2b9cd132c..aa57c079c98f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1341,7 +1341,7 @@ void enable_x2apic(void)
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
- pr_info("Enabling x2apic\n");
+ printk_once(KERN_INFO "Enabling x2apic\n");
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
}
}
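printk_once() latches a static flag so the message is emitted only on the first call, which matters because enable_x2apic() may run once per CPU. Conceptually it behaves like the simplified macro below (hypothetical name, not the exact <linux/kernel.h> definition):

    #define demo_printk_once(fmt, ...)			\
    ({							\
        static bool __printed;				\
							\
        if (!__printed) {				\
            __printed = true;				\
            printk(fmt, ##__VA_ARGS__);			\
        }						\
    })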
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d5d498fbee4b..11a5851f1f50 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
continue;
cfg = irq_cfg(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
@@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
}
__get_cpu_var(vector_irq)[vector] = -1;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
irq_exit();
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9dbf..0159a69396cb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
*/
static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- local_inc(&__get_cpu_var(alert_counter));
- if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+ __this_cpu_inc(per_cpu_var(alert_counter));
+ if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
/*
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- local_set(&__get_cpu_var(alert_counter), 0);
+ __this_cpu_write(per_cpu_var(alert_counter), 0);
}
/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index c965e5212714..468489b57aae 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
+ static bool printed;
if (c->cpuid_level < 0xb)
return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
-
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
+ if (!printed) {
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ c->phys_proc_id);
+ if (c->x86_max_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ c->cpu_core_id);
+ printed = 1;
+ }
return;
#endif
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7128b3799cec..8dc3ea145c97 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -375,8 +375,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = nearby_node(apicid);
}
numa_set_node(cpu, node);
-
- printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa990a6c8..4868e4a951ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_HT
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
+ static bool printed;
if (!cpu_has(c, X86_FEATURE_HT))
return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
- printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
goto out;
}
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
((1 << core_bits) - 1);
out:
- if ((c->x86_max_cores * smp_num_siblings) > 1) {
+ if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
c->cpu_core_id);
+ printed = 1;
}
#endif
}
@@ -1093,7 +1095,7 @@ static void clear_all_debug_regs(void)
void __cpuinit cpu_init(void)
{
- struct orig_ist *orig_ist;
+ struct orig_ist *oist;
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
@@ -1102,7 +1104,7 @@ void __cpuinit cpu_init(void)
cpu = stack_smp_processor_id();
t = &per_cpu(init_tss, cpu);
- orig_ist = &per_cpu(orig_ist, cpu);
+ oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
panic("CPU#%d already initialized!\n", cpu);
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+ pr_debug("Initializing CPU#%d\n", cpu);
clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
@@ -1143,12 +1145,12 @@ void __cpuinit cpu_init(void)
/*
* set up and load the per-CPU TSS
*/
- if (!orig_ist->ist[0]) {
+ if (!oist->ist[0]) {
char *estacks = per_cpu(exception_stacks, cpu);
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
estacks += exception_stack_sizes[v];
- orig_ist->ist[v] = t->x86_tss.ist[v] =
+ oist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
}
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
#include <asm/apic.h>
#include <asm/desc.h>
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
static DEFINE_MUTEX(cpu_debug_lock);
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
/* Already initialized */
if (file == CPU_INDEX_BIT)
- if (per_cpu(cpu_arr[type].init, cpu))
+ if (per_cpu(cpud_arr[type].init, cpu))
return 0;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
priv->reg = reg;
priv->file = file;
mutex_lock(&cpu_debug_lock);
- per_cpu(priv_arr[type], cpu) = priv;
- per_cpu(cpu_priv_count, cpu)++;
+ per_cpu(cpud_priv_arr[type], cpu) = priv;
+ per_cpu(cpud_priv_count, cpu)++;
mutex_unlock(&cpu_debug_lock);
if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
dentry, (void *)priv, &cpu_fops);
else {
debugfs_create_file(cpu_base[type].name, S_IRUGO,
- per_cpu(cpu_arr[type].dentry, cpu),
+ per_cpu(cpud_arr[type].dentry, cpu),
(void *)priv, &cpu_fops);
mutex_lock(&cpu_debug_lock);
- per_cpu(cpu_arr[type].init, cpu) = 1;
+ per_cpu(cpud_arr[type].init, cpu) = 1;
mutex_unlock(&cpu_debug_lock);
}
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
continue;
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
- per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+ per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
if (type < CPU_TSS_BIT)
err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
err = cpu_init_allreg(cpu, cpu_dentry);
pr_info("cpu%d(%d) debug files %d\n",
- cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
- if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+ cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+ if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
pr_err("Register files count %d exceeds limit %d\n",
- per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
- per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+ per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+ per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
err = -ENFILE;
}
if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
debugfs_remove_recursive(cpu_debugfs_dir);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
- for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
- kfree(per_cpu(priv_arr[i], cpu));
+ for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+ kfree(per_cpu(cpud_priv_arr[i], cpu));
}
module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index d2e7c77c1ea4..f28decf8dde3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
unsigned int cpu_feature;
};
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
if (unlikely(cpumask_empty(mask)))
return 0;
- switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+ switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
- perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+ perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
return 0;
- ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
- per_cpu(old_perf, cpu) = perf;
+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+ per_cpu(acfreq_old_perf, cpu) = perf;
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
unsigned int freq;
unsigned int cached_freq;
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_verify\n");
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
- per_cpu(drv_data, cpu) = data;
+ per_cpu(acfreq_data, cpu) = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
- per_cpu(drv_data, cpu) = NULL;
+ per_cpu(acfreq_data, cpu) = NULL;
return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_cpu_exit\n");
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
- per_cpu(drv_data, policy->cpu) = NULL;
+ per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_resume\n");
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c900b73f9224..9c31e8b09d2c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -270,8 +270,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = cpu_to_node(cpu);
}
numa_set_node(cpu, node);
-
- printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6c40f6b5b340..fc6c8ef92dcc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,26 +499,27 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
#ifdef CONFIG_SYSFS
/* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
- int index_msb, i;
+ int index_msb, i, sibling;
struct cpuinfo_x86 *c = &cpu_data(cpu);
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
- struct cpuinfo_x86 *d;
- for_each_online_cpu(i) {
- if (!per_cpu(cpuid4_info, i))
+ for_each_cpu(i, c->llc_shared_map) {
+ if (!per_cpu(ici_cpuid4_info, i))
continue;
- d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
- cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
- d->llc_shared_map);
+ for_each_cpu(sibling, c->llc_shared_map) {
+ if (!cpu_online(sibling))
+ continue;
+ set_bit(sibling, this_leaf->shared_cpu_map);
+ }
}
return;
}
@@ -535,7 +536,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
c->apicid >> index_msb) {
cpumask_set_cpu(i,
to_cpumask(this_leaf->shared_cpu_map));
- if (i != cpu && per_cpu(cpuid4_info, i)) {
+ if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
sibling_leaf =
CPUID4_INFO_IDX(i, index);
cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +575,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);
- kfree(per_cpu(cpuid4_info, cpu));
- per_cpu(cpuid4_info, cpu) = NULL;
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
static int
@@ -614,15 +615,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
if (num_cache_leaves == 0)
return -ENOENT;
- per_cpu(cpuid4_info, cpu) = kzalloc(
+ per_cpu(ici_cpuid4_info, cpu) = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (per_cpu(cpuid4_info, cpu) == NULL)
+ if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return -ENOMEM;
smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
if (retval) {
- kfree(per_cpu(cpuid4_info, cpu));
- per_cpu(cpuid4_info, cpu) = NULL;
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
return retval;
@@ -634,7 +635,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
struct _index_kobject {
struct kobject kobj;
@@ -643,8 +644,8 @@ struct _index_kobject {
};
/* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
@@ -863,10 +864,10 @@ static struct kobj_type ktype_percpu_entry = {
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
- kfree(per_cpu(cache_kobject, cpu));
- kfree(per_cpu(index_kobject, cpu));
- per_cpu(cache_kobject, cpu) = NULL;
- per_cpu(index_kobject, cpu) = NULL;
+ kfree(per_cpu(ici_cache_kobject, cpu));
+ kfree(per_cpu(ici_index_kobject, cpu));
+ per_cpu(ici_cache_kobject, cpu) = NULL;
+ per_cpu(ici_index_kobject, cpu) = NULL;
free_cache_attributes(cpu);
}
@@ -882,14 +883,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
return err;
/* Allocate all required memory */
- per_cpu(cache_kobject, cpu) =
+ per_cpu(ici_cache_kobject, cpu) =
kzalloc(sizeof(struct kobject), GFP_KERNEL);
- if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+ if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
goto err_out;
- per_cpu(index_kobject, cpu) = kzalloc(
+ per_cpu(ici_index_kobject, cpu) = kzalloc(
sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+ if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
goto err_out;
return 0;
@@ -913,7 +914,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
if (unlikely(retval < 0))
return retval;
- retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+ retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
&ktype_percpu_entry,
&sys_dev->kobj, "%s", "cache");
if (retval < 0) {
@@ -927,12 +928,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_object->index = i;
retval = kobject_init_and_add(&(this_object->kobj),
&ktype_cache,
- per_cpu(cache_kobject, cpu),
+ per_cpu(ici_cache_kobject, cpu),
"index%1lu", i);
if (unlikely(retval)) {
for (j = 0; j < i; j++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
- kobject_put(per_cpu(cache_kobject, cpu));
+ kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
return retval;
}
@@ -940,7 +941,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
- kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+ kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
return 0;
}
@@ -949,7 +950,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;
- if (per_cpu(cpuid4_info, cpu) == NULL)
+ if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return;
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
return;
@@ -957,7 +958,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
for (i = 0; i < num_cache_leaves; i++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
- kobject_put(per_cpu(cache_kobject, cpu));
+ kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4fef985fc221..81c499eceb21 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
ack_APIC_irq();
}
+/* Thermal monitoring depends on APIC, ACPI and clock modulation */
+static int intel_thermal_supported(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has_apic)
+ return 0;
+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ return 0;
+ return 1;
+}
+
void __init mcheck_intel_therm_init(void)
{
/*
@@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void)
* LVT value on BSP and use that value to restore APs' thermal LVT
* entry BIOS programmed later
*/
- if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
- cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
+ if (intel_thermal_supported(&boot_cpu_data))
lvtthmr_init = apic_read(APIC_LVTTHMR);
}
@@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
int tm2 = 0;
u32 l, h;
- /* Thermal monitoring depends on ACPI and clock modulation*/
- if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ if (!intel_thermal_supported(c))
return;
/*
@@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
- printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
- cpu, tm2 ? "TM2" : "TM1");
+ printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
+ tm2 ? "TM2" : "TM1");
/* enable thermal throttle processing */
atomic_set(&therm_throt_en, 1);
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 3c1b12d461d1..e006e56f699c 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -4,6 +4,7 @@
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/init.h>
#define LINE_SIZE 80
@@ -133,8 +134,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return -EINVAL;
base = simple_strtoull(line + 5, &ptr, 0);
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
if (strncmp(ptr, "size=", 5))
return -EINVAL;
@@ -142,14 +142,11 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
size = simple_strtoull(ptr + 5, &ptr, 0);
if ((base & 0xfff) || (size & 0xfff))
return -EINVAL;
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
if (strncmp(ptr, "type=", 5))
return -EINVAL;
- ptr += 5;
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr + 5);
for (i = 0; i < MTRR_NUM_TYPES; ++i) {
if (strcmp(ptr, mtrr_strings[i]))
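skip_spaces() from lib/string.c (hence the new <linux/string.h> include) replaces the three open-coded isspace() loops; it simply advances past leading whitespace and returns the new position, roughly:

    #include <linux/ctype.h>

    /* behaves like the helper used above (illustrative re-implementation) */
    static char *demo_skip_spaces(const char *str)
    {
        while (isspace(*str))
            ++str;
        return (char *)str;
    }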
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a6..1c47390dd0e5 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
int cpu;
};
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
{
struct ds_context **p_context =
- (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+ (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
struct ds_context *context = NULL;
struct ds_context *new_context = NULL;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b8ce165dde5d..0a0aa1cec8f1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
}
EXPORT_SYMBOL(dump_stack);
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
/* racy, but better than risking deadlock. */
raw_local_irq_save(flags);
cpu = smp_processor_id();
- if (!__raw_spin_trylock(&die_lock)) {
+ if (!arch_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
- __raw_spin_lock(&die_lock);
+ arch_spin_lock(&die_lock);
}
die_nest_count++;
die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
- __raw_spin_unlock(&die_lock);
+ arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
oops_exit();
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d17d482a04f4..f50447d961c0 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -732,7 +732,16 @@ struct early_res {
char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
- { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
+ { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
+#ifdef CONFIG_X86_32
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+#endif
+
{}
};
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
deleted file mode 100644
index 9b08e852fd1a..000000000000
--- a/arch/x86/kernel/geode_32.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * AMD Geode southbridge support code
- * Copyright (C) 2006, Advanced Micro Devices, Inc.
- * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <asm/msr.h>
-#include <asm/geode.h>
-
-static struct {
- char *name;
- u32 msr;
- int size;
- u32 base;
-} lbars[] = {
- { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
- { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
- { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
- { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
-};
-
-static void __init init_lbars(void)
-{
- u32 lo, hi;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lbars); i++) {
- rdmsr(lbars[i].msr, lo, hi);
- if (hi & 0x01)
- lbars[i].base = lo & 0x0000ffff;
-
- if (lbars[i].base == 0)
- printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
- lbars[i].name);
- }
-}
-
-int geode_get_dev_base(unsigned int dev)
-{
- BUG_ON(dev >= ARRAY_SIZE(lbars));
- return lbars[dev].base;
-}
-EXPORT_SYMBOL_GPL(geode_get_dev_base);
-
-/* === GPIO API === */
-
-void geode_gpio_set(u32 gpio, unsigned int reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
-
- if (!base)
- return;
-
- /* low bank register */
- if (gpio & 0xFFFF)
- outl(gpio & 0xFFFF, base + reg);
- /* high bank register */
- gpio >>= 16;
- if (gpio)
- outl(gpio, base + 0x80 + reg);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_set);
-
-void geode_gpio_clear(u32 gpio, unsigned int reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
-
- if (!base)
- return;
-
- /* low bank register */
- if (gpio & 0xFFFF)
- outl((gpio & 0xFFFF) << 16, base + reg);
- /* high bank register */
- gpio &= (0xFFFF << 16);
- if (gpio)
- outl(gpio, base + 0x80 + reg);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_clear);
-
-int geode_gpio_isset(u32 gpio, unsigned int reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
- u32 val;
-
- if (!base)
- return 0;
-
- /* low bank register */
- if (gpio & 0xFFFF) {
- val = inl(base + reg) & (gpio & 0xFFFF);
- if ((gpio & 0xFFFF) == val)
- return 1;
- }
- /* high bank register */
- gpio >>= 16;
- if (gpio) {
- val = inl(base + 0x80 + reg) & gpio;
- if (gpio == val)
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(geode_gpio_isset);
-
-void geode_gpio_set_irq(unsigned int group, unsigned int irq)
-{
- u32 lo, hi;
-
- if (group > 7 || irq > 15)
- return;
-
- rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
-
- lo &= ~(0xF << (group * 4));
- lo |= (irq & 0xF) << (group * 4);
-
- wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_set_irq);
-
-void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
- u32 offset, shift, val;
-
- if (gpio >= 24)
- offset = GPIO_MAP_W;
- else if (gpio >= 16)
- offset = GPIO_MAP_Z;
- else if (gpio >= 8)
- offset = GPIO_MAP_Y;
- else
- offset = GPIO_MAP_X;
-
- shift = (gpio % 8) * 4;
-
- val = inl(base + offset);
-
- /* Clear whatever was there before */
- val &= ~(0xF << shift);
-
- /* And set the new value */
-
- val |= ((pair & 7) << shift);
-
- /* Set the PME bit if this is a PME event */
-
- if (pme)
- val |= (1 << (shift + 3));
-
- outl(val, base + offset);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
-
-int geode_has_vsa2(void)
-{
- static int has_vsa2 = -1;
-
- if (has_vsa2 == -1) {
- u16 val;
-
- /*
- * The VSA has virtual registers that we can query for a
- * signature.
- */
- outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
- outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
-
- val = inw(VSA_VRC_DATA);
- has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
- }
-
- return has_vsa2;
-}
-EXPORT_SYMBOL_GPL(geode_has_vsa2);
-
-static int __init geode_southbridge_init(void)
-{
- if (!is_geode())
- return -ENODEV;
-
- init_lbars();
- (void) mfgpt_timer_setup();
- return 0;
-}
-
-postcore_initcall(geode_southbridge_init);
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 4f8e2507e8f3..5051b94c9069 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,8 +29,6 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
- reserve_trampoline_memory();
-
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 0b06cd778fd9..b5a9896ca1e7 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,8 +98,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
{
copy_bootdata(__va(real_mode_data));
- reserve_trampoline_memory();
-
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 664bcb7384ac..91fd0c70a18a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -294,12 +294,12 @@ void fixup_irqs(void)
continue;
/* interrupts are disabled at this point */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
continue;
}
@@ -326,7 +326,7 @@ void fixup_irqs(void)
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
desc->chip->unmask(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
desc->chip->retrigger(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
}
}
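desc->lock is now a raw_spinlock_t, the variant that is meant to stay a busy-wait lock even where spinlock_t may later be substituted by a sleeping lock (as on preempt-rt); core interrupt handling cannot tolerate sleeping here. Declaring and using such a lock looks like this sketch (names illustrative):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_irq_lock);	/* never becomes a sleeping lock */

    static void demo_poke_hardware(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_irq_lock, flags);
        /* ... state that is also touched from hard-irq context ... */
        raw_spin_unlock_irqrestore(&demo_irq_lock, flags);
    }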
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
deleted file mode 100644
index 2a62d843f015..000000000000
--- a/arch/x86/kernel/mfgpt_32.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
- *
- * Copyright (C) 2006, Advanced Micro Devices, Inc.
- * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
- */
-
-/*
- * We are using the 32.768kHz input clock - it's the only one that has the
- * ranges we find desirable. The following table lists the suitable
- * divisors and the associated Hz, minimum interval and the maximum interval:
- *
- * Divisor Hz Min Delta (s) Max Delta (s)
- * 1 32768 .00048828125 2.000
- * 2 16384 .0009765625 4.000
- * 4 8192 .001953125 8.000
- * 8 4096 .00390625 16.000
- * 16 2048 .0078125 32.000
- * 32 1024 .015625 64.000
- * 64 512 .03125 128.000
- * 128 256 .0625 256.000
- * 256 128 .125 512.000
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <asm/geode.h>
-
-#define MFGPT_DEFAULT_IRQ 7
-
-static struct mfgpt_timer_t {
- unsigned int avail:1;
-} mfgpt_timers[MFGPT_MAX_TIMERS];
-
-/* Selected from the table above */
-
-#define MFGPT_DIVISOR 16
-#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
-#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
-#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
-
-/* Allow for disabling of MFGPTs */
-static int disable;
-static int __init mfgpt_disable(char *s)
-{
- disable = 1;
- return 1;
-}
-__setup("nomfgpt", mfgpt_disable);
-
-/* Reset the MFGPT timers. This is required by some broken BIOSes which already
- * do the same and leave the system in an unstable state. TinyBIOS 0.98 is
- * affected at least (0.99 is OK with MFGPT workaround left to off).
- */
-static int __init mfgpt_fix(char *s)
-{
- u32 val, dummy;
-
- /* The following udocumented bit resets the MFGPT timers */
- val = 0xFF; dummy = 0;
- wrmsr(MSR_MFGPT_SETUP, val, dummy);
- return 1;
-}
-__setup("mfgptfix", mfgpt_fix);
-
-/*
- * Check whether any MFGPTs are available for the kernel to use. In most
- * cases, firmware that uses AMD's VSA code will claim all timers during
- * bootup; we certainly don't want to take them if they're already in use.
- * In other cases (such as with VSAless OpenFirmware), the system firmware
- * leaves timers available for us to use.
- */
-
-
-static int timers = -1;
-
-static void geode_mfgpt_detect(void)
-{
- int i;
- u16 val;
-
- timers = 0;
-
- if (disable) {
- printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n");
- goto done;
- }
-
- if (!geode_get_dev_base(GEODE_DEV_MFGPT)) {
- printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n");
- goto done;
- }
-
- for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
- val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
- if (!(val & MFGPT_SETUP_SETUP)) {
- mfgpt_timers[i].avail = 1;
- timers++;
- }
- }
-
-done:
- printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers);
-}
-
-int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
-{
- u32 msr, mask, value, dummy;
- int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
-
- if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
- return -EIO;
-
- /*
- * The register maps for these are described in sections 6.17.1.x of
- * the AMD Geode CS5536 Companion Device Data Book.
- */
- switch (event) {
- case MFGPT_EVENT_RESET:
- /*
- * XXX: According to the docs, we cannot reset timers above
- * 6; that is, resets for 7 and 8 will be ignored. Is this
- * a problem? -dilinger
- */
- msr = MSR_MFGPT_NR;
- mask = 1 << (timer + 24);
- break;
-
- case MFGPT_EVENT_NMI:
- msr = MSR_MFGPT_NR;
- mask = 1 << (timer + shift);
- break;
-
- case MFGPT_EVENT_IRQ:
- msr = MSR_MFGPT_IRQ;
- mask = 1 << (timer + shift);
- break;
-
- default:
- return -EIO;
- }
-
- rdmsr(msr, value, dummy);
-
- if (enable)
- value |= mask;
- else
- value &= ~mask;
-
- wrmsr(msr, value, dummy);
- return 0;
-}
-EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
-
-int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
-{
- u32 zsel, lpc, dummy;
- int shift;
-
- if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
- return -EIO;
-
- /*
- * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
- * is using the same CMP of the timer's Siamese twin, the IRQ is set to
- * 2, and we mustn't use nor change it.
- * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
- * IRQ of the 1st. This can only happen if forcing an IRQ, calling this
- * with *irq==0 is safe. Currently there _are_ no 2 drivers.
- */
- rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
- shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
- if (((zsel >> shift) & 0xF) == 2)
- return -EIO;
-
- /* Choose IRQ: if none supplied, keep IRQ already set or use default */
- if (!*irq)
- *irq = (zsel >> shift) & 0xF;
- if (!*irq)
- *irq = MFGPT_DEFAULT_IRQ;
-
- /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
- if (*irq < 1 || *irq == 2 || *irq > 15)
- return -EIO;
- rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
- if (lpc & (1 << *irq))
- return -EIO;
-
- /* All chosen and checked - go for it */
- if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
- return -EIO;
- if (enable) {
- zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
- wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
- }
-
- return 0;
-}
-
-static int mfgpt_get(int timer)
-{
- mfgpt_timers[timer].avail = 0;
- printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
- return timer;
-}
-
-int geode_mfgpt_alloc_timer(int timer, int domain)
-{
- int i;
-
- if (timers == -1) {
- /* timers haven't been detected yet */
- geode_mfgpt_detect();
- }
-
- if (!timers)
- return -1;
-
- if (timer >= MFGPT_MAX_TIMERS)
- return -1;
-
- if (timer < 0) {
- /* Try to find an available timer */
- for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
- if (mfgpt_timers[i].avail)
- return mfgpt_get(i);
-
- if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
- break;
- }
- } else {
- /* If they requested a specific timer, try to honor that */
- if (mfgpt_timers[timer].avail)
- return mfgpt_get(timer);
- }
-
- /* No timers available - too bad */
- return -1;
-}
-EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
-
-
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-
-/*
- * The MFPGT timers on the CS5536 provide us with suitable timers to use
- * as clock event sources - not as good as a HPET or APIC, but certainly
- * better than the PIT. This isn't a general purpose MFGPT driver, but
- * a simplified one designed specifically to act as a clock event source.
- * For full details about the MFGPT, please consult the CS5536 data sheet.
- */
-
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-
-static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
-static u16 mfgpt_event_clock;
-
-static int irq;
-static int __init mfgpt_setup(char *str)
-{
- get_option(&str, &irq);
- return 1;
-}
-__setup("mfgpt_irq=", mfgpt_setup);
-
-static void mfgpt_disable_timer(u16 clock)
-{
- /* avoid races by clearing CMP1 and CMP2 unconditionally */
- geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN |
- MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2);
-}
-
-static int mfgpt_next_event(unsigned long, struct clock_event_device *);
-static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);
-
-static struct clock_event_device mfgpt_clockevent = {
- .name = "mfgpt-timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = mfgpt_set_mode,
- .set_next_event = mfgpt_next_event,
- .rating = 250,
- .cpumask = cpu_all_mask,
- .shift = 32
-};
-
-static void mfgpt_start_timer(u16 delta)
-{
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
-
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
- MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
-}
-
-static void mfgpt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- mfgpt_disable_timer(mfgpt_event_clock);
-
- if (mode == CLOCK_EVT_MODE_PERIODIC)
- mfgpt_start_timer(MFGPT_PERIODIC);
-
- mfgpt_tick_mode = mode;
-}
-
-static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
-{
- mfgpt_start_timer(delta);
- return 0;
-}
-
-static irqreturn_t mfgpt_tick(int irq, void *dev_id)
-{
- u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP);
-
- /* See if the interrupt was for us */
- if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
- return IRQ_NONE;
-
- /* Turn off the clock (and clear the event) */
- mfgpt_disable_timer(mfgpt_event_clock);
-
- if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
- return IRQ_HANDLED;
-
- /* Clear the counter */
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
-
- /* Restart the clock in periodic mode */
-
- if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
- MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
- }
-
- mfgpt_clockevent.event_handler(&mfgpt_clockevent);
- return IRQ_HANDLED;
-}
-
-static struct irqaction mfgptirq = {
- .handler = mfgpt_tick,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
- .name = "mfgpt-timer"
-};
-
-int __init mfgpt_timer_setup(void)
-{
- int timer, ret;
- u16 val;
-
- timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
- if (timer < 0) {
- printk(KERN_ERR
- "mfgpt-timer: Could not allocate a MFPGT timer\n");
- return -ENODEV;
- }
-
- mfgpt_event_clock = timer;
-
- /* Set up the IRQ on the MFGPT side */
- if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
- printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
- return -EIO;
- }
-
- /* And register it with the kernel */
- ret = setup_irq(irq, &mfgptirq);
-
- if (ret) {
- printk(KERN_ERR
- "mfgpt-timer: Unable to set up the interrupt.\n");
- goto err;
- }
-
- /* Set the clock scale and enable the event mode for CMP2 */
- val = MFGPT_SCALE | (3 << 8);
-
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
-
- /* Set up the clock event */
- mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
- mfgpt_clockevent.shift);
- mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
- &mfgpt_clockevent);
- mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
- &mfgpt_clockevent);
-
- printk(KERN_INFO
- "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
- timer, irq);
- clockevents_register_device(&mfgpt_clockevent);
-
- return 0;
-
-err:
- geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
- printk(KERN_ERR
- "mfgpt-timer: Unable to set up the MFGPT clock source\n");
- return -EIO;
-}
-
-#endif
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 35a57c963df9..40b54ceb68b5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -945,9 +945,6 @@ void __init early_reserve_e820_mpc_new(void)
{
if (enable_update_mptable && alloc_mptable) {
u64 startt = 0;
-#ifdef CONFIG_X86_TRAMPOLINE
- startt = TRAMPOLINE_BASE;
-#endif
mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
}
}
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 4006c522adc7..9d1d263f786f 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -212,7 +212,7 @@ static int __init olpc_init(void)
unsigned char *romsig;
/* The ioremap check is dangerous; limit what we run it on */
- if (!is_geode() || geode_has_vsa2())
+ if (!is_geode() || cs5535_has_vsa2())
return 0;
spin_lock_init(&ec_lock);
@@ -244,7 +244,7 @@ static int __init olpc_init(void)
(unsigned char *) &olpc_platform_info.ecver, 1);
/* check to see if the VSA exists */
- if (geode_has_vsa2())
+ if (cs5535_has_vsa2())
olpc_platform_info.flags |= OLPC_F_VSA;
printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 3a7c5a44082e..676b8c77a976 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,9 +8,9 @@
#include <asm/paravirt.h>
static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
struct pv_lock_ops pv_lock_ops = {
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index afcc58b69c7c..fcc2f2bfa39c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -120,11 +120,14 @@ static void __init dma32_free_bootmem(void)
void __init pci_iommu_alloc(void)
{
+ int use_swiotlb;
+
+ use_swiotlb = pci_swiotlb_init();
#ifdef CONFIG_X86_64
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
#endif
- if (pci_swiotlb_init())
+ if (use_swiotlb)
return;
gart_iommu_hole_init();
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e6a0d402f171..56c0e730d3fe 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -710,7 +710,8 @@ static void gart_iommu_shutdown(void)
struct pci_dev *dev;
int i;
- if (no_agp)
+ /* don't shut it down if AGP is installed */
+ if (!no_agp)
return;
for (i = 0; i < num_k8_northbridges; i++) {
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 201eab63b05f..fda313ebbb03 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
static void cs5530a_warm_reset(struct pci_dev *dev)
{
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 946a311a25c9..f7b8b9894b22 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -73,6 +73,7 @@
#include <asm/mtrr.h>
#include <asm/apic.h>
+#include <asm/trampoline.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -875,6 +876,13 @@ void __init setup_arch(char **cmdline_p)
reserve_brk();
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+
+ reserve_trampoline_memory();
+
#ifdef CONFIG_ACPI_SLEEP
/*
* Reserve low memory region for sleep support.
@@ -921,11 +929,6 @@ void __init setup_arch(char **cmdline_p)
early_acpi_boot_init();
- /*
- * Find and reserve possible boot-time SMP configuration:
- */
- find_smp_config();
-
#ifdef CONFIG_ACPI_NUMA
/*
* Parse SRAT to discover nodes.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 29e6744f51e3..678d0b8c26f3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -671,6 +671,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
complete(&c_idle->done);
}
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+ static int current_node = -1;
+ int node = cpu_to_node(cpu);
+
+ if (system_state == SYSTEM_BOOTING) {
+ if (node != current_node) {
+ if (current_node > (-1))
+ pr_cont(" Ok.\n");
+ current_node = node;
+ pr_info("Booting Node %3d, Processors ", node);
+ }
+ pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+ return;
+ } else
+ pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+ node, cpu, apicid);
+}
+
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -737,9 +757,8 @@ do_rest:
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();
- /* So we see what's up */
- printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
- cpu, apicid, start_ip);
+ /* So we see what's up */
+ announce_cpu(cpu, apicid);
/*
* This grunge runs the startup process for
@@ -788,21 +807,17 @@ do_rest:
udelay(100);
}
- if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
- /* number CPUs logically, starting from 1 (BSP is 0) */
- pr_debug("OK.\n");
- printk(KERN_INFO "CPU%d: ", cpu);
- print_cpu_info(&cpu_data(cpu));
- pr_debug("CPU has booted.\n");
- } else {
+ if (cpumask_test_cpu(cpu, cpu_callin_mask))
+ pr_debug("CPU%d: has booted.\n", cpu);
+ else {
boot_error = 1;
if (*((volatile unsigned char *)trampoline_base)
== 0xA5)
/* trampoline started but...? */
- printk(KERN_ERR "Stuck ??\n");
+ pr_err("CPU%d: Stuck ??\n", cpu);
else
/* trampoline code not run */
- printk(KERN_ERR "Not responding.\n");
+ pr_err("CPU%d: Not responding.\n", cpu);
if (apic->inquire_remote_apic)
apic->inquire_remote_apic(apicid);
}
@@ -1293,14 +1308,16 @@ void native_cpu_die(unsigned int cpu)
for (i = 0; i < 10; i++) {
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
- printk(KERN_INFO "CPU %d is now offline\n", cpu);
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
if (1 == num_online_cpus())
alternatives_smp_switch(0);
return;
}
msleep(100);
}
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+ pr_err("CPU %u didn't die...\n", cpu);
}
void play_dead_common(void)
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index cd022121cab6..c652ef62742d 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -12,21 +12,19 @@
#endif
/* ready for x86_64 and x86 */
-unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base;
void __init reserve_trampoline_memory(void)
{
-#ifdef CONFIG_X86_32
- /*
- * But first pinch a few for the stack/trampoline stuff
- * FIXME: Don't need the extra page at 4K, but need to fix
- * trampoline before removing it. (see the GDT stuff)
- */
- reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
-#endif
+ unsigned long mem;
+
/* Has to be in very low memory so we can execute real-mode AP code. */
- reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE,
- "TRAMPOLINE");
+ mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+ if (mem == -1L)
+ panic("Cannot allocate trampoline\n");
+
+ trampoline_base = __va(mem);
+ reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
}
/*
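The trampoline now takes its below-1MiB slot from the e820 map at boot instead of a hard-coded TRAMPOLINE_BASE. A stand-alone sketch of the underlying "first free, aligned range under 1 MiB" search; the reserved-range table is invented and stands in for the real e820 data:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

struct range { unsigned long start, end; };	/* [start, end) is reserved */

/* illustrative stand-in for the e820 reserved map */
static const struct range reserved[] = {
	{ 0x00000, 0x01000 },	/* real-mode IVT/BDA */
	{ 0x9f000, 0xa0000 },	/* EBDA */
};

static unsigned long find_low_area(unsigned long size, unsigned long align)
{
	unsigned long addr = 0;
	unsigned int i;

again:
	addr = ALIGN_UP(addr, align);
	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		if (addr < reserved[i].end && addr + size > reserved[i].start) {
			addr = reserved[i].end;	/* overlaps: skip past it */
			goto again;
		}
	}
	return addr + size <= 0x100000 ? addr : -1UL;	/* must stay below 1 MiB */
}

int main(void)
{
	printf("trampoline at %#lx\n", find_low_area(0x1000, 0x1000));
	return 0;
}

find_e820_area() in the patch does the same kind of scan over the real memory map and panics if nothing suitable is found.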
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index eed156851f5d..0aa5fed8b9e6 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
* previous TSC that was measured (possibly on
* another CPU) and update the previous TSC timestamp.
*/
- __raw_spin_lock(&sync_lock);
+ arch_spin_lock(&sync_lock);
prev = last_tsc;
rdtsc_barrier();
now = get_cycles();
rdtsc_barrier();
last_tsc = now;
- __raw_spin_unlock(&sync_lock);
+ arch_spin_unlock(&sync_lock);
/*
* Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
* we saw a time-warp of the TSC going backwards:
*/
if (unlikely(prev > now)) {
- __raw_spin_lock(&sync_lock);
+ arch_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
nr_warps++;
- __raw_spin_unlock(&sync_lock);
+ arch_spin_unlock(&sync_lock);
}
}
WARN(!(now-start),
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3de0b37ec038..1d9b33843c80 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
static int svm_hardware_enable(void *garbage)
{
- struct svm_cpu_data *svm_data;
+ struct svm_cpu_data *sd;
uint64_t efer;
struct descriptor_table gdt_descr;
struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
me);
return -EINVAL;
}
- svm_data = per_cpu(svm_data, me);
+ sd = per_cpu(svm_data, me);
- if (!svm_data) {
+ if (!sd) {
printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
me);
return -EINVAL;
}
- svm_data->asid_generation = 1;
- svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
- svm_data->next_asid = svm_data->max_asid + 1;
+ sd->asid_generation = 1;
+ sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+ sd->next_asid = sd->max_asid + 1;
kvm_get_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.base;
- svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+ sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
- wrmsrl(MSR_VM_HSAVE_PA,
- page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+ wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
return 0;
}
static void svm_cpu_uninit(int cpu)
{
- struct svm_cpu_data *svm_data
- = per_cpu(svm_data, raw_smp_processor_id());
+ struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
- if (!svm_data)
+ if (!sd)
return;
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
- __free_page(svm_data->save_area);
- kfree(svm_data);
+ __free_page(sd->save_area);
+ kfree(sd);
}
static int svm_cpu_init(int cpu)
{
- struct svm_cpu_data *svm_data;
+ struct svm_cpu_data *sd;
int r;
- svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
- if (!svm_data)
+ sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+ if (!sd)
return -ENOMEM;
- svm_data->cpu = cpu;
- svm_data->save_area = alloc_page(GFP_KERNEL);
+ sd->cpu = cpu;
+ sd->save_area = alloc_page(GFP_KERNEL);
r = -ENOMEM;
- if (!svm_data->save_area)
+ if (!sd->save_area)
goto err_1;
- per_cpu(svm_data, cpu) = svm_data;
+ per_cpu(svm_data, cpu) = sd;
return 0;
err_1:
- kfree(svm_data);
+ kfree(sd);
return r;
}
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
#endif
}
-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
- if (svm_data->next_asid > svm_data->max_asid) {
- ++svm_data->asid_generation;
- svm_data->next_asid = 1;
+ if (sd->next_asid > sd->max_asid) {
+ ++sd->asid_generation;
+ sd->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
- svm->asid_generation = svm_data->asid_generation;
- svm->vmcb->control.asid = svm_data->next_asid++;
+ svm->asid_generation = sd->asid_generation;
+ svm->vmcb->control.asid = sd->next_asid++;
}
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
{
int cpu = raw_smp_processor_id();
- struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
- svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
load_TR_desc();
}
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
- struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
/* FIXME: handle wraparound of asid_generation */
- if (svm->asid_generation != svm_data->asid_generation)
- new_asid(svm, svm_data);
+ if (svm->asid_generation != sd->asid_generation)
+ new_asid(svm, sd);
}
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 41628b104b9e..872834177937 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -7,7 +7,6 @@ struct msr_info {
u32 msr_no;
struct msr reg;
struct msr *msrs;
- int off;
int err;
};
@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
int this_cpu = raw_smp_processor_id();
if (rv->msrs)
- reg = &rv->msrs[this_cpu - rv->off];
+ reg = per_cpu_ptr(rv->msrs, this_cpu);
else
reg = &rv->reg;
@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
int this_cpu = raw_smp_processor_id();
if (rv->msrs)
- reg = &rv->msrs[this_cpu - rv->off];
+ reg = per_cpu_ptr(rv->msrs, this_cpu);
else
reg = &rv->reg;
@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
memset(&rv, 0, sizeof(rv));
- rv.off = cpumask_first(mask);
rv.msrs = msrs;
rv.msr_no = msr_no;
@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
}
EXPORT_SYMBOL(wrmsr_on_cpus);
+struct msr *msrs_alloc(void)
+{
+ struct msr *msrs = NULL;
+
+ msrs = alloc_percpu(struct msr);
+ if (!msrs) {
+ pr_warning("%s: error allocating msrs\n", __func__);
+ return NULL;
+ }
+
+ return msrs;
+}
+EXPORT_SYMBOL(msrs_alloc);
+
+void msrs_free(struct msr *msrs)
+{
+ free_percpu(msrs);
+}
+EXPORT_SYMBOL(msrs_free);
+
/* These "safe" variants are slower and should be used when the target MSR
may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
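With msrs now a true per-cpu allocation, callers no longer need the cpumask offset. A hedged kernel-module sketch of how the msrs_alloc()/rdmsr_on_cpus()/msrs_free() interfaces added above might be used; the MSR number and module names are purely illustrative:

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/msr.h>

#define DEMO_MSR 0x000000e7	/* illustrative MSR number */

static int __init msr_demo_init(void)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();		/* one struct msr per possible CPU */
	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(cpu_online_mask, DEMO_MSR, msrs);

	for_each_online_cpu(cpu)
		pr_info("cpu%d: msr %#x = %#llx\n", cpu, DEMO_MSR,
			(unsigned long long)per_cpu_ptr(msrs, cpu)->q);

	msrs_free(msrs);
	return 0;
}

static void __exit msr_demo_exit(void) { }

module_init(msr_demo_init);
module_exit(msr_demo_exit);
MODULE_LICENSE("GPL");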
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 4c765e9c4664..34a3291ca103 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -20,7 +20,7 @@
* Derived from the read-mod example from relay-examples by Tom Zanussi.
*/
-#define pr_fmt(fmt) "mmiotrace: "
+#define pr_fmt(fmt) "mmiotrace: " fmt
#define DEBUG 1
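The fix above matters because pr_fmt() is spliced textually into every pr_*() call in the file; without the trailing fmt, the caller's own format string was silently dropped. A small stand-alone sketch of the expansion, with an invented variable:

#include <stdio.h>

/* correct form: the prefix is prepended, the caller's format survives */
#define pr_fmt(fmt)		"mmiotrace: " fmt
#define pr_info(fmt, ...)	printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	int traces = 3;	/* illustrative value */

	/* expands to printf("mmiotrace: enabled, %d traces\n", traces) */
	pr_info("enabled, %d traces\n", traces);
	return 0;
}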
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0ba5fc..563d20504988 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
cpumask_var_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(resched_irq, cpu) = rc;
+ per_cpu(xen_resched_irq, cpu) = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(callfunc_irq, cpu) = rc;
+ per_cpu(xen_callfunc_irq, cpu) = rc;
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
debug_name, NULL);
if (rc < 0)
goto fail;
- per_cpu(debug_irq, cpu) = rc;
+ per_cpu(xen_debug_irq, cpu) = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(callfuncsingle_irq, cpu) = rc;
+ per_cpu(xen_callfuncsingle_irq, cpu) = rc;
return 0;
fail:
- if (per_cpu(resched_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- if (per_cpu(callfunc_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- if (per_cpu(debug_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
- if (per_cpu(callfuncsingle_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ if (per_cpu(xen_resched_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ if (per_cpu(xen_debug_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+ NULL);
return rc;
}
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
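The xen_ prefixes above exist because per-cpu symbols now live in a flat global namespace, so short names like resched_irq risk colliding with other subsystems; the access pattern itself is unchanged. A minimal kernel-style sketch with an invented variable name:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, xen_demo_irq) = -1;	/* illustrative per-cpu slot */

static void demo_record_irq(unsigned int cpu, int irq)
{
	per_cpu(xen_demo_irq, cpu) = irq;	/* store this CPU's binding */
}

static int demo_lookup_irq(unsigned int cpu)
{
	return per_cpu(xen_demo_irq, cpu);	/* read it back later */
}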
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108df..24ded31b5aec 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
unsigned short spinners; /* count of waiting cpus */
};
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
return xl->lock != 0;
}
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
return xl->spinners != 0;
}
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
__get_cpu_var(lock_spinners) = prev;
}
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
return ret;
}
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
spin_time_accum_total(start_spin);
}
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
{
__xen_spin_lock(lock, false);
}
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
}
}
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853120d8..0d3f07cd1b5f 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
#define NS_PER_TICK (1000000000LL / HZ)
/* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
/* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(runstate);
+ state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
- return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+ return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}
void xen_setup_runstate_info(int cpu)
{
struct vcpu_register_runstate_memory_area area;
- area.addr.v = &per_cpu(runstate, cpu);
+ area.addr.v = &per_cpu(xen_runstate, cpu);
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
WARN_ON(state.state != RUNSTATE_running);
- snap = &__get_cpu_var(runstate_snapshot);
+ snap = &__get_cpu_var(xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
/* Add the appropriate number of ticks of stolen time,
including any left-overs from last time. */
- stolen = runnable + offline + __get_cpu_var(residual_stolen);
+ stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
if (stolen < 0)
stolen = 0;
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
- __get_cpu_var(residual_stolen) = stolen;
+ __get_cpu_var(xen_residual_stolen) = stolen;
account_steal_ticks(ticks);
/* Add the appropriate number of ticks of blocked time,
including any left-overs from last time. */
- blocked += __get_cpu_var(residual_blocked);
+ blocked += __get_cpu_var(xen_residual_blocked);
if (blocked < 0)
blocked = 0;
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
- __get_cpu_var(residual_blocked) = blocked;
+ __get_cpu_var(xen_residual_blocked) = blocked;
account_idle_ticks(ticks);
}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a1badb32fcda..8cd38484e130 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63d..e2f80463ed0d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
*/
struct cfq_queue oom_cfqq;
- unsigned long last_end_sync_rq;
+ unsigned long last_delayed_sync;
/* List of cfq groups being managed on this device*/
struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
- CFQ_CFQQ_FLAG_wait_busy_done, /* Got new request. Expire the queue */
};
#define CFQ_CFQQ_FNS(name) \
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
#undef CFQ_CFQQ_FNS
#ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_wait_busy(cfqq);
- cfq_clear_cfqq_wait_busy_done(cfqq);
/*
* store what was left of this slice, if the queue idled/timed out
@@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
return NULL;
/*
+ * Don't search priority tree if it's the only queue in the group.
+ */
+ if (cur_cfqq->cfqg->nr_cfqq == 1)
+ return NULL;
+
+ /*
* We should notice if some of the queues are cooperating, eg
* working closely on the same area of the disk. In that case,
* we can group them together and don't waste time idling.
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
cfqd->serving_type = cfqg->saved_workload;
cfqd->serving_prio = cfqg->saved_serving_prio;
- }
+ } else
+ cfqd->workload_expires = jiffies - 1;
+
choose_service_tree(cfqd, cfqg);
}
@@ -2128,14 +2133,35 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
if (!cfqd->rq_queued)
return NULL;
+
/*
- * The active queue has run out of time, expire it and select new.
+ * We were waiting for the group to get backlogged. Expire the queue.
*/
- if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
- && !cfq_cfqq_must_dispatch(cfqq))
+ if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
goto expire;
/*
+ * The active queue has run out of time, expire it and select new.
+ */
+ if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+ /*
+ * If the slice had not expired at the completion of the last request,
+ * we might not have turned on the wait_busy flag. Don't expire the
+ * queue yet; allow the group to get backlogged.
+ *
+ * The very fact that we have used the slice means we have been idling
+ * all along on this queue, and it should be OK to wait for this
+ * request to complete.
+ */
+ if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+ && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ } else
+ goto expire;
+ }
+
+ /*
* The active queue has requests and isn't expired, allow it to
* dispatch.
*/
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* based on the last sync IO we serviced
*/
if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
- unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+ unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
unsigned int depth;
depth = last_sync / cfqd->cfq_slice[1];
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (cfqq == cfqd->active_queue) {
- if (cfq_cfqq_wait_busy(cfqq)) {
- cfq_clear_cfqq_wait_busy(cfqq);
- cfq_mark_cfqq_wait_busy_done(cfqq);
- }
/*
* Remember that we saw a request from this process, but
* don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
del_timer(&cfqd->idle_slice_timer);
+ cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else
cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
cfqd->hw_tag = 0;
}
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct cfq_io_context *cic = cfqd->active_cic;
+
+ /* If there are other queues in the group, don't wait */
+ if (cfqq->cfqg->nr_cfqq > 1)
+ return false;
+
+ if (cfq_slice_used(cfqq))
+ return true;
+
+ /* if slice left is less than think time, wait busy */
+ if (cic && sample_valid(cic->ttime_samples)
+ && (cfqq->slice_end - jiffies < cic->ttime_mean))
+ return true;
+
+ /*
+ * If the think time is less than a jiffy, then ttime_mean=0 and the
+ * check above will not be true. It might happen that the slice has not
+ * expired yet but will expire soon (4-5 ns) during select_queue(). To
+ * cover the case where the think time is less than a jiffy, mark the
+ * queue wait busy if only 1 jiffy is left in the slice.
+ */
+ if (cfqq->slice_end - jiffies == 1)
+ return true;
+
+ return false;
+}
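The think-time heuristic is easier to see with concrete numbers. A stand-alone sketch of the same decision using invented jiffy values and simplified stand-in fields (this mirrors, not replaces, cfq_should_wait_busy() above):

#include <stdio.h>
#include <stdbool.h>

struct demo_queue {
	int nr_cfqq_in_group;	/* queues in the cgroup */
	long slice_end;		/* jiffy at which the slice expires */
	long ttime_mean;	/* mean think time, in jiffies */
};

static bool demo_should_wait_busy(const struct demo_queue *q, long jiffies)
{
	if (q->nr_cfqq_in_group > 1)
		return false;			/* others can use the slice */
	if (jiffies >= q->slice_end)
		return true;			/* slice already used up */
	if (q->slice_end - jiffies < q->ttime_mean)
		return true;			/* not enough slice left to think */
	return q->slice_end - jiffies == 1;	/* sub-jiffy think time case */
}

int main(void)
{
	struct demo_queue q = { 1, 105, 8 };	/* slice ends at jiffy 105 */

	printf("at jiffy 100: %d\n", demo_should_wait_busy(&q, 100)); /* 5 < 8  -> 1 */
	printf("at jiffy  90: %d\n", demo_should_wait_busy(&q, 90));  /* 15 >= 8 -> 0 */
	return 0;
}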
+
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (sync) {
RQ_CIC(rq)->last_end_request = now;
- cfqd->last_end_sync_rq = now;
+ if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+ cfqd->last_delayed_sync = now;
}
/*
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
/*
- * If this queue consumed its slice and this is last queue
- * in the group, wait for next request before we expire
- * the queue
+ * Should we wait for the next request to come in before we expire
+ * the queue?
*/
- if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+ if (cfq_should_wait_busy(cfqd, cfqq)) {
cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
cfq_mark_cfqq_wait_busy(cfqq);
}
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_latency = 1;
cfqd->cfq_group_isolation = 0;
cfqd->hw_tag = -1;
- cfqd->last_end_sync_rq = jiffies;
+ /*
+ * We optimistically start out assuming that sync ops weren't delayed in
+ * the last second, in order to have a larger depth for async operations.
+ */
+ cfqd->last_delayed_sync = jiffies - HZ;
INIT_RCU_HEAD(&cfqd->rcu);
return cfqd;
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index f8ae0d94a647..704c14115323 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct cryptd_cpu_queue *cpu_queue;
cpu = get_cpu();
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
put_cpu();
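this_cpu_ptr() is equivalent to per_cpu_ptr() on the running CPU, but avoids re-deriving the CPU number from the already-pinned context. A hedged kernel-style sketch of the pattern; the structure and names are invented:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>

struct demo_queue {
	int pending;			/* illustrative per-cpu work counter */
};

static struct demo_queue *demo_queues;

static int __init demo_init(void)
{
	demo_queues = alloc_percpu(struct demo_queue);
	return demo_queues ? 0 : -ENOMEM;
}

static void demo_enqueue(void)
{
	struct demo_queue *q;
	int cpu;

	cpu = get_cpu();		/* pin to the current CPU */
	q = this_cpu_ptr(demo_queues);	/* == per_cpu_ptr(demo_queues, cpu) */
	q->pending++;			/* real code would queue work to cpu here */
	pr_debug("queued on cpu %d\n", cpu);
	put_cpu();
}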
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 26e434ad373c..8a07363417ed 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -96,6 +96,8 @@ source "drivers/edac/Kconfig"
source "drivers/rtc/Kconfig"
+source "drivers/clocksource/Kconfig"
+
source "drivers/dma/Kconfig"
source "drivers/dca/Kconfig"
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 27fd775375b0..958bd1540c30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
* boot up and this data does not change there after. Hence this
* operation should be safe. No locking required.
*/
- addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+ addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1fe5536d404f..70122791683d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev,
}
static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+#ifdef CONFIG_HUGETLBFS
+/*
+ * hugetlbfs per node attributes registration interface:
+ * When/if the hugetlb[fs] subsystem initializes [sometime after this module],
+ * it will register its per node attributes for all online nodes with
+ * memory. It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
+ * Once these hooks have been initialized, the node driver will call into
+ * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ */
+static node_registration_func_t __hugetlb_register_node;
+static node_registration_func_t __hugetlb_unregister_node;
+
+static inline bool hugetlb_register_node(struct node *node)
+{
+ if (__hugetlb_register_node &&
+ node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+ __hugetlb_register_node(node);
+ return true;
+ }
+ return false;
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+ if (__hugetlb_unregister_node)
+ __hugetlb_unregister_node(node);
+}
+
+void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister)
+{
+ __hugetlb_register_node = doregister;
+ __hugetlb_unregister_node = unregister;
+}
+#else
+static inline void hugetlb_register_node(struct node *node) {}
+
+static inline void hugetlb_unregister_node(struct node *node) {}
+#endif
+
/*
* register_node - Setup a sysfs device for a node.
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_distance);
scan_unevictable_register_node(node);
+
+ hugetlb_register_node(node);
}
return error;
}
@@ -216,6 +259,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_distance);
scan_unevictable_unregister_node(node);
+ hugetlb_unregister_node(node); /* no-op, if memoryless node */
sysdev_unregister(&node->sysdev);
}
@@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES];
*/
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- if (node_online(nid)) {
- struct sys_device *obj = get_cpu_sysdev(cpu);
- if (!obj)
- return 0;
- return sysfs_create_link(&node_devices[nid].sysdev.kobj,
- &obj->kobj,
- kobject_name(&obj->kobj));
- }
+ int ret;
+ struct sys_device *obj;
- return 0;
+ if (!node_online(nid))
+ return 0;
+
+ obj = get_cpu_sysdev(cpu);
+ if (!obj)
+ return 0;
+
+ ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ &obj->kobj,
+ kobject_name(&obj->kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link(&obj->kobj,
+ &node_devices[nid].sysdev.kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- if (node_online(nid)) {
- struct sys_device *obj = get_cpu_sysdev(cpu);
- if (obj)
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
- kobject_name(&obj->kobj));
- }
+ struct sys_device *obj;
+
+ if (!node_online(nid))
+ return 0;
+
+ obj = get_cpu_sysdev(cpu);
+ if (!obj)
+ return 0;
+
+ sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ kobject_name(&obj->kobj));
+ sysfs_remove_link(&obj->kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
+
return 0;
}
@@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn)
/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
{
+ int ret;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk)
@@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
+ ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
&mem_blk->sysdev.kobj,
kobject_name(&mem_blk->sysdev.kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
+ &node_devices[nid].sysdev.kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
- nodemask_t unlinked_nodes;
+ NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
unsigned long pfn, sect_start_pfn, sect_end_pfn;
- if (!mem_blk)
+ if (!mem_blk) {
+ NODEMASK_FREE(unlinked_nodes);
return -EFAULT;
- nodes_clear(unlinked_nodes);
+ }
+ if (!unlinked_nodes)
+ return -ENOMEM;
+ nodes_clear(*unlinked_nodes);
sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
@@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
continue;
if (!node_online(nid))
continue;
- if (node_test_and_set(nid, unlinked_nodes))
+ if (node_test_and_set(nid, *unlinked_nodes))
continue;
sysfs_remove_link(&node_devices[nid].sysdev.kobj,
kobject_name(&mem_blk->sysdev.kobj));
+ sysfs_remove_link(&mem_blk->sysdev.kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
}
+ NODEMASK_FREE(unlinked_nodes);
return 0;
}
@@ -345,9 +420,77 @@ static int link_mem_sections(int nid)
}
return err;
}
-#else
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * Handle per-node hstate attribute [un]registration on transitions
+ * to/from memoryless state.
+ */
+static void node_hugetlb_work(struct work_struct *work)
+{
+ struct node *node = container_of(work, struct node, node_work);
+
+ /*
+ * We only get here when a node transitions to/from memoryless state.
+ * We can detect which transition occurred by examining whether the
+ * node has memory now. hugetlb_register_node() already checks this,
+ * so we try to register the attributes. If that fails, then the
+ * node has transitioned to memoryless; try to unregister the
+ * attributes.
+ */
+ if (!hugetlb_register_node(node))
+ hugetlb_unregister_node(node);
+}
+
+static void init_node_hugetlb_work(int nid)
+{
+ INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+}
+
+static int node_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mnb = arg;
+ int nid = mnb->status_change_nid;
+
+ switch (action) {
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ /*
+ * offload per node hstate [un]registration to a work thread
+ * when transitioning to/from memoryless state.
+ */
+ if (nid != NUMA_NO_NODE)
+ schedule_work(&node_devices[nid].node_work);
+ break;
+
+ case MEM_GOING_ONLINE:
+ case MEM_GOING_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_HUGETLBFS */
+#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
+
static int link_mem_sections(int nid) { return 0; }
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
+ !defined(CONFIG_HUGETLBFS)
+static inline int node_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ return NOTIFY_OK;
+}
+
+static void init_node_hugetlb_work(int nid) { }
+
+#endif
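The node driver reacts to memory hot-add/remove through the generic memory notifier chain, deferring the sysfs work to a workqueue because the notifier runs in a context where registration is awkward. A hedged sketch of registering such a callback, modelled on the code above; the priority value and names are invented:

#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <linux/numa.h>
#include <linux/init.h>

#define DEMO_CALLBACK_PRI	2	/* illustrative: below the slab callback */

static void demo_node_work(struct work_struct *work)
{
	/* re-register or unregister per-node attributes here */
}
static DECLARE_WORK(demo_work, demo_node_work);

static int demo_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;

	/* only react once the transition has actually happened */
	if ((action == MEM_ONLINE || action == MEM_OFFLINE) &&
	    mnb->status_change_nid != NUMA_NO_NODE)
		schedule_work(&demo_work);

	return NOTIFY_OK;
}

static int __init demo_init(void)
{
	hotplug_memory_notifier(demo_memory_callback, DEMO_CALLBACK_PRI);
	return 0;
}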
int register_one_node(int nid)
{
@@ -371,6 +514,9 @@ int register_one_node(int nid)
/* link memory sections under this node */
error = link_mem_sections(nid);
+
+ /* initialize work queue for memory hot plug */
+ init_node_hugetlb_work(nid);
}
return error;
@@ -460,13 +606,17 @@ static int node_states_init(void)
return err;
}
+#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
int ret;
ret = sysdev_class_register(&node_class);
- if (!ret)
+ if (!ret) {
ret = node_states_init();
+ hotplug_memory_notifier(node_memory_callback,
+ NODE_CALLBACK_PRI);
+ }
/*
* Note: we're not going to unregister the node class if we fail
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 436a090b532b..4e0726aa53b0 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
- != CRYPTO_ALG_TYPE_HASH) {
+ if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
retcode = ERR_AUTH_ALG_ND;
goto fail;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 5c01f747571b..3266b4f65daa 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3497,6 +3497,9 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
+ if (WARN_ON(size < 0 || size > sizeof(inparam)))
+ return -EINVAL;
+
/* copyin */
CLEARSTRUCT(&inparam);
if (_IOC_DIR(cmd) & _IOC_WRITE)
@@ -4162,7 +4165,7 @@ static int floppy_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops floppy_pm_ops = {
+static const struct dev_pm_ops floppy_pm_ops = {
.resume = floppy_resume,
.restore = floppy_resume,
};
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0877d3628fda..d1fd032e7514 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -169,13 +169,6 @@ static int __init xd_init(void)
init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
- if (!xd_dma_buffer)
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- if (!xd_dma_buffer) {
- printk(KERN_ERR "xd: Out of memory.\n");
- return -ENOMEM;
- }
-
err = -EBUSY;
if (register_blkdev(XT_DISK_MAJOR, "xd"))
goto out1;
@@ -202,6 +195,19 @@ static int __init xd_init(void)
xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
}
+ /*
+ * With the drive detected, xd_maxsectors should now be known.
+ * If xd_maxsectors is 0, nothing was detected and we fall through
+ * to return -ENODEV
+ */
+ if (!xd_dma_buffer && xd_maxsectors) {
+ xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+ if (!xd_dma_buffer) {
+ printk(KERN_ERR "xd: Out of memory.\n");
+ goto out3;
+ }
+ }
+
err = -ENODEV;
if (!xd_drives)
goto out3;
@@ -249,15 +255,17 @@ out4:
for (i = 0; i < xd_drives; i++)
put_disk(xd_gendisk[i]);
out3:
- release_region(xd_iobase,4);
+ if (xd_maxsectors)
+ release_region(xd_iobase,4);
+
+ if (xd_dma_buffer)
+ xd_dma_mem_free((unsigned long)xd_dma_buffer,
+ xd_maxsectors * 0x200);
out2:
blk_cleanup_queue(xd_queue);
out1a:
unregister_blkdev(XT_DISK_MAJOR, "xd");
out1:
- if (xd_dma_buffer)
- xd_dma_mem_free((unsigned long)xd_dma_buffer,
- xd_maxsectors * 0x200);
return err;
Enomem:
err = -ENOMEM;
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index b8a5d654d3d0..fe62bd0e17b7 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -931,7 +931,7 @@ static struct hv_ops hvc_iucv_ops = {
};
/* Suspend / resume device operations */
-static struct dev_pm_ops hvc_iucv_pm_ops = {
+static const struct dev_pm_ops hvc_iucv_pm_ops = {
.freeze = hvc_iucv_pm_freeze,
.thaw = hvc_iucv_pm_restore_thaw,
.restore = hvc_iucv_pm_restore_thaw,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index fba76fb55abf..be832b6f8279 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -34,6 +34,16 @@
# include <linux/efi.h>
#endif
+static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+{
+ unsigned long sz;
+
+ sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
+
+ return min(sz, size);
+}
+
/*
* Architectures vary in how they handle caching for addresses
* outside of main memory.
@@ -126,9 +136,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
- sz = PAGE_SIZE - p;
- if (sz > count)
- sz = count;
+ sz = size_inside_page(p, count);
if (sz > 0) {
if (clear_user(buf, sz))
return -EFAULT;
@@ -141,15 +149,9 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
+ unsigned long remaining;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
return -EPERM;
@@ -163,12 +165,10 @@ static ssize_t read_mem(struct file * file, char __user * buf,
if (!ptr)
return -EFAULT;
- if (copy_to_user(buf, ptr, sz)) {
- unxlate_dev_mem_ptr(p, ptr);
- return -EFAULT;
- }
-
+ remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
buf += sz;
p += sz;
@@ -196,9 +196,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE - p;
- if (sz > count)
- sz = count;
+ sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
@@ -208,15 +206,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
return -EPERM;
@@ -234,16 +224,14 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
}
copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
- unxlate_dev_mem_ptr(p, ptr);
if (written)
break;
return -EFAULT;
}
- unxlate_dev_mem_ptr(p, ptr);
-
buf += sz;
p += sz;
count -= sz;
@@ -417,27 +405,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE && low_count > 0) {
- size_t tmp = PAGE_SIZE - p;
- if (tmp > low_count) tmp = low_count;
- if (clear_user(buf, tmp))
+ sz = size_inside_page(p, low_count);
+ if (clear_user(buf, sz))
return -EFAULT;
- buf += tmp;
- p += tmp;
- read += tmp;
- low_count -= tmp;
- count -= tmp;
+ buf += sz;
+ p += sz;
+ read += sz;
+ low_count -= sz;
+ count -= sz;
}
#endif
while (low_count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, low_count);
+ sz = size_inside_page(p, low_count);
/*
* On ia64 if a page has been mapped somewhere as
@@ -461,21 +440,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
if (!kbuf)
return -ENOMEM;
while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len = vread(kbuf, (char *)p, len);
- if (!len)
+ sz = size_inside_page(p, count);
+ sz = vread(kbuf, (char *)p, sz);
+ if (!sz)
break;
- if (copy_to_user(buf, kbuf, len)) {
+ if (copy_to_user(buf, kbuf, sz)) {
free_page((unsigned long)kbuf);
return -EFAULT;
}
- count -= len;
- buf += len;
- read += len;
- p += len;
+ count -= sz;
+ buf += sz;
+ read += sz;
+ p += sz;
}
free_page((unsigned long)kbuf);
}
@@ -485,7 +461,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
static inline ssize_t
-do_write_kmem(void *p, unsigned long realp, const char __user * buf,
+do_write_kmem(unsigned long p, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t written, sz;
@@ -494,14 +470,11 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
- if (realp < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE - realp;
- if (sz > count)
- sz = count;
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
- realp += sz;
count -= sz;
written += sz;
}
@@ -509,22 +482,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
while (count > 0) {
char *ptr;
- /*
- * Handle first page in case it's not aligned
- */
- if (-realp & (PAGE_SIZE - 1))
- sz = -realp & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
- ptr = xlate_dev_kmem_ptr(p);
+ ptr = xlate_dev_kmem_ptr((char *)p);
copied = copy_from_user(ptr, buf, sz);
if (copied) {
@@ -535,7 +501,6 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
}
buf += sz;
p += sz;
- realp += sz;
count -= sz;
written += sz;
}
@@ -554,19 +519,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
- ssize_t written;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
if (p < (unsigned long) high_memory) {
-
- wrote = count;
- if (count > (unsigned long) high_memory - p)
- wrote = (unsigned long) high_memory - p;
-
- written = do_write_kmem((void*)p, p, buf, wrote, ppos);
- if (written != wrote)
- return written;
- wrote = written;
+ unsigned long to_write = min_t(unsigned long, count,
+ (unsigned long)high_memory - p);
+ wrote = do_write_kmem(p, buf, to_write, ppos);
+ if (wrote != to_write)
+ return wrote;
p += wrote;
buf += wrote;
count -= wrote;
@@ -577,24 +537,21 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
if (!kbuf)
return wrote ? wrote : -ENOMEM;
while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- if (len) {
- written = copy_from_user(kbuf, buf, len);
- if (written) {
- if (wrote + virtr)
- break;
- free_page((unsigned long)kbuf);
- return -EFAULT;
- }
+ unsigned long sz = size_inside_page(p, count);
+ unsigned long n;
+
+ n = copy_from_user(kbuf, buf, sz);
+ if (n) {
+ if (wrote + virtr)
+ break;
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
}
- len = vwrite(kbuf, (char *)p, len);
- count -= len;
- buf += len;
- virtr += len;
- p += len;
+ sz = vwrite(kbuf, (char *)p, sz);
+ count -= sz;
+ buf += sz;
+ virtr += sz;
+ p += sz;
}
free_page((unsigned long)kbuf);
}
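size_inside_page() simply clamps a transfer to the end of the page containing its start address, which is what all the open-coded "-p & (PAGE_SIZE - 1)" blocks above were doing. A stand-alone sketch with a couple of worked values:

#include <stdio.h>

#define PAGE_SIZE	4096UL

static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
	unsigned long sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return sz < size ? sz : size;	/* min(sz, size) */
}

int main(void)
{
	/* 0x1ff0 is 16 bytes short of a page boundary */
	printf("%lu\n", size_inside_page(0x1ff0, 0x100));	/* -> 16   */
	printf("%lu\n", size_inside_page(0x2000, 0x100));	/* -> 256  */
	printf("%lu\n", size_inside_page(0x2000, 0x2345));	/* -> 4096 */
	return 0;
}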
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 96f1cd086dd2..94a136e96c06 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -60,9 +60,7 @@ static DEFINE_MUTEX(misc_mtx);
* Assigned numbers, used for dynamic minors
*/
#define DYNAMIC_MINORS 64 /* like dynamic majors */
-static unsigned char misc_minors[DYNAMIC_MINORS / 8];
-
-extern int pmu_device_init(void);
+static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
#ifdef CONFIG_PROC_FS
static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
@@ -198,24 +196,23 @@ int misc_register(struct miscdevice * misc)
}
if (misc->minor == MISC_DYNAMIC_MINOR) {
- int i = DYNAMIC_MINORS;
- while (--i >= 0)
- if ( (misc_minors[i>>3] & (1 << (i&7))) == 0)
- break;
- if (i<0) {
+ int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
+ if (i >= DYNAMIC_MINORS) {
mutex_unlock(&misc_mtx);
return -EBUSY;
}
- misc->minor = i;
+ misc->minor = DYNAMIC_MINORS - i - 1;
+ set_bit(i, misc_minors);
}
- if (misc->minor < DYNAMIC_MINORS)
- misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
dev = MKDEV(MISC_MAJOR, misc->minor);
misc->this_device = device_create(misc_class, misc->parent, dev,
misc, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ int i = DYNAMIC_MINORS - misc->minor - 1;
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
err = PTR_ERR(misc->this_device);
goto out;
}
@@ -242,7 +239,7 @@ int misc_register(struct miscdevice * misc)
int misc_deregister(struct miscdevice *misc)
{
- int i = misc->minor;
+ int i = DYNAMIC_MINORS - misc->minor - 1;
if (list_empty(&misc->list))
return -EINVAL;
@@ -250,9 +247,8 @@ int misc_deregister(struct miscdevice *misc)
mutex_lock(&misc_mtx);
list_del(&misc->list);
device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
- if (i < DYNAMIC_MINORS && i>0) {
- misc_minors[i>>3] &= ~(1 << (misc->minor & 7));
- }
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
mutex_unlock(&misc_mtx);
return 0;
}
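The bitmap conversion above keeps the old top-down minor numbering by mapping bit i to minor DYNAMIC_MINORS - i - 1. A stand-alone sketch of the allocate/free pair outside the kernel, using a plain 64-bit word where the kernel uses DECLARE_BITMAP/find_first_zero_bit/set_bit/clear_bit:

#include <stdio.h>

#define DYNAMIC_MINORS	64

static unsigned long long minors;	/* bit i set => slot i in use */

static int minor_alloc(void)
{
	int i;

	for (i = 0; i < DYNAMIC_MINORS; i++)	/* find_first_zero_bit() */
		if (!(minors & (1ULL << i)))
			break;
	if (i >= DYNAMIC_MINORS)
		return -1;
	minors |= 1ULL << i;			/* set_bit() */
	return DYNAMIC_MINORS - i - 1;		/* keep top-down numbering */
}

static void minor_free(int minor)
{
	int i = DYNAMIC_MINORS - minor - 1;

	if (i >= 0 && i < DYNAMIC_MINORS)
		minors &= ~(1ULL << i);		/* clear_bit() */
}

int main(void)
{
	int a = minor_alloc(), b = minor_alloc();

	printf("%d %d\n", a, b);	/* 63 62 */
	minor_free(a);
	printf("%d\n", minor_alloc());	/* 63 again */
	return 0;
}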
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 4008e2ce73c1..fdbcc9fd6d31 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -264,10 +264,16 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
unsigned char contents[NVRAM_BYTES];
unsigned i = *ppos;
unsigned char *tmp;
- int len;
- len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
- if (copy_from_user(contents, buf, len))
+ if (i >= NVRAM_BYTES)
+ return 0; /* Past EOF */
+
+ if (count > NVRAM_BYTES - i)
+ count = NVRAM_BYTES - i;
+ if (count > NVRAM_BYTES)
+ return -EFAULT; /* Can't happen, but prove it to gcc */
+
+ if (copy_from_user(contents, buf, count))
return -EFAULT;
spin_lock_irq(&rtc_lock);
@@ -275,7 +281,7 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
if (!__nvram_check_checksum())
goto checksum_err;
- for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
+ for (tmp = contents; count--; ++i, ++tmp)
__nvram_write_byte(*tmp, i);
__nvram_set_checksum();
diff --git a/drivers/char/random.c b/drivers/char/random.c
index dcd08635cf1b..8258982b49ec 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1245,12 +1245,8 @@ static int proc_do_uuid(ctl_table *table, int write,
if (uuid[8] == 0)
generate_random_uuid(uuid);
- sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x%02x%02x%02x%02x",
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15]);
+ sprintf(buf, "%pU", uuid);
+
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
@@ -1310,7 +1306,7 @@ ctl_table random_table[] = {
/********************************************************************
*
- * Random funtions for networking
+ * Random functions for networking
*
********************************************************************/
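%pU is the vsprintf extension that formats a 16-byte buffer as a canonical UUID string, which is what replaces the hand-rolled 16-argument sprintf above. A hedged kernel-style sketch; the UUID bytes are made up:

#include <linux/kernel.h>

static void demo_print_uuid(void)
{
	/* illustrative UUID bytes */
	unsigned char uuid[16] = {
		0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x02, 0x03,
		0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
	};
	char buf[64];

	/* emits "deadbeef-0001-0203-0405-060708090a0b" */
	sprintf(buf, "%pU", uuid);
	pr_info("uuid: %s\n", buf);
}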
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1e3d728dbf7e..e43fbc66aef0 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -184,12 +184,10 @@ static DECLARE_WORK(console_work, console_callback);
* fg_console is the current virtual console,
* last_console is the last used one,
* want_console is the console we want to switch to,
- * kmsg_redirect is the console for kernel messages,
*/
int fg_console;
int last_console;
int want_console = -1;
-int kmsg_redirect;
/*
* For each existing display, we have a pointer to console currently visible
@@ -2434,6 +2432,37 @@ struct tty_driver *console_driver;
#ifdef CONFIG_VT_CONSOLE
+/**
+ * vt_kmsg_redirect() - Sets/gets the kernel message console
+ * @new: The new virtual terminal number or -1 if the console should stay
+ * unchanged
+ *
+ * By default, the kernel messages are always printed on the current virtual
+ * console. However, the user may modify that default with the
+ * TIOCL_SETKMSGREDIRECT ioctl call.
+ *
+ * This function sets the kernel message console to be @new. It returns the old
+ * virtual console number. The virtual terminal number 0 (both as parameter and
+ * return value) means no redirection (i.e. always printed on the currently
+ * active console).
+ *
+ * The parameter -1 means that only the current console is returned, but the
+ * value is not modified. You may use the macro vt_get_kmsg_redirect() in that
+ * case to make the code more understandable.
+ *
+ * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
+ * the parameter and always returns 0.
+ */
+int vt_kmsg_redirect(int new)
+{
+ static int kmsg_con;
+
+ if (new != -1)
+ return xchg(&kmsg_con, new);
+ else
+ return kmsg_con;
+}
+
/*
* Console on virtual terminal
*
@@ -2448,6 +2477,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
const ushort *start;
ushort cnt = 0;
ushort myx;
+ int kmsg_console;
/* console busy or not yet initialized */
if (!printable)
@@ -2455,8 +2485,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
if (!spin_trylock(&printing_lock))
return;
- if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1))
- vc = vc_cons[kmsg_redirect - 1].d;
+ kmsg_console = vt_get_kmsg_redirect();
+ if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
+ vc = vc_cons[kmsg_console - 1].d;
/* read `x' only after setting currcons properly (otherwise
the `x' macro will read the x of the foreground console). */
@@ -2613,7 +2644,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
ret = set_vesa_blanking(p);
break;
case TIOCL_GETKMSGREDIRECT:
- data = kmsg_redirect;
+ data = vt_get_kmsg_redirect();
ret = __put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
@@ -2623,7 +2654,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
if (get_user(data, p+1))
ret = -EFAULT;
else
- kmsg_redirect = data;
+ vt_kmsg_redirect(data);
}
break;
case TIOCL_GETFGCONSOLE:
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
new file mode 100644
index 000000000000..08f726c5fee5
--- /dev/null
+++ b/drivers/clocksource/Kconfig
@@ -0,0 +1,9 @@
+config CS5535_CLOCK_EVENT_SRC
+ tristate "CS5535/CS5536 high-res timer (MFGPT) events"
+ depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
+ help
+ This driver provides a clock event source based on the MFGPT
+ timer(s) in the CS5535 and CS5536 companion chips.
+ MFGPTs have a better resolution and max interval than the
+ generic PIT, and are suitable for use as high-res timers.
+
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index eef216f7f61d..be61ece6330b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
+obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
new file mode 100644
index 000000000000..27d20fac19d1
--- /dev/null
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -0,0 +1,197 @@
+/*
+ * Clock event driver for the CS5535/CS5536
+ *
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/cs5535.h>
+#include <linux/clockchips.h>
+
+#define DRV_NAME "cs5535-clockevt"
+
+static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+module_param_named(irq, timer_irq, int, 0644);
+MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
+
+/*
+ * We are using the 32.768kHz input clock - it's the only one that has the
+ * ranges we find desirable. The following table lists the suitable
+ * divisors and the associated Hz, minimum interval and the maximum interval:
+ *
+ * Divisor Hz Min Delta (s) Max Delta (s)
+ * 1 32768 .00048828125 2.000
+ * 2 16384 .0009765625 4.000
+ * 4 8192 .001953125 8.000
+ * 8 4096 .00390625 16.000
+ * 16 2048 .0078125 32.000
+ * 32 1024 .015625 64.000
+ * 64 512 .03125 128.000
+ * 128 256 .0625 256.000
+ * 256 128 .125 512.000
+ */
+
+static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
+static struct cs5535_mfgpt_timer *cs5535_event_clock;
+
+/* Selected from the table above */
+
+#define MFGPT_DIVISOR 16
+#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
+#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
+#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
+
+/*
+ * The MFGPT timers on the CS5536 provide us with suitable timers to use
+ * as clock event sources - not as good as a HPET or APIC, but certainly
+ * better than the PIT. This isn't a general purpose MFGPT driver, but
+ * a simplified one designed specifically to act as a clock event source.
+ * For full details about the MFGPT, please consult the CS5536 data sheet.
+ */
+
+static void disable_timer(struct cs5535_mfgpt_timer *timer)
+{
+ /* avoid races by clearing CMP1 and CMP2 unconditionally */
+ cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
+ (uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 |
+ MFGPT_SETUP_CMP2);
+}
+
+static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta)
+{
+ cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta);
+ cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
+
+ cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
+ MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
+}
+
+static void mfgpt_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ disable_timer(cs5535_event_clock);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC)
+ start_timer(cs5535_event_clock, MFGPT_PERIODIC);
+
+ cs5535_tick_mode = mode;
+}
+
+static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ start_timer(cs5535_event_clock, delta);
+ return 0;
+}
+
+static struct clock_event_device cs5535_clockevent = {
+ .name = DRV_NAME,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = mfgpt_set_mode,
+ .set_next_event = mfgpt_next_event,
+ .rating = 250,
+ .cpumask = cpu_all_mask,
+ .shift = 32
+};
+
+static irqreturn_t mfgpt_tick(int irq, void *dev_id)
+{
+ uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP);
+
+ /* See if the interrupt was for us */
+ if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
+ return IRQ_NONE;
+
+ /* Turn off the clock (and clear the event) */
+ disable_timer(cs5535_event_clock);
+
+ if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
+ return IRQ_HANDLED;
+
+ /* Clear the counter */
+ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0);
+
+ /* Restart the clock in periodic mode */
+
+ if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC)
+ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
+ MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
+
+ cs5535_clockevent.event_handler(&cs5535_clockevent);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mfgptirq = {
+ .handler = mfgpt_tick,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .name = DRV_NAME,
+};
+
+static int __init cs5535_mfgpt_init(void)
+{
+ struct cs5535_mfgpt_timer *timer;
+ int ret;
+ uint16_t val;
+
+ timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+ if (!timer) {
+ printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
+ return -ENODEV;
+ }
+ cs5535_event_clock = timer;
+
+ /* Set up the IRQ on the MFGPT side */
+ if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
+ printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
+ timer_irq);
+ return -EIO;
+ }
+
+ /* And register it with the kernel */
+ ret = setup_irq(timer_irq, &mfgptirq);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
+ goto err;
+ }
+
+ /* Set the clock scale and enable the event mode for CMP2 */
+ val = MFGPT_SCALE | (3 << 8);
+
+ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
+
+ /* Set up the clock event */
+ cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
+ cs5535_clockevent.shift);
+ cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
+ &cs5535_clockevent);
+ cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
+ &cs5535_clockevent);
+
+ printk(KERN_INFO DRV_NAME
+ ": Registering MFGPT timer as a clock event, using IRQ %d\n",
+ timer_irq);
+ clockevents_register_device(&cs5535_clockevent);
+
+ return 0;
+
+err:
+ cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
+ printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
+ return -EIO;
+}
+
+module_init(cs5535_mfgpt_init);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f20668c09ce0..67bc2ece7b4b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* - Lock should not be held across
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
*/
-static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
int lock_policy_rwsem_##mode \
(int cpu) \
{ \
- int policy_cpu = per_cpu(policy_cpu, cpu); \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
if (unlikely(!cpu_online(cpu))) { \
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
void unlock_policy_rwsem_read(int cpu)
{
- int policy_cpu = per_cpu(policy_cpu, cpu);
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
void unlock_policy_rwsem_write(int cpu)
{
- int policy_cpu = per_cpu(policy_cpu, cpu);
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
@@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
/* Set proper policy_cpu */
unlock_policy_rwsem_write(cpu);
- per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+ per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
if (lock_policy_rwsem_write(cpu) < 0) {
/* Should not go through policy unlock path */
@@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
if (!cpu_online(j))
continue;
per_cpu(cpufreq_cpu_data, j) = policy;
- per_cpu(policy_cpu, j) = policy->cpu;
+ per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
- per_cpu(policy_cpu, cpu) = cpu;
+ per_cpu(cpufreq_policy_cpu, cpu) = cpu;
ret = (lock_policy_rwsem_write(cpu) < 0);
WARN_ON(ret);
@@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void)
int cpu;
for_each_possible_cpu(cpu) {
- per_cpu(policy_cpu, cpu) = -1;
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a9bd3a05a684..05432216e224 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
ssize_t count = 0;
struct cpufreq_frequency_table *table;
- if (!per_cpu(show_table, cpu))
+ if (!per_cpu(cpufreq_show_table, cpu))
return -ENODEV;
- table = per_cpu(show_table, cpu);
+ table = per_cpu(cpufreq_show_table, cpu);
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu)
{
dprintk("setting show_table for cpu %u to %p\n", cpu, table);
- per_cpu(show_table, cpu) = table;
+ per_cpu(cpufreq_show_table, cpu) = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
dprintk("clearing show_table for cpu %u\n", cpu);
- per_cpu(show_table, cpu) = NULL;
+ per_cpu(cpufreq_show_table, cpu) = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
- return per_cpu(show_table, cpu);
+ return per_cpu(cpufreq_show_table, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index a4bec3f919aa..1c1ceb4f218f 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -69,9 +69,6 @@ static int ladder_select_state(struct cpuidle_device *dev)
int last_residency, last_idx = ldev->last_state_idx;
int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
- if (unlikely(!ldev))
- return 0;
-
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
ladder_do_selection(ldev, last_idx, 0);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 84c51e177269..8c2f3703ec85 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -64,7 +64,7 @@ struct aes_ctx {
u32 *D;
};
-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Tells whether the ACE is capable to generate
the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ok:
for_each_online_cpu(cpu)
- if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
- &ctx->cword.decrypt == per_cpu(last_cword, cpu))
- per_cpu(last_cword, cpu) = NULL;
+ if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+ &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+ per_cpu(paes_last_cword, cpu) = NULL;
return 0;
}
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
{
int cpu = raw_smp_processor_id();
- if (cword != per_cpu(last_cword, cpu))
+ if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
asm volatile ("pushfl; popfl");
#else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
static inline void padlock_store_cword(struct cword *cword)
{
- per_cpu(last_cword, raw_smp_processor_id()) = cword;
+ per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
/*
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c52ac9efd0bf..f15112569c1d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1188,7 +1188,7 @@ static int at_dma_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops at_dma_dev_pm_ops = {
+static const struct dev_pm_ops at_dma_dev_pm_ops = {
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8f99354082ce..6f51a0a7a8bb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
*/
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
- struct dma_chan *chan;
- int cpu;
-
- cpu = get_cpu();
- chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
- put_cpu();
-
- return chan;
+ return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2eea823516a7..285bed0fe17b 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1427,7 +1427,7 @@ static int dw_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops dw_dev_pm_ops = {
+static const struct dev_pm_ops dw_dev_pm_ops = {
.suspend_noirq = dw_suspend_noirq,
.resume_noirq = dw_resume_noirq,
};
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index fb6bb64e8861..3ebc61067e54 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1313,7 +1313,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
}
-static struct dev_pm_ops txx9dmac_dev_pm_ops = {
+static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
.suspend_noirq = txx9dmac_suspend_noirq,
.resume_noirq = txx9dmac_resume_noirq,
};
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5fdd6daa40ea..df5b68433f34 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
+static struct msr *msrs;
+
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
@@ -2495,8 +2497,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
cpumask_var_t mask;
- struct msr *msrs;
- int cpu, nbe, idx = 0;
+ int cpu, nbe;
bool ret = false;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -2507,32 +2508,22 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
get_cpus_on_this_dct_cpumask(mask, nid);
- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
- if (!msrs) {
- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
- __func__);
- free_cpumask_var(mask);
- return false;
- }
-
rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, mask) {
- nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ nbe = reg->l & K8_MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, msrs[idx].q,
+ cpu, reg->q,
(nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
-
- idx++;
}
ret = true;
out:
- kfree(msrs);
free_cpumask_var(mask);
return ret;
}
@@ -2540,8 +2531,7 @@ out:
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
cpumask_var_t cmask;
- struct msr *msrs = NULL;
- int cpu, idx = 0;
+ int cpu;
if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
@@ -2551,34 +2541,27 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
- if (!msrs) {
- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
- __func__);
- return -ENOMEM;
- }
-
rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, cmask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+
if (on) {
- if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & K8_MSR_MCGCTL_NBE)
pvt->flags.ecc_report = 1;
- msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= K8_MSR_MCGCTL_NBE;
} else {
/*
* Turn off ECC reporting only when it was off before
*/
if (!pvt->flags.ecc_report)
- msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~K8_MSR_MCGCTL_NBE;
}
- idx++;
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
- kfree(msrs);
free_cpumask_var(cmask);
return 0;
@@ -3036,6 +3019,8 @@ static int __init amd64_edac_init(void)
if (cache_k8_northbridges() < 0)
return err;
+ msrs = msrs_alloc();
+
err = pci_register_driver(&amd64_pci_driver);
if (err)
return err;
@@ -3071,6 +3056,9 @@ static void __exit amd64_edac_exit(void)
edac_pci_release_generic_ctl(amd64_ctl_pci);
pci_unregister_driver(&amd64_pci_driver);
+
+ msrs_free(msrs);
+ msrs = NULL;
}
module_init(amd64_edac_init);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ebb9e51deb0c..1b03ba1d0834 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -7,7 +7,7 @@ menu "Firmware Drivers"
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
- depends on !IA64
+ depends on X86
help
Say Y or M here if you want to enable BIOS Enhanced Disk Drive
Services real mode BIOS calls to determine which disk
@@ -28,7 +28,7 @@ config EDD_OFF
config FIRMWARE_MEMMAP
bool "Add firmware-provided memory map to sysfs" if EMBEDDED
- default (X86_64 || X86_32)
+ default X86
help
Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
That memory map is used for example by kexec to set up parameter area
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3a2ccb09e2f8..31b983d9462c 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -169,10 +169,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
if (!s)
return;
- sprintf(s,
- "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
- d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
- d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
+ sprintf(s, "%pUB", d);
dmi_ident[slot] = s;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2ad0128c63c6..57ca339924ef 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -174,6 +174,16 @@ config GPIO_ADP5520
comment "PCI GPIO expanders:"
+config GPIO_CS5535
+ tristate "AMD CS5535/CS5536 GPIO support"
+ depends on PCI && !CS5535_GPIO
+ help
+ The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
+ can be used for quite a number of things. The CS5535/6 is found on
+ AMD Geode and Lemote Yeeloong devices.
+
+ If unsure, say N.
+
config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
depends on PCI && VIDEO_BT848=n
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 00a532c9a1e2..270b6d7839f5 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
+obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
diff --git a/drivers/gpio/adp5520-gpio.c b/drivers/gpio/adp5520-gpio.c
index ad05bbc7ffd5..0f93105873cd 100644
--- a/drivers/gpio/adp5520-gpio.c
+++ b/drivers/gpio/adp5520-gpio.c
@@ -34,9 +34,9 @@ static int adp5520_gpio_get_value(struct gpio_chip *chip, unsigned off)
*/
if (test_bit(off, &dev->output))
- adp5520_read(dev->master, GPIO_OUT, &reg_val);
+ adp5520_read(dev->master, ADP5520_GPIO_OUT, &reg_val);
else
- adp5520_read(dev->master, GPIO_IN, &reg_val);
+ adp5520_read(dev->master, ADP5520_GPIO_IN, &reg_val);
return !!(reg_val & dev->lut[off]);
}
@@ -48,9 +48,9 @@ static void adp5520_gpio_set_value(struct gpio_chip *chip,
dev = container_of(chip, struct adp5520_gpio, gpio_chip);
if (val)
- adp5520_set_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ adp5520_set_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
else
- adp5520_clr_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
}
static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
@@ -60,7 +60,8 @@ static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
clear_bit(off, &dev->output);
- return adp5520_clr_bits(dev->master, GPIO_CFG_2, dev->lut[off]);
+ return adp5520_clr_bits(dev->master, ADP5520_GPIO_CFG_2,
+ dev->lut[off]);
}
static int adp5520_gpio_direction_output(struct gpio_chip *chip,
@@ -73,18 +74,21 @@ static int adp5520_gpio_direction_output(struct gpio_chip *chip,
set_bit(off, &dev->output);
if (val)
- ret |= adp5520_set_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
else
- ret |= adp5520_clr_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ ret |= adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
- ret |= adp5520_set_bits(dev->master, GPIO_CFG_2, dev->lut[off]);
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_CFG_2,
+ dev->lut[off]);
return ret;
}
static int __devinit adp5520_gpio_probe(struct platform_device *pdev)
{
- struct adp5520_gpio_platfrom_data *pdata = pdev->dev.platform_data;
+ struct adp5520_gpio_platform_data *pdata = pdev->dev.platform_data;
struct adp5520_gpio *dev;
struct gpio_chip *gc;
int ret, i, gpios;
@@ -129,20 +133,20 @@ static int __devinit adp5520_gpio_probe(struct platform_device *pdev)
gc->label = pdev->name;
gc->owner = THIS_MODULE;
- ret = adp5520_clr_bits(dev->master, GPIO_CFG_1,
+ ret = adp5520_clr_bits(dev->master, ADP5520_GPIO_CFG_1,
pdata->gpio_en_mask);
- if (pdata->gpio_en_mask & GPIO_C3)
- ctl_mask |= C3_MODE;
+ if (pdata->gpio_en_mask & ADP5520_GPIO_C3)
+ ctl_mask |= ADP5520_C3_MODE;
- if (pdata->gpio_en_mask & GPIO_R3)
- ctl_mask |= R3_MODE;
+ if (pdata->gpio_en_mask & ADP5520_GPIO_R3)
+ ctl_mask |= ADP5520_R3_MODE;
if (ctl_mask)
- ret = adp5520_set_bits(dev->master, LED_CONTROL,
+ ret = adp5520_set_bits(dev->master, ADP5520_LED_CONTROL,
ctl_mask);
- ret |= adp5520_set_bits(dev->master, GPIO_PULLUP,
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_PULLUP,
pdata->gpio_pullup_mask);
if (ret) {
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
new file mode 100644
index 000000000000..0fdbe94f24a3
--- /dev/null
+++ b/drivers/gpio/cs5535-gpio.c
@@ -0,0 +1,355 @@
+/*
+ * AMD CS5535/CS5536 GPIO driver
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/cs5535.h>
+
+#define DRV_NAME "cs5535-gpio"
+#define GPIO_BAR 1
+
+/*
+ * Some GPIO pins
+ * 31-29,23 : reserved (always mask out)
+ * 28 : Power Button
+ * 26 : PME#
+ * 22-16 : LPC
+ * 14,15 : SMBus
+ * 9,8 : UART1
+ * 7 : PCI INTB
+ * 3,4 : UART2/DDC
+ * 2 : IDE_IRQ0
+ * 1 : AC_BEEP
+ * 0 : PCI INTA
+ *
+ * If a mask was not specified, allow all except
+ * reserved and Power Button
+ */
+#define GPIO_DEFAULT_MASK 0x0F7FFFFF
+
+static ulong mask = GPIO_DEFAULT_MASK;
+module_param_named(mask, mask, ulong, 0444);
+MODULE_PARM_DESC(mask, "GPIO channel mask.");
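+
+/*
+ * Note added for exposition (not in the original patch): the mask can be
+ * narrowed at load time to keep more pins away from this driver.  For
+ * example, also excluding the SMBus pins (14 and 15) from the default
+ * 0x0F7FFFFF would be:
+ *
+ *	modprobe cs5535-gpio mask=0x0F7F3FFF
+ */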
+
+static struct cs5535_gpio_chip {
+ struct gpio_chip chip;
+ resource_size_t base;
+
+ struct pci_dev *pdev;
+ spinlock_t lock;
+} cs5535_gpio_chip;
+
+/*
+ * The CS5535/CS5536 GPIOs support a number of extra features not defined
+ * by the gpio_chip API, so these are exported. For a full list of the
+ * registers, see include/linux/cs5535.h.
+ */
+
+static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
+ unsigned int reg)
+{
+ if (offset < 16)
+ /* low bank register */
+ outl(1 << offset, chip->base + reg);
+ else
+ /* high bank register */
+ outl(1 << (offset - 16), chip->base + 0x80 + reg);
+}
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_set(chip, offset, reg);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_set);
+
+static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
+ unsigned int reg)
+{
+ if (offset < 16)
+ /* low bank register */
+ outl(1 << (offset + 16), chip->base + reg);
+ else
+ /* high bank register */
+ outl(1 << offset, chip->base + 0x80 + reg);
+}
+
+void cs5535_gpio_clear(unsigned offset, unsigned int reg)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_clear(chip, offset, reg);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_clear);
+
+int cs5535_gpio_isset(unsigned offset, unsigned int reg)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ unsigned long flags;
+ long val;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ if (offset < 16)
+ /* low bank register */
+ val = inl(chip->base + reg);
+ else {
+ /* high bank register */
+ val = inl(chip->base + 0x80 + reg);
+ offset -= 16;
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return (val & (1 << offset)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
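+
+/*
+ * Illustrative sketch (added for exposition, not in the original patch):
+ * a Geode board driver that owns a pin through the generic GPIO API could
+ * use the exports above for the chip-specific registers, e.g. routing a
+ * hypothetical pin 6 to its auxiliary output function:
+ *
+ *	if (!gpio_request(6, "example"))
+ *		cs5535_gpio_set(6, GPIO_OUTPUT_AUX1);
+ */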
+
+/*
+ * Generic gpio_chip API support.
+ */
+
+static int chip_gpio_request(struct gpio_chip *c, unsigned offset)
+{
+ struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+
+ /* check if this pin is available */
+ if ((mask & (1 << offset)) == 0) {
+ dev_info(&chip->pdev->dev,
+ "pin %u is not available (check mask)\n", offset);
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return -EINVAL;
+ }
+
+ /* disable output aux 1 & 2 on this pin */
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX1);
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX2);
+
+ /* disable input aux 1 on this pin */
+ __cs5535_gpio_clear(chip, offset, GPIO_INPUT_AUX1);
+
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return cs5535_gpio_isset(offset, GPIO_OUTPUT_VAL);
+}
+
+static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ if (val)
+ cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
+ else
+ cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
+}
+
+static int chip_direction_input(struct gpio_chip *c, unsigned offset)
+{
+ struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
+{
+ struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+
+ __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE);
+ if (val)
+ __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL);
+ else
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL);
+
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static char *cs5535_gpio_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3",
+ "GPIO4", "GPIO5", "GPIO6", "GPIO7",
+ "GPIO8", "GPIO9", "GPIO10", "GPIO11",
+ "GPIO12", "GPIO13", "GPIO14", "GPIO15",
+ "GPIO16", "GPIO17", "GPIO18", "GPIO19",
+ "GPIO20", "GPIO21", "GPIO22", NULL,
+ "GPIO24", "GPIO25", "GPIO26", "GPIO27",
+ "GPIO28", NULL, NULL, NULL,
+};
+
+static struct cs5535_gpio_chip cs5535_gpio_chip = {
+ .chip = {
+ .owner = THIS_MODULE,
+ .label = DRV_NAME,
+
+ .base = 0,
+ .ngpio = 32,
+ .names = cs5535_gpio_names,
+ .request = chip_gpio_request,
+
+ .get = chip_gpio_get,
+ .set = chip_gpio_set,
+
+ .direction_input = chip_direction_input,
+ .direction_output = chip_direction_output,
+ },
+};
+
+static int __init cs5535_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err;
+ ulong mask_orig = mask;
+
+ /* There are two ways to get the GPIO base address; one is by
+ * fetching it from MSR_LBAR_GPIO, the other is by reading the
+ * PCI BAR info. The latter method is easier (especially across
+ * different architectures), so we'll stick with that for now. If
+ * it turns out to be unreliable in the face of crappy BIOSes, we
+ * can always go back to using MSRs. */
+
+ err = pci_enable_device_io(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable device IO\n");
+ goto done;
+ }
+
+ err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
+ goto done;
+ }
+
+ /* set up the driver-specific struct */
+ cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
+ cs5535_gpio_chip.pdev = pdev;
+ spin_lock_init(&cs5535_gpio_chip.lock);
+
+ dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
+ (unsigned long long) cs5535_gpio_chip.base);
+
+ /* mask out reserved pins */
+ mask &= 0x1F7FFFFF;
+
+ /* do not allow pin 28, Power Button, as it needs special handling
+ * in the PMC (note 12, p. 48) */
+ mask &= ~(1 << 28);
+
+ if (mask_orig != mask)
+ dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
+ mask_orig, mask);
+
+ /* finally, register with the generic GPIO API */
+ err = gpiochip_add(&cs5535_gpio_chip.chip);
+ if (err)
+ goto release_region;
+
+ dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
+ return 0;
+
+release_region:
+ pci_release_region(pdev, GPIO_BAR);
+done:
+ return err;
+}
+
+static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
+{
+ int err;
+
+ err = gpiochip_remove(&cs5535_gpio_chip.chip);
+ if (err) {
+ /* uhh? */
+ dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
+ }
+ pci_release_region(pdev, GPIO_BAR);
+}
+
+static struct pci_device_id cs5535_gpio_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
+
+/*
+ * We can't use the standard PCI driver registration stuff here, since
+ * that allows only one driver to bind to each PCI device (and we want
+ * multiple drivers to be able to bind to the device). Instead, manually
+ * scan for the PCI device, request a single region, and keep track of the
+ * devices that we're using.
+ */
+
+static int __init cs5535_gpio_scan_pci(void)
+{
+ struct pci_dev *pdev;
+ int err = -ENODEV;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
+ pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
+ cs5535_gpio_pci_tbl[i].device, NULL);
+ if (pdev) {
+ err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
+ if (err)
+ pci_dev_put(pdev);
+
+ /* we only support a single CS5535/6 southbridge */
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void __exit cs5535_gpio_free_pci(void)
+{
+ cs5535_gpio_remove(cs5535_gpio_chip.pdev);
+ pci_dev_put(cs5535_gpio_chip.pdev);
+}
+
+static int __init cs5535_gpio_init(void)
+{
+ return cs5535_gpio_scan_pci();
+}
+
+static void __exit cs5535_gpio_exit(void)
+{
+ cs5535_gpio_free_pci();
+}
+
+module_init(cs5535_gpio_init);
+module_exit(cs5535_gpio_exit);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
index 49384a7c5492..7fe881e2bdfb 100644
--- a/drivers/gpio/twl4030-gpio.c
+++ b/drivers/gpio/twl4030-gpio.c
@@ -34,7 +34,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
@@ -80,7 +80,7 @@ static unsigned int gpio_usage_count;
*/
static inline int gpio_twl4030_write(u8 address, u8 data)
{
- return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
+ return twl_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
}
/*----------------------------------------------------------------------*/
@@ -117,7 +117,7 @@ static inline int gpio_twl4030_read(u8 address)
u8 data;
int ret = 0;
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
return (ret < 0) ? ret : data;
}
@@ -142,7 +142,7 @@ static void twl4030_led_set_value(int led, int value)
cached_leden &= ~mask;
else
cached_leden |= mask;
- status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
TWL4030_LED_LEDEN);
mutex_unlock(&gpio_lock);
}
@@ -223,23 +223,23 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
}
/* initialize PWM to always-drive */
- status = twl4030_i2c_write_u8(module, 0x7f,
+ status = twl_i2c_write_u8(module, 0x7f,
TWL4030_PWMx_PWMxOFF);
if (status < 0)
goto done;
- status = twl4030_i2c_write_u8(module, 0x7f,
+ status = twl_i2c_write_u8(module, 0x7f,
TWL4030_PWMx_PWMxON);
if (status < 0)
goto done;
/* init LED to not-driven (high) */
module = TWL4030_MODULE_LED;
- status = twl4030_i2c_read_u8(module, &cached_leden,
+ status = twl_i2c_read_u8(module, &cached_leden,
TWL4030_LED_LEDEN);
if (status < 0)
goto done;
cached_leden &= ~ledclr_mask;
- status = twl4030_i2c_write_u8(module, cached_leden,
+ status = twl_i2c_write_u8(module, cached_leden,
TWL4030_LED_LEDEN);
if (status < 0)
goto done;
@@ -370,7 +370,7 @@ static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
message[i] = bit_mask;
}
- return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
+ return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIOPUPDCTR1, 5);
}
@@ -387,7 +387,7 @@ static int __devinit gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
debounce >>= 8;
message[3] = (debounce & 0x03);
- return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
+ return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIO_DEBEN1, 3);
}
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c
index f9c09a54ec7f..b4468b616890 100644
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/wm831x-gpio.c
@@ -22,8 +22,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/gpio.h>
-
-#define WM831X_GPIO_MAX 16
+#include <linux/mfd/wm831x/irq.h>
struct wm831x_gpio {
struct wm831x *wm831x;
@@ -80,6 +79,17 @@ static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
value << offset);
}
+static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
+ struct wm831x *wm831x = wm831x_gpio->wm831x;
+
+ if (!wm831x->irq_base)
+ return -EINVAL;
+
+ return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+}
+
#ifdef CONFIG_DEBUG_FS
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
@@ -175,6 +185,7 @@ static struct gpio_chip template_chip = {
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
+ .to_irq = wm831x_gpio_to_irq,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = 1,
};
@@ -192,7 +203,7 @@ static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
wm831x_gpio->wm831x = wm831x;
wm831x_gpio->gpio_chip = template_chip;
- wm831x_gpio->gpio_chip.ngpio = WM831X_GPIO_MAX;
+ wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;
wm831x_gpio->gpio_chip.dev = &pdev->dev;
if (pdata && pdata->gpio_base)
wm831x_gpio->gpio_chip.base = pdata->gpio_base;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9e640c62ebd9..95ccbe377f9c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1046,25 +1046,27 @@ config SENSORS_ATK0110
will be called asus_atk0110.
config SENSORS_LIS3LV02D
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
+ tristate "STMicroeletronics LIS3* three-axis digital accelerometer"
depends on INPUT
select INPUT_POLLDEV
select NEW_LEDS
select LEDS_CLASS
default n
help
- This driver provides support for the LIS3LV02Dx accelerometer. In
- particular, it can be found in a number of HP laptops, which have the
- "Mobile Data Protection System 3D" or "3D DriveGuard" feature. On such
- systems the driver should load automatically (via ACPI). The
- accelerometer might also be found in other systems, connected via SPI
- or I2C. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
+ This driver provides support for the LIS3* accelerometers, such as the
+ LIS3LV02DL or the LIS331DL. In particular, it can be found in a number
+ of HP laptops, which have the "Mobile Data Protection System 3D" or
+ "3D DriveGuard" feature. On such systems the driver should load
+ automatically (via ACPI alias). The accelerometer might also be found
+ in other systems, connected via SPI or I2C. The accelerometer data is
+ readable via /sys/devices/platform/lis3lv02d.
This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick. On HP laptops,
+ a laptop to act as a pinball machine-esque joystick. It also provides
+ a misc device which can be used to detect free-fall. On HP laptops,
if the led infrastructure is activated, support for a led indicating
- disk protection will be provided as hp:red:hddprotection.
+ disk protection will be provided as hp::hddprotect. For more
+ information on the feature, refer to Documentation/hwmon/lis3lv02d.
This driver can also be built as modules. If so, the core module
will be called lis3lv02d and a specific module for HP laptops will be
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 33acf29531af..1ad0a885c5a5 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -34,9 +34,8 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm,
- mc1066);
+enum chips {
+ adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 };
/* adm1021 constants specified below */
@@ -97,7 +96,7 @@ struct adm1021_data {
static int adm1021_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1021_detect(struct i2c_client *client, int kind,
+static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1021_init_client(struct i2c_client *client);
static int adm1021_remove(struct i2c_client *client);
@@ -130,7 +129,7 @@ static struct i2c_driver adm1021_driver = {
.remove = adm1021_remove,
.id_table = adm1021_id,
.detect = adm1021_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static ssize_t show_temp(struct device *dev,
@@ -284,7 +283,7 @@ static const struct attribute_group adm1021_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1021_detect(struct i2c_client *client, int kind,
+static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index db6ac2b04f6f..251b63165e2a 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -64,11 +64,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_2(adm1025, ne1619);
+enum chips { adm1025, ne1619 };
/*
* The ADM1025 registers
@@ -111,7 +107,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
static int adm1025_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1025_detect(struct i2c_client *client, int kind,
+static int adm1025_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1025_init_client(struct i2c_client *client);
static int adm1025_remove(struct i2c_client *client);
@@ -137,7 +133,7 @@ static struct i2c_driver adm1025_driver = {
.remove = adm1025_remove,
.id_table = adm1025_id,
.detect = adm1025_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -409,7 +405,7 @@ static const struct attribute_group adm1025_group_in4 = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1025_detect(struct i2c_client *client, int kind,
+static int adm1025_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index fb5363985e21..65335b268fa9 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -37,9 +37,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adm1026);
-
static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -293,7 +290,7 @@ struct adm1026_data {
static int adm1026_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1026_detect(struct i2c_client *client, int kind,
+static int adm1026_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adm1026_remove(struct i2c_client *client);
static int adm1026_read_value(struct i2c_client *client, u8 reg);
@@ -305,7 +302,7 @@ static void adm1026_init_client(struct i2c_client *client);
static const struct i2c_device_id adm1026_id[] = {
- { "adm1026", adm1026 },
+ { "adm1026", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1026_id);
@@ -319,7 +316,7 @@ static struct i2c_driver adm1026_driver = {
.remove = adm1026_remove,
.id_table = adm1026_id,
.detect = adm1026_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int adm1026_read_value(struct i2c_client *client, u8 reg)
@@ -1650,7 +1647,7 @@ static const struct attribute_group adm1026_group_in8_9 = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1026_detect(struct i2c_client *client, int kind,
+static int adm1026_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index ef91e2a4a567..0b8a3b145bd2 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -44,12 +44,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
};
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(adm1029);
-
-/*
* The ADM1029 registers
* Manufacturer ID is 0x41 for Analog Devices
*/
@@ -117,7 +111,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
static int adm1029_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1029_detect(struct i2c_client *client, int kind,
+static int adm1029_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adm1029_remove(struct i2c_client *client);
static struct adm1029_data *adm1029_update_device(struct device *dev);
@@ -128,7 +122,7 @@ static int adm1029_init_client(struct i2c_client *client);
*/
static const struct i2c_device_id adm1029_id[] = {
- { "adm1029", adm1029 },
+ { "adm1029", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1029_id);
@@ -142,7 +136,7 @@ static struct i2c_driver adm1029_driver = {
.remove = adm1029_remove,
.id_table = adm1029_id,
.detect = adm1029_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -297,7 +291,7 @@ static const struct attribute_group adm1029_group = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1029_detect(struct i2c_client *client, int kind,
+static int adm1029_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0e722175aae0..1644b92e7cc4 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -64,8 +64,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(adm1030, adm1031);
+enum chips { adm1030, adm1031 };
typedef u8 auto_chan_table_t[8][2];
@@ -102,7 +101,7 @@ struct adm1031_data {
static int adm1031_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1031_detect(struct i2c_client *client, int kind,
+static int adm1031_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1031_init_client(struct i2c_client *client);
static int adm1031_remove(struct i2c_client *client);
@@ -125,7 +124,7 @@ static struct i2c_driver adm1031_driver = {
.remove = adm1031_remove,
.id_table = adm1031_id,
.detect = adm1031_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg)
@@ -813,7 +812,7 @@ static const struct attribute_group adm1031_group_opt = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1031_detect(struct i2c_client *client, int kind,
+static int adm1031_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 20e0481cc206..0727ad250793 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -55,8 +55,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_3(adm9240, ds1780, lm81);
+enum chips { adm9240, ds1780, lm81 };
/* ADM9240 registers */
#define ADM9240_REG_MAN_ID 0x3e
@@ -132,7 +131,7 @@ static inline unsigned int AOUT_FROM_REG(u8 reg)
static int adm9240_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm9240_detect(struct i2c_client *client, int kind,
+static int adm9240_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm9240_init_client(struct i2c_client *client);
static int adm9240_remove(struct i2c_client *client);
@@ -156,7 +155,7 @@ static struct i2c_driver adm9240_driver = {
.remove = adm9240_remove,
.id_table = adm9240_id,
.detect = adm9240_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* per client data */
@@ -545,7 +544,7 @@ static const struct attribute_group adm9240_group = {
/*** sensor chip detect and driver install ***/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm9240_detect(struct i2c_client *new_client, int kind,
+static int adm9240_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 451977bca7d6..aac85f3aed50 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -47,10 +47,7 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ads7828);
-
-/* Other module parameters */
+/* Module parameters */
static int se_input = 1; /* Default is SE, 0 == diff */
static int int_vref = 1; /* Default is internal ref ON */
static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */
@@ -72,7 +69,7 @@ struct ads7828_data {
};
/* Function declaration - necessary due to function dependencies */
-static int ads7828_detect(struct i2c_client *client, int kind,
+static int ads7828_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int ads7828_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -168,7 +165,7 @@ static int ads7828_remove(struct i2c_client *client)
}
static const struct i2c_device_id ads7828_id[] = {
- { "ads7828", ads7828 },
+ { "ads7828", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ads7828_id);
@@ -183,11 +180,11 @@ static struct i2c_driver ads7828_driver = {
.remove = ads7828_remove,
.id_table = ads7828_id,
.detect = ads7828_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int ads7828_detect(struct i2c_client *client, int kind,
+static int ads7828_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index f9c9562b6a94..a1a7ef14b519 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -32,9 +32,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adt7462);
-
/* ADT7462 registers */
#define ADT7462_REG_DEVICE 0x3D
#define ADT7462_REG_VENDOR 0x3E
@@ -237,12 +234,12 @@ struct adt7462_data {
static int adt7462_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adt7462_detect(struct i2c_client *client, int kind,
+static int adt7462_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7462_remove(struct i2c_client *client);
static const struct i2c_device_id adt7462_id[] = {
- { "adt7462", adt7462 },
+ { "adt7462", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7462_id);
@@ -256,7 +253,7 @@ static struct i2c_driver adt7462_driver = {
.remove = adt7462_remove,
.id_table = adt7462_id,
.detect = adt7462_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1902,7 +1899,7 @@ static struct attribute *adt7462_attr[] =
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7462_detect(struct i2c_client *client, int kind,
+static int adt7462_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 32b1750a6890..3445ce1cba81 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -33,9 +33,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adt7470);
-
/* ADT7470 registers */
#define ADT7470_REG_BASE_ADDR 0x20
#define ADT7470_REG_TEMP_BASE_ADDR 0x20
@@ -177,12 +174,12 @@ struct adt7470_data {
static int adt7470_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adt7470_detect(struct i2c_client *client, int kind,
+static int adt7470_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7470_remove(struct i2c_client *client);
static const struct i2c_device_id adt7470_id[] = {
- { "adt7470", adt7470 },
+ { "adt7470", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7470_id);
@@ -196,7 +193,7 @@ static struct i2c_driver adt7470_driver = {
.remove = adt7470_remove,
.id_table = adt7470_id,
.detect = adt7470_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1225,7 +1222,7 @@ static struct attribute *adt7470_attr[] =
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7470_detect(struct i2c_client *client, int kind,
+static int adt7470_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index aea244db974e..434576f61c84 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -32,9 +32,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adt7473);
-
/* ADT7473 registers */
#define ADT7473_REG_BASE_ADDR 0x20
@@ -166,12 +163,12 @@ struct adt7473_data {
static int adt7473_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adt7473_detect(struct i2c_client *client, int kind,
+static int adt7473_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7473_remove(struct i2c_client *client);
static const struct i2c_device_id adt7473_id[] = {
- { "adt7473", adt7473 },
+ { "adt7473", 0 },
{ }
};
@@ -184,7 +181,7 @@ static struct i2c_driver adt7473_driver = {
.remove = adt7473_remove,
.id_table = adt7473_id,
.detect = adt7473_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1085,7 +1082,7 @@ static struct attribute *adt7473_attr[] =
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7473_detect(struct i2c_client *client, int kind,
+static int adt7473_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 99abfddedbc3..a0c385145686 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -148,7 +148,7 @@
static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD_4(adt7473, adt7475, adt7476, adt7490);
+enum chips { adt7473, adt7475, adt7476, adt7490 };
static const struct i2c_device_id adt7475_id[] = {
{ "adt7473", adt7473 },
@@ -1172,7 +1172,7 @@ static struct attribute_group in4_attr_group = { .attrs = in4_attrs };
static struct attribute_group in5_attr_group = { .attrs = in5_attrs };
static struct attribute_group vid_attr_group = { .attrs = vid_attrs };
-static int adt7475_detect(struct i2c_client *client, int kind,
+static int adt7475_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -1412,7 +1412,7 @@ static struct i2c_driver adt7475_driver = {
.remove = adt7475_remove,
.id_table = adt7475_id,
.detect = adt7475_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static void adt7475_read_hystersis(struct i2c_client *client)
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 7ea6a8f66056..c1605b528e8f 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -518,7 +518,7 @@ static int applesmc_pm_restore(struct device *dev)
return applesmc_pm_resume(dev);
}
-static struct dev_pm_ops applesmc_pm_ops = {
+static const struct dev_pm_ops applesmc_pm_ops = {
.resume = applesmc_pm_resume,
.restore = applesmc_pm_restore,
};
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 480f80ea1fa0..7dada559b3a1 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -51,9 +51,6 @@
/* I2C addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(asb100);
-
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
@@ -209,14 +206,14 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
static int asb100_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int asb100_detect(struct i2c_client *client, int kind,
+static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int asb100_remove(struct i2c_client *client);
static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);
static const struct i2c_device_id asb100_id[] = {
- { "asb100", asb100 },
+ { "asb100", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, asb100_id);
@@ -230,7 +227,7 @@ static struct i2c_driver asb100_driver = {
.remove = asb100_remove,
.id_table = asb100_id,
.detect = asb100_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* 7 Voltages */
@@ -697,7 +694,7 @@ ERROR_SC_2:
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int asb100_detect(struct i2c_client *client, int kind,
+static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index d6b490d3e36f..94cadc19f0c5 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -44,17 +44,14 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD_1(atxp1);
-
static int atxp1_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int atxp1_remove(struct i2c_client *client);
static struct atxp1_data * atxp1_update_device(struct device *dev);
-static int atxp1_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
static const struct i2c_device_id atxp1_id[] = {
- { "atxp1", atxp1 },
+ { "atxp1", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, atxp1_id);
@@ -68,7 +65,7 @@ static struct i2c_driver atxp1_driver = {
.remove = atxp1_remove,
.id_table = atxp1_id,
.detect = atxp1_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
struct atxp1_data {
@@ -275,7 +272,7 @@ static const struct attribute_group atxp1_group = {
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int atxp1_detect(struct i2c_client *new_client, int kind,
+static int atxp1_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 4377bb0cc526..823dd28a902c 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -57,11 +57,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
/* Addresses to scan */
static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(dme1737, sch5027);
-
-/* ISA chip types */
-enum isa_chips { sch311x = sch5027 + 1 };
+enum chips { dme1737, sch5027, sch311x };
/* ---------------------------------------------------------------------
* Registers
@@ -2208,7 +2204,7 @@ exit:
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int dme1737_i2c_detect(struct i2c_client *client, int kind,
+static int dme1737_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -2318,7 +2314,7 @@ static struct i2c_driver dme1737_i2c_driver = {
.remove = dme1737_i2c_remove,
.id_table = dme1737_id,
.detect = dme1737_i2c_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* ---------------------------------------------------------------------
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 2a4c6a05b14f..e11363467a8d 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ds1621);
static int polarity = -1;
module_param(polarity, int, 0);
MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low");
@@ -224,7 +223,7 @@ static const struct attribute_group ds1621_group = {
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int ds1621_detect(struct i2c_client *client, int kind,
+static int ds1621_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -305,8 +304,8 @@ static int ds1621_remove(struct i2c_client *client)
}
static const struct i2c_device_id ds1621_id[] = {
- { "ds1621", ds1621 },
- { "ds1625", ds1621 },
+ { "ds1621", 0 },
+ { "ds1625", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1621_id);
@@ -321,7 +320,7 @@ static struct i2c_driver ds1621_driver = {
.remove = ds1621_remove,
.id_table = ds1621_id,
.detect = ds1621_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init ds1621_init(void)
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 40dfbcd3f3f2..277398f9c938 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -39,8 +39,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(f75373, f75375);
+enum chips { f75373, f75375 };
/* Fintek F75375 registers */
#define F75375_REG_CONFIG0 0x0
@@ -113,7 +112,7 @@ struct f75375_data {
s8 temp_max_hyst[2];
};
-static int f75375_detect(struct i2c_client *client, int kind,
+static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int f75375_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -135,7 +134,7 @@ static struct i2c_driver f75375_driver = {
.remove = f75375_remove,
.id_table = f75375_id,
.detect = f75375_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static inline int f75375_read8(struct i2c_client *client, u8 reg)
@@ -677,7 +676,7 @@ static int f75375_remove(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int f75375_detect(struct i2c_client *client, int kind,
+static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 281829cd1533..bd0fc67e804b 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -56,7 +56,8 @@ static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-I2C_CLIENT_INSMOD_7(fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl);
+
+enum chips { fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl };
/*
* The FSCHMD registers and other defines
@@ -221,7 +222,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 };
static int fschmd_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int fschmd_detect(struct i2c_client *client, int kind,
+static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int fschmd_remove(struct i2c_client *client);
static struct fschmd_data *fschmd_update_device(struct device *dev);
@@ -251,7 +252,7 @@ static struct i2c_driver fschmd_driver = {
.remove = fschmd_remove,
.id_table = fschmd_id,
.detect = fschmd_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1000,7 +1001,7 @@ static void fschmd_dmi_decode(const struct dmi_header *header, void *dummy)
}
}
-static int fschmd_detect(struct i2c_client *client, int _kind,
+static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 1d69458aa0b6..e7ae5743e181 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -46,8 +46,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80);
+enum chips { gl518sm_r00, gl518sm_r80 };
/* Many GL518 constants specified below */
@@ -139,8 +138,7 @@ struct gl518_data {
static int gl518_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int gl518_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info);
static void gl518_init_client(struct i2c_client *client);
static int gl518_remove(struct i2c_client *client);
static int gl518_read_value(struct i2c_client *client, u8 reg);
@@ -163,7 +161,7 @@ static struct i2c_driver gl518_driver = {
.remove = gl518_remove,
.id_table = gl518_id,
.detect = gl518_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -484,8 +482,7 @@ static const struct attribute_group gl518_group_r80 = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int gl518_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int rev;
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 92b5720ceaff..ec588026f0a9 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -41,9 +41,6 @@ MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=tempe
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(gl520sm);
-
/* Many GL520 constants specified below
One of the inputs can be configured as either temp or voltage.
That's why _TEMP2 and _IN4 access the same register
@@ -81,8 +78,7 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 };
static int gl520_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int gl520_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info);
static void gl520_init_client(struct i2c_client *client);
static int gl520_remove(struct i2c_client *client);
static int gl520_read_value(struct i2c_client *client, u8 reg);
@@ -91,7 +87,7 @@ static struct gl520_data *gl520_update_device(struct device *dev);
/* Driver data */
static const struct i2c_device_id gl520_id[] = {
- { "gl520sm", gl520sm },
+ { "gl520sm", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, gl520_id);
@@ -105,7 +101,7 @@ static struct i2c_driver gl520_driver = {
.remove = gl520_remove,
.id_table = gl520_id,
.detect = gl520_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* Client data */
@@ -681,8 +677,7 @@ static const struct attribute_group gl520_group_opt = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int gl520_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index cf5afb9a10ab..b2f2277cad3c 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -43,13 +43,30 @@
#define MDPS_POLL_INTERVAL 50
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
- * because their are generated even if the data do not change. So it's better
+ * because they are generated even if the data do not change. So it's better
* to keep the interrupt for the free-fall event. The values are updated at
* 40Hz (at the lowest frequency), but as it can be pretty time consuming on
* some low processor, we poll the sensor only at 20Hz... enough for the
* joystick.
*/
+#define LIS3_PWRON_DELAY_WAI_12B (5000)
+#define LIS3_PWRON_DELAY_WAI_8B (3000)
+
+/*
+ * LIS3LV02D spec says 1024 LSBs corresponds 1 G -> 1LSB is 1000/1024 mG
+ * LIS302D spec says: 18 mG / digit
+ * LIS3_ACCURACY is used to increase accuracy of the intermediate
+ * calculation results.
+ */
+#define LIS3_ACCURACY 1024
+/* Sensitivity values for -2G +2G scale */
+#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
+#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
+
+#define LIS3_DEFAULT_FUZZ 3
+#define LIS3_DEFAULT_FLAT 3
+
struct lis3lv02d lis3_dev = {
.misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
};
@@ -65,7 +82,7 @@ static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg)
return lo;
}
-static s16 lis3lv02d_read_16(struct lis3lv02d *lis3, int reg)
+static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg)
{
u8 lo, hi;
@@ -102,16 +119,106 @@ static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
{
int position[3];
+ int i;
+ mutex_lock(&lis3->mutex);
position[0] = lis3->read_data(lis3, OUTX);
position[1] = lis3->read_data(lis3, OUTY);
position[2] = lis3->read_data(lis3, OUTZ);
+ mutex_unlock(&lis3->mutex);
+
+ for (i = 0; i < 3; i++)
+ position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
*x = lis3lv02d_get_axis(lis3->ac.x, position);
*y = lis3lv02d_get_axis(lis3->ac.y, position);
*z = lis3lv02d_get_axis(lis3->ac.z, position);
}
+/* conversion btw sampling rate and the register values */
+static int lis3_12_rates[4] = {40, 160, 640, 2560};
+static int lis3_8_rates[2] = {100, 400};
+
+/* ODR is Output Data Rate */
+static int lis3lv02d_get_odr(void)
+{
+ u8 ctrl;
+ int shift;
+
+ lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
+ ctrl &= lis3_dev.odr_mask;
+ shift = ffs(lis3_dev.odr_mask) - 1;
+ return lis3_dev.odrs[(ctrl >> shift)];
+}
+
+static int lis3lv02d_set_odr(int rate)
+{
+ u8 ctrl;
+ int i, len, shift;
+
+ lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
+ ctrl &= ~lis3_dev.odr_mask;
+ len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */
+ shift = ffs(lis3_dev.odr_mask) - 1;
+
+ for (i = 0; i < len; i++)
+ if (lis3_dev.odrs[i] == rate) {
+ lis3_dev.write(&lis3_dev, CTRL_REG1,
+ ctrl | (i << shift));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
+{
+ u8 reg;
+ s16 x, y, z;
+ u8 selftest;
+ int ret;
+
+ mutex_lock(&lis3->mutex);
+ if (lis3_dev.whoami == WAI_12B)
+ selftest = CTRL1_ST;
+ else
+ selftest = CTRL1_STP;
+
+ lis3->read(lis3, CTRL_REG1, &reg);
+ lis3->write(lis3, CTRL_REG1, (reg | selftest));
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
+ /* Read directly to avoid axis remap */
+ x = lis3->read_data(lis3, OUTX);
+ y = lis3->read_data(lis3, OUTY);
+ z = lis3->read_data(lis3, OUTZ);
+
+ /* back to normal settings */
+ lis3->write(lis3, CTRL_REG1, reg);
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
+ results[0] = x - lis3->read_data(lis3, OUTX);
+ results[1] = y - lis3->read_data(lis3, OUTY);
+ results[2] = z - lis3->read_data(lis3, OUTZ);
+
+ ret = 0;
+ if (lis3->pdata) {
+ int i;
+ for (i = 0; i < 3; i++) {
+ /* Check against selftest acceptance limits */
+ if ((results[i] < lis3->pdata->st_min_limits[i]) ||
+ (results[i] > lis3->pdata->st_max_limits[i])) {
+ ret = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ /* test passed */
+fail:
+ mutex_unlock(&lis3->mutex);
+ return ret;
+}
+
void lis3lv02d_poweroff(struct lis3lv02d *lis3)
{
/* disable X,Y,Z axis and power down */
@@ -125,14 +232,19 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
lis3->init(lis3);
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
/*
* Common configuration
- * BDU: LSB and MSB values are not updated until both have been read.
- * So the value read will always be correct.
+ * BDU: (12 bits sensors only) LSB and MSB values are not updated until
+ * both have been read. So the value read will always be correct.
*/
- lis3->read(lis3, CTRL_REG2, &reg);
- reg |= CTRL2_BDU;
- lis3->write(lis3, CTRL_REG2, reg);
+ if (lis3->whoami == WAI_12B) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ reg |= CTRL2_BDU;
+ lis3->write(lis3, CTRL_REG2, reg);
+ }
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
@@ -273,22 +385,17 @@ static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
int x, y, z;
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib);
- input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib);
- input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib);
-}
-
-
-static inline void lis3lv02d_calibrate_joystick(void)
-{
- lis3lv02d_get_xyz(&lis3_dev,
- &lis3_dev.xcalib, &lis3_dev.ycalib, &lis3_dev.zcalib);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
}
int lis3lv02d_joystick_enable(void)
{
struct input_dev *input_dev;
int err;
+ int max_val, fuzz, flat;
if (lis3_dev.idev)
return -EINVAL;
@@ -301,8 +408,6 @@ int lis3lv02d_joystick_enable(void)
lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
input_dev = lis3_dev.idev->input;
- lis3lv02d_calibrate_joystick();
-
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
@@ -310,9 +415,12 @@ int lis3lv02d_joystick_enable(void)
input_dev->dev.parent = &lis3_dev.pdev->dev;
set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+ max_val = (lis3_dev.mdps_max_val * lis3_dev.scale) / LIS3_ACCURACY;
+ fuzz = (LIS3_DEFAULT_FUZZ * lis3_dev.scale) / LIS3_ACCURACY;
+ flat = (LIS3_DEFAULT_FLAT * lis3_dev.scale) / LIS3_ACCURACY;
+ input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
err = input_register_polled_device(lis3_dev.idev);
if (err) {
@@ -332,11 +440,23 @@ void lis3lv02d_joystick_disable(void)
if (lis3_dev.irq)
misc_deregister(&lis3lv02d_misc_device);
input_unregister_polled_device(lis3_dev.idev);
+ input_free_polled_device(lis3_dev.idev);
lis3_dev.idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
/* Sysfs stuff */
+static ssize_t lis3lv02d_selftest_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int result;
+ s16 values[3];
+
+ result = lis3lv02d_selftest(&lis3_dev, values);
+ return sprintf(buf, "%s %d %d %d\n", result == 0 ? "OK" : "FAIL",
+ values[0], values[1], values[2]);
+}
+
static ssize_t lis3lv02d_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -346,41 +466,35 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
-static ssize_t lis3lv02d_calibrate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t lis3lv02d_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "(%d,%d,%d)\n", lis3_dev.xcalib, lis3_dev.ycalib, lis3_dev.zcalib);
+ return sprintf(buf, "%d\n", lis3lv02d_get_odr());
}
-static ssize_t lis3lv02d_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t lis3lv02d_rate_set(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
- lis3lv02d_calibrate_joystick();
- return count;
-}
+ unsigned long rate;
-/* conversion btw sampling rate and the register values */
-static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560};
-static ssize_t lis3lv02d_rate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 ctrl;
- int val;
+ if (strict_strtoul(buf, 0, &rate))
+ return -EINVAL;
- lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
- val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
- return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
+ if (lis3lv02d_set_odr(rate))
+ return -EINVAL;
+
+ return count;
}
+static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL);
static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
-static DEVICE_ATTR(calibrate, S_IRUGO|S_IWUSR, lis3lv02d_calibrate_show,
- lis3lv02d_calibrate_store);
-static DEVICE_ATTR(rate, S_IRUGO, lis3lv02d_rate_show, NULL);
+static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
+ lis3lv02d_rate_set);
static struct attribute *lis3lv02d_attributes[] = {
+ &dev_attr_selftest.attr,
&dev_attr_position.attr,
- &dev_attr_calibrate.attr,
&dev_attr_rate.attr,
NULL
};
@@ -409,22 +523,30 @@ EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
/*
* Initialise the accelerometer and the various subsystems.
- * Should be rather independant of the bus system.
+ * Should be rather independent of the bus system.
*/
int lis3lv02d_init_device(struct lis3lv02d *dev)
{
dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
switch (dev->whoami) {
- case LIS_DOUBLE_ID:
- printk(KERN_INFO DRIVER_NAME ": 2-byte sensor found\n");
- dev->read_data = lis3lv02d_read_16;
+ case WAI_12B:
+ printk(KERN_INFO DRIVER_NAME ": 12 bits sensor found\n");
+ dev->read_data = lis3lv02d_read_12;
dev->mdps_max_val = 2048;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
+ dev->odrs = lis3_12_rates;
+ dev->odr_mask = CTRL1_DF0 | CTRL1_DF1;
+ dev->scale = LIS3_SENSITIVITY_12B;
break;
- case LIS_SINGLE_ID:
- printk(KERN_INFO DRIVER_NAME ": 1-byte sensor found\n");
+ case WAI_8B:
+ printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n");
dev->read_data = lis3lv02d_read_8;
dev->mdps_max_val = 128;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ dev->odrs = lis3_8_rates;
+ dev->odr_mask = CTRL1_DR;
+ dev->scale = LIS3_SENSITIVITY_8B;
break;
default:
printk(KERN_ERR DRIVER_NAME
@@ -432,6 +554,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
return -EINVAL;
}
+ mutex_init(&dev->mutex);
+
lis3lv02d_add_fs(dev);
lis3lv02d_poweron(dev);
@@ -443,7 +567,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
if (dev->pdata) {
struct lis3lv02d_platform_data *p = dev->pdata;
- if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) {
+ if (p->click_flags && (dev->whoami == WAI_8B)) {
dev->write(dev, CLICK_CFG, p->click_flags);
dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
dev->write(dev, CLICK_LATENCY, p->click_latency);
@@ -454,7 +578,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
(p->click_thresh_y << 4));
}
- if (p->wakeup_flags && (dev->whoami == LIS_SINGLE_ID)) {
+ if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
/* default to 2.5ms for now */
@@ -484,4 +608,3 @@ EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
MODULE_LICENSE("GPL");
-
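A standalone sketch of the fixed-point scaling introduced above (the helper name lis3_raw_to_mg is hypothetical): the per-LSB sensitivity is pre-multiplied by LIS3_ACCURACY so the intermediate math stays in integers, and the factor is divided back out when a raw reading is converted to mG.

#define LIS3_ACCURACY		1024
#define LIS3_SENSITIVITY_12B	((LIS3_ACCURACY * 1000) / 1024)	/* = 1000  */
#define LIS3_SENSITIVITY_8B	(18 * LIS3_ACCURACY)		/* = 18432 */

static int lis3_raw_to_mg(int raw, int scale)
{
	return (raw * scale) / LIS3_ACCURACY;
}

/*
 * 12-bit part: raw 1024 -> 1024 * 1000  / 1024 = 1000 mG (1 G)
 *  8-bit part: raw   56 ->   56 * 18432 / 1024 = 1008 mG (~1 G, 18 mG/digit)
 */

The same idea applies to the power-on delay: pwron_delay is divided by the current output data rate, so a 12-bit part at its lowest 40 Hz rate sleeps for 5000 / 40 = 125 ms.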
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 3e1ff46f72d3..e6a01f44709b 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -2,7 +2,7 @@
* lis3lv02d.h - ST LIS3LV02DL accelerometer driver
*
* Copyright (C) 2007-2008 Yan Burman
- * Copyright (C) 2008 Eric Piel
+ * Copyright (C) 2008-2009 Eric Piel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,20 +22,18 @@
#include <linux/input-polldev.h>
/*
- * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
- * be connected via SPI. There exists also several similar chips (such as LIS302DL or
- * LIS3L02DQ) and they have slightly different registers, but we can provide a
- * common interface for all of them.
- * They can also be connected via I²C.
+ * This driver tries to support the "digital" accelerometer chips from
+ * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL,
+ * LIS35DE, or LIS202DL. They are very similar in terms of programming, with
+ * almost the same registers. In addition to differing on physical properties,
+ * they differ on the number of axes (2/3), precision (8/12 bits), and special
+ * features (freefall detection, click...). Unfortunately, not all the
+ * differences can be probed via a register.
+ * They can be connected either via I²C or SPI.
*/
#include <linux/lis3lv02d.h>
-/* 2-byte registers */
-#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */
-/* 1-byte registers */
-#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */
-
enum lis3_reg {
WHO_AM_I = 0x0F,
OFFSET_X = 0x16,
@@ -94,7 +92,13 @@ enum lis3lv02d_reg {
DD_THSE_H = 0x3F,
};
-enum lis3lv02d_ctrl1 {
+enum lis3_who_am_i {
+ WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
+ WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
+ WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
+};
+
+enum lis3lv02d_ctrl1_12b {
CTRL1_Xen = 0x01,
CTRL1_Yen = 0x02,
CTRL1_Zen = 0x04,
@@ -104,6 +108,16 @@ enum lis3lv02d_ctrl1 {
CTRL1_PD0 = 0x40,
CTRL1_PD1 = 0x80,
};
+
+/* Delta to ctrl1_12b version */
+enum lis3lv02d_ctrl1_8b {
+ CTRL1_STM = 0x08,
+ CTRL1_STP = 0x10,
+ CTRL1_FS = 0x20,
+ CTRL1_PD = 0x40,
+ CTRL1_DR = 0x80,
+};
+
enum lis3lv02d_ctrl2 {
CTRL2_DAS = 0x01,
CTRL2_SIM = 0x02,
@@ -194,16 +208,20 @@ struct lis3lv02d {
int (*write) (struct lis3lv02d *lis3, int reg, u8 val);
int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret);
- u8 whoami; /* 3Ah: 2-byte registries, 3Bh: 1-byte registries */
+ int *odrs; /* Supported output data rates */
+ u8 odr_mask; /* ODR bit mask */
+ u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
int mdps_max_val;
+ int pwron_delay;
+ int scale; /*
+ * relationship between 1 LBS and mG
+ * (1/1000th of earth gravity)
+ */
struct input_polled_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
atomic_t count; /* interrupt count after last read */
- int xcalib; /* calibrated null value for x */
- int ycalib; /* calibrated null value for y */
- int zcalib; /* calibrated null value for z */
struct axis_conversion ac; /* hw -> logical axis */
u32 irq; /* IRQ number */
@@ -212,6 +230,7 @@ struct lis3lv02d {
unsigned long misc_opened; /* bit0: whether the device is open */
struct lis3lv02d_platform_data *pdata; /* for passing board config */
+ struct mutex mutex; /* Serialize poll and selftest */
};
int lis3lv02d_init_device(struct lis3lv02d *lis3);
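A small sketch of how the new rate handling decodes CTRL_REG1 (standalone example; odr_from_ctrl is a hypothetical helper mirroring lis3lv02d_get_odr(), and ffs() is assumed to behave as in the kernel): the data-rate bits are isolated with odr_mask, shifted down to bit 0, and used as an index into the per-chip rate table.

static const int lis3_12_rates[4] = { 40, 160, 640, 2560 };	/* Hz */

static int odr_from_ctrl(unsigned char ctrl, unsigned char odr_mask,
			 const int *odrs)
{
	int shift = ffs(odr_mask) - 1;	/* position of the lowest mask bit */

	return odrs[(ctrl & odr_mask) >> shift];
}

/* 12-bit part: odr_mask = CTRL1_DF1 | CTRL1_DF0 = 0x30, ctrl = 0x20 -> odrs[2] = 640 Hz */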
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 5da66ab04f74..bf81aff7051d 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -56,12 +56,6 @@
static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(lm63);
-
-/*
* The LM63 registers
*/
@@ -134,8 +128,7 @@ static int lm63_remove(struct i2c_client *client);
static struct lm63_data *lm63_update_device(struct device *dev);
-static int lm63_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
/*
@@ -143,7 +136,7 @@ static void lm63_init_client(struct i2c_client *client);
*/
static const struct i2c_device_id lm63_id[] = {
- { "lm63", lm63 },
+ { "lm63", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -157,7 +150,7 @@ static struct i2c_driver lm63_driver = {
.remove = lm63_remove,
.id_table = lm63_id,
.detect = lm63_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -423,7 +416,7 @@ static const struct attribute_group lm63_group_fan1 = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm63_detect(struct i2c_client *new_client, int kind,
+static int lm63_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 0bf8b2a8e9f0..c5f39ba103c0 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -27,9 +27,6 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
0x4d, 0x4e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm73);
-
/* LM73 registers */
#define LM73_REG_INPUT 0x00
#define LM73_REG_CONF 0x01
@@ -145,13 +142,13 @@ static int lm73_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm73_ids[] = {
- { "lm73", lm73 },
+ { "lm73", 0 },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm73_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm73_detect(struct i2c_client *new_client, int kind,
+static int lm73_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -182,7 +179,7 @@ static struct i2c_driver lm73_driver = {
.remove = lm73_remove,
.id_table = lm73_ids,
.detect = lm73_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* module glue */
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e392548cccb8..8ae2cfe2d827 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -32,15 +32,12 @@
/*
* This driver handles the LM75 and compatible digital temperature sensors.
- * Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be
- * listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow
- * definition of up to 8 chip types (plus zero).
*/
enum lm75_type { /* keep sorted in alphabetical order */
- ds1775 = 9,
+ ds1775,
ds75,
- /* lm75 -- in I2C_CLIENT_INSMOD_1() */
+ lm75,
lm75a,
max6625,
max6626,
@@ -58,9 +55,6 @@ enum lm75_type { /* keep sorted in alphabetical order */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm75);
-
/* The LM75 registers */
#define LM75_REG_CONF 0x01
@@ -234,7 +228,7 @@ static const struct i2c_device_id lm75_ids[] = {
MODULE_DEVICE_TABLE(i2c, lm75_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm75_detect(struct i2c_client *new_client, int kind,
+static int lm75_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -295,7 +289,7 @@ static struct i2c_driver lm75_driver = {
.remove = lm75_remove,
.id_table = lm75_ids,
.detect = lm75_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*-----------------------------------------------------------------------*/
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index ac067fd19482..b28a297be50c 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -39,9 +39,6 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm77);
-
/* The LM77 registers */
#define LM77_REG_TEMP 0x00
#define LM77_REG_CONF 0x01
@@ -66,8 +63,7 @@ struct lm77_data {
static int lm77_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int lm77_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm77_init_client(struct i2c_client *client);
static int lm77_remove(struct i2c_client *client);
static u16 lm77_read_value(struct i2c_client *client, u8 reg);
@@ -77,7 +73,7 @@ static struct lm77_data *lm77_update_device(struct device *dev);
static const struct i2c_device_id lm77_id[] = {
- { "lm77", lm77 },
+ { "lm77", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm77_id);
@@ -92,7 +88,7 @@ static struct i2c_driver lm77_driver = {
.remove = lm77_remove,
.id_table = lm77_id,
.detect = lm77_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* straight from the datasheet */
@@ -245,7 +241,7 @@ static const struct attribute_group lm77_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm77_detect(struct i2c_client *new_client, int kind,
+static int lm77_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 5978291cebb3..cadcbd90ff3b 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -41,8 +41,7 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
static unsigned short isa_address = 0x290;
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(lm78, lm79);
+enum chips { lm78, lm79 };
/* Many LM78 constants specified below */
@@ -142,7 +141,7 @@ struct lm78_data {
};
-static int lm78_i2c_detect(struct i2c_client *client, int kind,
+static int lm78_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int lm78_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -173,7 +172,7 @@ static struct i2c_driver lm78_driver = {
.remove = lm78_i2c_remove,
.id_table = lm78_i2c_id,
.detect = lm78_i2c_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static struct platform_driver lm78_isa_driver = {
@@ -558,7 +557,7 @@ static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
return 1;
}
-static int lm78_i2c_detect(struct i2c_client *client, int kind,
+static int lm78_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
int i;
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index bcffc1899403..18a0e6c5fe88 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -35,9 +35,6 @@
static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm80);
-
/* Many LM80 constants specified below */
/* The LM80 registers */
@@ -133,8 +130,7 @@ struct lm80_data {
static int lm80_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int lm80_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm80_init_client(struct i2c_client *client);
static int lm80_remove(struct i2c_client *client);
static struct lm80_data *lm80_update_device(struct device *dev);
@@ -146,7 +142,7 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
*/
static const struct i2c_device_id lm80_id[] = {
- { "lm80", lm80 },
+ { "lm80", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm80_id);
@@ -160,7 +156,7 @@ static struct i2c_driver lm80_driver = {
.remove = lm80_remove,
.id_table = lm80_id,
.detect = lm80_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -447,8 +443,7 @@ static const struct attribute_group lm80_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm80_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int i, cur;
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 08b03e6ed0b7..8290476aee4a 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -51,11 +51,7 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_2(lm83, lm82);
+enum chips { lm83, lm82 };
/*
* The LM83 registers
@@ -118,7 +114,7 @@ static const u8 LM83_REG_W_HIGH[] = {
* Functions declaration
*/
-static int lm83_detect(struct i2c_client *new_client, int kind,
+static int lm83_detect(struct i2c_client *new_client,
struct i2c_board_info *info);
static int lm83_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -145,7 +141,7 @@ static struct i2c_driver lm83_driver = {
.remove = lm83_remove,
.id_table = lm83_id,
.detect = lm83_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -291,7 +287,7 @@ static const struct attribute_group lm83_group_opt = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm83_detect(struct i2c_client *new_client, int kind,
+static int lm83_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d56da2e74708..b3841a615595 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -38,9 +38,11 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100,
- emc6d102);
+enum chips {
+ any_chip, lm85b, lm85c,
+ adm1027, adt7463, adt7468,
+ emc6d100, emc6d102
+};
/* The LM85 registers */
@@ -323,8 +325,7 @@ struct lm85_data {
struct lm85_zone zone[3];
};
-static int lm85_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info);
static int lm85_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int lm85_remove(struct i2c_client *client);
@@ -357,7 +358,7 @@ static struct i2c_driver lm85_driver = {
.remove = lm85_remove,
.id_table = lm85_id,
.detect = lm85_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
@@ -1156,8 +1157,7 @@ static int lm85_is_fake(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm85_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int address = client->addr;
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 4929b1815eee..f1e6e7512ffa 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -74,11 +74,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_2(lm87, adm1024);
+enum chips { lm87, adm1024 };
/*
* The LM87 registers
@@ -158,7 +154,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
static int lm87_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int lm87_detect(struct i2c_client *new_client, int kind,
+static int lm87_detect(struct i2c_client *new_client,
struct i2c_board_info *info);
static void lm87_init_client(struct i2c_client *client);
static int lm87_remove(struct i2c_client *client);
@@ -184,7 +180,7 @@ static struct i2c_driver lm87_driver = {
.remove = lm87_remove,
.id_table = lm87_id,
.detect = lm87_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -662,7 +658,7 @@ static const struct attribute_group lm87_group_opt = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm87_detect(struct i2c_client *new_client, int kind,
+static int lm87_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index b7c905f50ed4..7c9bdc167426 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -93,12 +93,7 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680,
- max6646);
+enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646 };
/*
* The LM90 registers
@@ -152,8 +147,7 @@ I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680,
* Functions declaration
*/
-static int lm90_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info);
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static void lm90_init_client(struct i2c_client *client);
@@ -192,7 +186,7 @@ static struct i2c_driver lm90_driver = {
.remove = lm90_remove,
.id_table = lm90_id,
.detect = lm90_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -656,7 +650,7 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm90_detect(struct i2c_client *new_client, int kind,
+static int lm90_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 47ac698709dc..7c31e6205f85 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -54,9 +54,6 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm92);
-
/* The LM92 registers */
#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
@@ -319,7 +316,7 @@ static const struct attribute_group lm92_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm92_detect(struct i2c_client *new_client, int kind,
+static int lm92_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -401,7 +398,7 @@ static int lm92_remove(struct i2c_client *client)
*/
static const struct i2c_device_id lm92_id[] = {
- { "lm92", lm92 },
+ { "lm92", 0 },
/* max6635 could be added here */
{ }
};
@@ -416,7 +413,7 @@ static struct i2c_driver lm92_driver = {
.remove = lm92_remove,
.id_table = lm92_id,
.detect = lm92_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init sensors_lm92_init(void)
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 124dd7cea54c..6669255aadcf 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -145,7 +145,6 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm93);
static int disable_block;
module_param(disable_block, bool, 0);
@@ -2501,8 +2500,7 @@ static void lm93_init_client(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm93_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int mfr, ver;
@@ -2603,7 +2601,7 @@ static int lm93_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm93_id[] = {
- { "lm93", lm93 },
+ { "lm93", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm93_id);
@@ -2617,7 +2615,7 @@ static struct i2c_driver lm93_driver = {
.remove = lm93_remove,
.id_table = lm93_id,
.detect = lm93_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init lm93_init(void)
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 906b896cf1d0..8fc8eb8cba47 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -39,9 +39,6 @@
static const unsigned short normal_i2c[] = {
0x19, 0x2a, 0x2b, I2C_CLIENT_END};
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm95241);
-
/* LM95241 registers */
#define LM95241_REG_R_MAN_ID 0xFE
#define LM95241_REG_R_CHIP_ID 0xFF
@@ -310,7 +307,7 @@ static const struct attribute_group lm95241_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm95241_detect(struct i2c_client *new_client, int kind,
+static int lm95241_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -446,7 +443,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95241_id[] = {
- { "lm95241", lm95241 },
+ { "lm95241", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95241_id);
@@ -460,7 +457,7 @@ static struct i2c_driver lm95241_driver = {
.remove = lm95241_remove,
.id_table = lm95241_id,
.detect = lm95241_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init sensors_lm95241_init(void)
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 7fcf5ff89e7f..022ded098100 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -41,12 +41,6 @@ static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(max1619);
-
-/*
* The MAX1619 registers
*/
@@ -88,7 +82,7 @@ static int temp_to_reg(int val)
static int max1619_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int max1619_detect(struct i2c_client *client, int kind,
+static int max1619_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void max1619_init_client(struct i2c_client *client);
static int max1619_remove(struct i2c_client *client);
@@ -99,7 +93,7 @@ static struct max1619_data *max1619_update_device(struct device *dev);
*/
static const struct i2c_device_id max1619_id[] = {
- { "max1619", max1619 },
+ { "max1619", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
@@ -113,7 +107,7 @@ static struct i2c_driver max1619_driver = {
.remove = max1619_remove,
.id_table = max1619_id,
.detect = max1619_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -226,7 +220,7 @@ static const struct attribute_group max1619_group = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int max1619_detect(struct i2c_client *client, int kind,
+static int max1619_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 1da561e0cb37..a0160ee5caef 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -62,8 +62,6 @@ module_param(fan_voltage, int, S_IRUGO);
module_param(prescaler, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
-I2C_CLIENT_INSMOD_1(max6650);
-
/*
* MAX 6650/6651 registers
*/
@@ -116,7 +114,7 @@ I2C_CLIENT_INSMOD_1(max6650);
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int max6650_detect(struct i2c_client *client, int kind,
+static int max6650_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int max6650_init_client(struct i2c_client *client);
static int max6650_remove(struct i2c_client *client);
@@ -127,7 +125,7 @@ static struct max6650_data *max6650_update_device(struct device *dev);
*/
static const struct i2c_device_id max6650_id[] = {
- { "max6650", max6650 },
+ { "max6650", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6650_id);
@@ -141,7 +139,7 @@ static struct i2c_driver max6650_driver = {
.remove = max6650_remove,
.id_table = max6650_id,
.detect = max6650_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -528,7 +526,7 @@ static struct attribute_group max6650_attr_grp = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int max6650_detect(struct i2c_client *client, int kind,
+static int max6650_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 1d7ffebd679d..d44787949851 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -29,7 +29,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(pcf8591);
static int input_mode;
module_param(input_mode, int, 0);
@@ -169,7 +168,7 @@ static const struct attribute_group pcf8591_attr_group_opt = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int pcf8591_detect(struct i2c_client *client, int kind,
+static int pcf8591_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -299,7 +298,7 @@ static struct i2c_driver pcf8591_driver = {
.class = I2C_CLASS_HWMON, /* Nearest choice */
.detect = pcf8591_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init pcf8591_init(void)
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 4d88c045781c..40b26673d87f 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -36,9 +36,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(smsc47m192);
-
/* SMSC47M192 registers */
#define SMSC47M192_REG_IN(nr) ((nr)<6 ? (0x20 + (nr)) : \
(0x50 + (nr) - 6))
@@ -115,13 +112,13 @@ struct smsc47m192_data {
static int smsc47m192_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int smsc47m192_detect(struct i2c_client *client, int kind,
+static int smsc47m192_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int smsc47m192_remove(struct i2c_client *client);
static struct smsc47m192_data *smsc47m192_update_device(struct device *dev);
static const struct i2c_device_id smsc47m192_id[] = {
- { "smsc47m192", smsc47m192 },
+ { "smsc47m192", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, smsc47m192_id);
@@ -135,7 +132,7 @@ static struct i2c_driver smsc47m192_driver = {
.remove = smsc47m192_remove,
.id_table = smsc47m192_id,
.detect = smsc47m192_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* Voltages */
@@ -481,7 +478,7 @@ static void smsc47m192_init_client(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int smsc47m192_detect(struct i2c_client *client, int kind,
+static int smsc47m192_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 4b793849c738..7dfb4dec4c5f 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(thmc50, adm1022);
+enum chips { thmc50, adm1022 };
static unsigned short adm1022_temp3[16];
static unsigned int adm1022_temp3_num;
@@ -84,7 +84,7 @@ struct thmc50_data {
u8 alarms;
};
-static int thmc50_detect(struct i2c_client *client, int kind,
+static int thmc50_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int thmc50_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -108,7 +108,7 @@ static struct i2c_driver thmc50_driver = {
.remove = thmc50_remove,
.id_table = thmc50_id,
.detect = thmc50_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static ssize_t show_analog_out(struct device *dev,
@@ -286,7 +286,7 @@ static const struct attribute_group temp3_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int thmc50_detect(struct i2c_client *client, int kind,
+static int thmc50_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
unsigned company;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ee9673467c4a..a13b30e8d8d8 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -42,8 +42,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(tmp401, tmp411);
+enum chips { tmp401, tmp411 };
/*
* The TMP401 registers, note some registers have different addresses for
@@ -98,7 +97,7 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int tmp401_detect(struct i2c_client *client, int kind,
+static int tmp401_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int tmp401_remove(struct i2c_client *client);
static struct tmp401_data *tmp401_update_device(struct device *dev);
@@ -123,7 +122,7 @@ static struct i2c_driver tmp401_driver = {
.remove = tmp401_remove,
.id_table = tmp401_id,
.detect = tmp401_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -488,7 +487,7 @@ static void tmp401_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config);
}
-static int tmp401_detect(struct i2c_client *client, int _kind,
+static int tmp401_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index bb5464a289ca..4f7c051e2d7b 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -39,8 +39,7 @@
static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_3(tmp421, tmp422, tmp423);
+enum chips { tmp421, tmp422, tmp423 };
/* The TMP421 registers */
#define TMP421_CONFIG_REG_1 0x09
@@ -223,7 +222,7 @@ static int tmp421_init_client(struct i2c_client *client)
return 0;
}
-static int tmp421_detect(struct i2c_client *client, int _kind,
+static int tmp421_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
@@ -322,7 +321,7 @@ static struct i2c_driver tmp421_driver = {
.remove = tmp421_remove,
.id_table = tmp421_id,
.detect = tmp421_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init tmp421_init(void)
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index bb5e78748783..0dcaba9b7189 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -5,6 +5,7 @@
Copyright (C) 2006 Yuan Mu (Winbond),
Rudolf Marek <r.marek@assembler.cz>
David Hubbard <david.c.hubbard@gmail.com>
+ Daniel J Blueman <daniel.blueman@gmail.com>
Shamelessly ripped from the w83627hf driver
Copyright (C) 2003 Mark Studebaker
@@ -177,12 +178,15 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
#define W83627EHF_REG_ALARM3 0x45B
/* SmartFan registers */
+#define W83627EHF_REG_FAN_STEPUP_TIME 0x0f
+#define W83627EHF_REG_FAN_STEPDOWN_TIME 0x0e
+
/* DC or PWM output fan configuration */
static const u8 W83627EHF_REG_PWM_ENABLE[] = {
0x04, /* SYS FAN0 output mode and PWM mode */
0x04, /* CPU FAN0 output mode and PWM mode */
0x12, /* AUX FAN mode */
- 0x62, /* CPU fan1 mode */
+ 0x62, /* CPU FAN1 mode */
};
static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 };
@@ -193,10 +197,12 @@ static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
-
/* Advanced Fan control, some values are common for all fans */
-static const u8 W83627EHF_REG_FAN_MIN_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
-static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0C, 0x0D, 0x17, 0x66 };
+static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
+static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
+static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
+static const u8 W83627EHF_REG_FAN_MAX_OUTPUT[] = { 0xff, 0x67, 0xff, 0x69 };
+static const u8 W83627EHF_REG_FAN_STEP_OUTPUT[] = { 0xff, 0x68, 0xff, 0x6a };
/*
* Conversions
@@ -295,14 +301,19 @@ struct w83627ehf_data {
u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
u8 pwm_enable[4]; /* 1->manual
- 2->thermal cruise (also called SmartFan I) */
+ 2->thermal cruise mode (also called SmartFan I)
+ 3->fan speed cruise mode
+ 4->variable thermal cruise (also called SmartFan III) */
u8 pwm_num; /* number of pwm */
u8 pwm[4];
u8 target_temp[4];
u8 tolerance[4];
- u8 fan_min_output[4]; /* minimum fan speed */
- u8 fan_stop_time[4];
+ u8 fan_start_output[4]; /* minimum fan speed when spinning up */
+ u8 fan_stop_output[4]; /* minimum fan speed when spinning down */
+ u8 fan_stop_time[4]; /* time at minimum before disabling fan */
+ u8 fan_max_output[4]; /* maximum fan speed */
+ u8 fan_step_output[4]; /* rate of change output value */
u8 vid;
u8 vrm;
@@ -529,8 +540,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
& 3) + 1;
data->pwm[i] = w83627ehf_read_value(data,
W83627EHF_REG_PWM[i]);
- data->fan_min_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_MIN_OUTPUT[i]);
+ data->fan_start_output[i] = w83627ehf_read_value(data,
+ W83627EHF_REG_FAN_START_OUTPUT[i]);
+ data->fan_stop_output[i] = w83627ehf_read_value(data,
+ W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN_STOP_TIME[i]);
data->target_temp[i] =
@@ -976,7 +989,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
u32 val = simple_strtoul(buf, NULL, 10);
u16 reg;
- if (!val || (val > 2)) /* only modes 1 and 2 are supported */
+ if (!val || (val > 4))
return -EINVAL;
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
@@ -1118,7 +1131,10 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
return count; \
}
-fan_functions(fan_min_output, FAN_MIN_OUTPUT)
+fan_functions(fan_start_output, FAN_START_OUTPUT)
+fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, FAN_STEP_OUTPUT)
#define fan_time_functions(reg, REG) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
@@ -1161,8 +1177,14 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
store_fan_stop_time, 3),
- SENSOR_ATTR(pwm4_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 3),
+ SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 3),
+ SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 3),
+ SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
+ store_fan_max_output, 3),
+ SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
+ store_fan_step_output, 3),
};
static struct sensor_device_attribute sda_sf3_arrays[] = {
@@ -1172,12 +1194,24 @@ static struct sensor_device_attribute sda_sf3_arrays[] = {
store_fan_stop_time, 1),
SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
store_fan_stop_time, 2),
- SENSOR_ATTR(pwm1_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 0),
- SENSOR_ATTR(pwm2_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 1),
- SENSOR_ATTR(pwm3_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 2),
+ SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 0),
+ SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 1),
+ SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 2),
+ SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 0),
+ SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 1),
+ SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 2),
+
+ /* pwm1 and pwm3 don't support max and step settings */
+ SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
+ store_fan_max_output, 1),
+ SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
+ store_fan_step_output, 1),
};
static ssize_t
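A trivial sketch of the relaxed pwm_enable range check above (standalone; the helper name is hypothetical and the register write is omitted): modes 1 through 4 are now accepted instead of only 1 and 2.

/*
 * 1 - manual
 * 2 - thermal cruise (SmartFan I)
 * 3 - fan speed cruise
 * 4 - variable thermal cruise (SmartFan III)
 */
static int pwm_enable_valid(unsigned long val)
{
	return val >= 1 && val <= 4;
}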
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 7ab7967da0a0..05f9225b6f94 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -56,9 +56,10 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_4(w83781d, w83782d, w83783s, as99127f);
+enum chips { w83781d, w83782d, w83783s, as99127f };
+
+/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
@@ -1051,8 +1052,7 @@ w83781d_create_files(struct device *dev, int kind, int is_isa)
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
-w83781d_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+w83781d_detect(struct i2c_client *client, struct i2c_board_info *info)
{
int val1, val2;
struct w83781d_data *isa = w83781d_data_if_isa();
@@ -1537,7 +1537,7 @@ static struct i2c_driver w83781d_driver = {
.remove = w83781d_remove,
.id_table = w83781d_ids,
.detect = w83781d_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 0410bf12c521..400a88bde278 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -52,7 +52,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83791d);
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
@@ -326,7 +325,7 @@ struct w83791d_data {
static int w83791d_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83791d_detect(struct i2c_client *client, int kind,
+static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
@@ -341,7 +340,7 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
static void w83791d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83791d_id[] = {
- { "w83791d", w83791d },
+ { "w83791d", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83791d_id);
@@ -355,7 +354,7 @@ static struct i2c_driver w83791d_driver = {
.remove = w83791d_remove,
.id_table = w83791d_id,
.detect = w83791d_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* following are the sysfs callback functions */
@@ -1259,7 +1258,7 @@ error_sc_0:
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int w83791d_detect(struct i2c_client *client, int kind,
+static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 38978851333f..679718e6b017 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -50,7 +50,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83792d);
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
@@ -302,7 +301,7 @@ struct w83792d_data {
static int w83792d_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83792d_detect(struct i2c_client *client, int kind,
+static int w83792d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83792d_remove(struct i2c_client *client);
static struct w83792d_data *w83792d_update_device(struct device *dev);
@@ -314,7 +313,7 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
static void w83792d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83792d_id[] = {
- { "w83792d", w83792d },
+ { "w83792d", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83792d_id);
@@ -328,7 +327,7 @@ static struct i2c_driver w83792d_driver = {
.remove = w83792d_remove,
.id_table = w83792d_id,
.detect = w83792d_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static inline long in_count_from_reg(int nr, struct w83792d_data *data)
@@ -1263,7 +1262,7 @@ static const struct attribute_group w83792d_group = {
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
-w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info)
+w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int val1, val2;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 80a2191bf127..9a2022b67495 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -41,7 +41,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83793);
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
@@ -230,7 +229,7 @@ static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83793_detect(struct i2c_client *client, int kind,
+static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
@@ -238,7 +237,7 @@ static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
static const struct i2c_device_id w83793_id[] = {
- { "w83793", w83793 },
+ { "w83793", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83793_id);
@@ -252,7 +251,7 @@ static struct i2c_driver w83793_driver = {
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static ssize_t
@@ -1161,7 +1160,7 @@ ERROR_SC_0:
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int w83793_detect(struct i2c_client *client, int kind,
+static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
u8 tmp, bank, chip_id;
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 9b6c4c10fba7..20781def65ed 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -52,12 +52,6 @@
static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(w83l785ts);
-
-/*
* The W83L785TS-S registers
* Manufacturer ID is 0x5CA3 for Winbond.
*/
@@ -83,7 +77,7 @@ I2C_CLIENT_INSMOD_1(w83l785ts);
static int w83l785ts_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83l785ts_detect(struct i2c_client *client, int kind,
+static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83l785ts_remove(struct i2c_client *client);
static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval);
@@ -94,7 +88,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
*/
static const struct i2c_device_id w83l785ts_id[] = {
- { "w83l785ts", w83l785ts },
+ { "w83l785ts", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83l785ts_id);
@@ -108,7 +102,7 @@ static struct i2c_driver w83l785ts_driver = {
.remove = w83l785ts_remove,
.id_table = w83l785ts_id,
.detect = w83l785ts_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -146,7 +140,7 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1);
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int w83l785ts_detect(struct i2c_client *client, int kind,
+static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 27da7d2b15fb..0254e181893d 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -38,7 +38,6 @@
static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83l786ng);
static int reset;
module_param(reset, bool, 0);
@@ -147,14 +146,14 @@ struct w83l786ng_data {
static int w83l786ng_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83l786ng_detect(struct i2c_client *client, int kind,
+static int w83l786ng_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83l786ng_remove(struct i2c_client *client);
static void w83l786ng_init_client(struct i2c_client *client);
static struct w83l786ng_data *w83l786ng_update_device(struct device *dev);
static const struct i2c_device_id w83l786ng_id[] = {
- { "w83l786ng", w83l786ng },
+ { "w83l786ng", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83l786ng_id);
@@ -168,7 +167,7 @@ static struct i2c_driver w83l786ng_driver = {
.remove = w83l786ng_remove,
.id_table = w83l786ng_id,
.detect = w83l786ng_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static u8
@@ -586,8 +585,7 @@ static const struct attribute_group w83l786ng_group = {
};
static int
-w83l786ng_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
u16 man_id;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 049555777f67..7647a20523a0 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1155,7 +1155,7 @@ static int i2c_pxa_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops i2c_pxa_dev_pm_ops = {
+static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
.suspend_noirq = i2c_pxa_suspend_noirq,
.resume_noirq = i2c_pxa_resume_noirq,
};
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 96aafb91b69a..1d8c98613fa0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -967,7 +967,7 @@ static int s3c24xx_i2c_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
.resume = s3c24xx_i2c_resume,
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 86a9d4e81472..ccc46418ef7f 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -647,7 +647,7 @@ static int sh_mobile_i2c_runtime_nop(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
+static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_suspend = sh_mobile_i2c_runtime_nop,
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 4f34823e86b1..0ac2f90ab840 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -155,6 +155,35 @@ static void i2c_device_shutdown(struct device *dev)
driver->shutdown(client);
}
+#ifdef CONFIG_SUSPEND
+static int i2c_device_pm_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->suspend)
+ return 0;
+ return pm->suspend(dev);
+}
+
+static int i2c_device_pm_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->resume)
+ return 0;
+ return pm->resume(dev);
+}
+#else
+#define i2c_device_pm_suspend NULL
+#define i2c_device_pm_resume NULL
+#endif
+
static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -219,6 +248,11 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
NULL
};
+static const struct dev_pm_ops i2c_device_pm_ops = {
+ .suspend = i2c_device_pm_suspend,
+ .resume = i2c_device_pm_resume,
+};
+
struct bus_type i2c_bus_type = {
.name = "i2c",
.match = i2c_device_match,
@@ -227,6 +261,7 @@ struct bus_type i2c_bus_type = {
.shutdown = i2c_device_shutdown,
.suspend = i2c_device_suspend,
.resume = i2c_device_resume,
+ .pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
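As a hedged illustration of what the new bus-level .pm hooks give an I2C client driver, here is a minimal sketch; the foo_* names are hypothetical, and only dev_pm_ops plus the dev->driver->pm lookup used above are taken from this patch.

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the chip; reached via i2c_device_pm_suspend() above */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore chip state; reached via i2c_device_pm_resume() above */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* found through dev->driver->pm */
	},
	/* .probe, .remove and .id_table omitted from this sketch */
};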
@@ -1184,7 +1219,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
- err = driver->detect(temp_client, -1, &info);
+ err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it
here as this isn't an error. */
@@ -1214,13 +1249,13 @@ static int i2c_detect_address(struct i2c_client *temp_client,
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
{
- const struct i2c_client_address_data *address_data;
+ const unsigned short *address_list;
struct i2c_client *temp_client;
int i, err = 0;
int adap_id = i2c_adapter_id(adapter);
- address_data = driver->address_data;
- if (!driver->detect || !address_data)
+ address_list = driver->address_list;
+ if (!driver->detect || !address_list)
return 0;
/* Set up a temporary client to help detect callback */
@@ -1235,7 +1270,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
/* Stop here if we can't use SMBUS_QUICK */
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) {
- if (address_data->normal_i2c[0] == I2C_CLIENT_END)
+ if (address_list[0] == I2C_CLIENT_END)
goto exit_free;
dev_warn(&adapter->dev, "SMBus Quick command not supported, "
@@ -1244,11 +1279,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
goto exit_free;
}
- for (i = 0; address_data->normal_i2c[i] != I2C_CLIENT_END; i += 1) {
+ for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
- "addr 0x%02x\n", adap_id,
- address_data->normal_i2c[i]);
- temp_client->addr = address_data->normal_i2c[i];
+ "addr 0x%02x\n", adap_id, address_list[i]);
+ temp_client->addr = address_list[i];
err = i2c_detect_address(temp_client, driver);
if (err)
goto exit_free;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 4b89b791be6a..42be0b15084b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
list_del(&cq->entry);
- __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
- smp_processor_id()));
+ __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
}
spin_unlock_irqrestore(&cct->task_lock, flags_cct);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 203b88a82b56..02c836e11813 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -24,6 +24,16 @@ config KEYBOARD_AAED2000
To compile this driver as a module, choose M here: the
module will be called aaed2000_kbd.
+config KEYBOARD_ADP5520
+ tristate "Keypad Support for ADP5520 PMIC"
+ depends on PMIC_ADP5520
+ help
+ This option enables support for the keypad scan matrix
+ on Analog Devices ADP5520 PMICs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp5520-keys.
+
config KEYBOARD_ADP5588
tristate "ADP5588 I2C QWERTY Keypad and IO Expander"
depends on I2C
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 68c017235ce9..78654ef65206 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
+obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
new file mode 100644
index 000000000000..a7ba27fb4109
--- /dev/null
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -0,0 +1,220 @@
+/*
+ * Keypad driver for Analog Devices ADP5520 MFD PMICs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_keys {
+ struct input_dev *input;
+ struct notifier_block notifier;
+ struct device *master;
+ unsigned short keycode[ADP5520_KEYMAPSIZE];
+};
+
+static void adp5520_keys_report_event(struct adp5520_keys *dev,
+ unsigned short keymask, int value)
+{
+ int i;
+
+ for (i = 0; i < ADP5520_MAXKEYS; i++)
+ if (keymask & (1 << i))
+ input_report_key(dev->input, dev->keycode[i], value);
+
+ input_sync(dev->input);
+}
+
+static int adp5520_keys_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct adp5520_keys *dev;
+ uint8_t reg_val_lo, reg_val_hi;
+ unsigned short keymask;
+
+ dev = container_of(nb, struct adp5520_keys, notifier);
+
+ if (event & ADP5520_KP_INT) {
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_2, &reg_val_hi);
+
+ keymask = (reg_val_hi << 8) | reg_val_lo;
+ /* Read twice to clear */
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_2, &reg_val_hi);
+ keymask |= (reg_val_hi << 8) | reg_val_lo;
+ adp5520_keys_report_event(dev, keymask, 1);
+ }
+
+ if (event & ADP5520_KR_INT) {
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_2, &reg_val_hi);
+
+ keymask = (reg_val_hi << 8) | reg_val_lo;
+ /* Read twice to clear */
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_2, &reg_val_hi);
+ keymask |= (reg_val_hi << 8) | reg_val_lo;
+ adp5520_keys_report_event(dev, keymask, 0);
+ }
+
+ return 0;
+}
+
+static int __devinit adp5520_keys_probe(struct platform_device *pdev)
+{
+ struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct input_dev *input;
+ struct adp5520_keys *dev;
+ int ret, i;
+ unsigned char en_mask, ctl_mask = 0;
+
+ if (pdev->id != ID_ADP5520) {
+ dev_err(&pdev->dev, "only ADP5520 supports Keypad\n");
+ return -EINVAL;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ if (!(pdata->rows_en_mask && pdata->cols_en_mask))
+ return -EINVAL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ input = input_allocate_device();
+ if (!input) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->master = pdev->dev.parent;
+ dev->input = input;
+
+ input->name = pdev->name;
+ input->phys = "adp5520-keys/input0";
+ input->dev.parent = &pdev->dev;
+
+ input_set_drvdata(input, dev);
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x5520;
+ input->id.version = 0x0001;
+
+ input->keycodesize = sizeof(dev->keycode[0]);
+ input->keycodemax = pdata->keymapsize;
+ input->keycode = dev->keycode;
+
+ memcpy(dev->keycode, pdata->keymap,
+ pdata->keymapsize * input->keycodesize);
+
+ /* setup input device */
+ __set_bit(EV_KEY, input->evbit);
+
+ if (pdata->repeat)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < input->keycodemax; i++)
+ __set_bit(dev->keycode[i], input->keybit);
+ __clear_bit(KEY_RESERVED, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register input device\n");
+ goto err;
+ }
+
+ en_mask = pdata->rows_en_mask | pdata->cols_en_mask;
+
+ ret = adp5520_set_bits(dev->master, ADP5520_GPIO_CFG_1, en_mask);
+
+ if (en_mask & ADP5520_COL_C3)
+ ctl_mask |= ADP5520_C3_MODE;
+
+ if (en_mask & ADP5520_ROW_R3)
+ ctl_mask |= ADP5520_R3_MODE;
+
+ if (ctl_mask)
+ ret |= adp5520_set_bits(dev->master, ADP5520_LED_CONTROL,
+ ctl_mask);
+
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_PULLUP,
+ pdata->rows_en_mask);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ ret = -EIO;
+ goto err1;
+ }
+
+ dev->notifier.notifier_call = adp5520_keys_notifier;
+ ret = adp5520_register_notifier(dev->master, &dev->notifier,
+ ADP5520_KP_IEN | ADP5520_KR_IEN);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier\n");
+ goto err1;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+
+err1:
+ input_unregister_device(input);
+ input = NULL;
+err:
+ input_free_device(input);
+ kfree(dev);
+ return ret;
+}
+
+static int __devexit adp5520_keys_remove(struct platform_device *pdev)
+{
+ struct adp5520_keys *dev = platform_get_drvdata(pdev);
+
+ adp5520_unregister_notifier(dev->master, &dev->notifier,
+ ADP5520_KP_IEN | ADP5520_KR_IEN);
+
+ input_unregister_device(dev->input);
+ kfree(dev);
+ return 0;
+}
+
+static struct platform_driver adp5520_keys_driver = {
+ .driver = {
+ .name = "adp5520-keys",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_keys_probe,
+ .remove = __devexit_p(adp5520_keys_remove),
+};
+
+static int __init adp5520_keys_init(void)
+{
+ return platform_driver_register(&adp5520_keys_driver);
+}
+module_init(adp5520_keys_init);
+
+static void __exit adp5520_keys_exit(void)
+{
+ platform_driver_unregister(&adp5520_keys_driver);
+}
+module_exit(adp5520_keys_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Keys ADP5520 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:adp5520-keys");
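A hedged board-level sketch of the platform data this new driver consumes; the field names mirror what adp5520_keys_probe() reads above, while the concrete keycodes and row/column enable masks are illustrative assumptions.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/mfd/adp5520.h>

/* Illustrative keymap; entries left at KEY_RESERVED are cleared from keybit. */
static unsigned short board_adp5520_keymap[ADP5520_KEYMAPSIZE] = {
	[0] = KEY_UP,
	[1] = KEY_DOWN,
	[2] = KEY_LEFT,
	[3] = KEY_RIGHT,
};

static struct adp5520_keys_platform_data board_adp5520_keys_data = {
	/* enable only the wired rows/columns; R3/C3 are the bits the
	 * probe above additionally routes via ADP5520_LED_CONTROL */
	.rows_en_mask	= ADP5520_ROW_R3,
	.cols_en_mask	= ADP5520_COL_C3,
	.keymap		= board_adp5520_keymap,
	.keymapsize	= ARRAY_SIZE(board_adp5520_keymap),
	.repeat		= 0,
};

The parent ADP5520 MFD core is expected to attach a structure like this as platform_data of the "adp5520-keys" platform device, matching the pdev->dev.platform_data and pdev->dev.parent usage in the probe.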
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index d48c808d5928..1edb596d927b 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -319,7 +319,7 @@ static int adp5588_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops adp5588_dev_pm_ops = {
+static const struct dev_pm_ops adp5588_dev_pm_ops = {
.suspend = adp5588_suspend,
.resume = adp5588_resume,
};
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 076111fc72d2..8e9380bfed40 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -295,7 +295,7 @@ static int sh_keysc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_keysc_dev_pm_ops = {
+static const struct dev_pm_ops sh_keysc_dev_pm_ops = {
.suspend = sh_keysc_suspend,
.resume = sh_keysc_resume,
};
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 9a2977c21696..eeaa7acb9cfc 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -31,7 +31,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
@@ -133,7 +133,7 @@ struct twl4030_keypad {
static int twl4030_kpread(struct twl4030_keypad *kp,
u8 *data, u32 reg, u8 num_bytes)
{
- int ret = twl4030_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes);
+ int ret = twl_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes);
if (ret < 0)
dev_warn(kp->dbg_dev,
@@ -145,7 +145,7 @@ static int twl4030_kpread(struct twl4030_keypad *kp,
static int twl4030_kpwrite_u8(struct twl4030_keypad *kp, u8 data, u32 reg)
{
- int ret = twl4030_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg);
+ int ret = twl_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg);
if (ret < 0)
dev_warn(kp->dbg_dev,
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 690f3fafa03b..61d10177fa83 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -247,7 +247,7 @@ static int bfin_rotary_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops bfin_rotary_pm_ops = {
+static const struct dev_pm_ops bfin_rotary_pm_ops = {
.suspend = bfin_rotary_suspend,
.resume = bfin_rotary_resume,
};
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 039dcb00ebd9..008de0c5834b 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -55,7 +55,6 @@ pcf50633_input_irq(int irq, void *data)
static int __devinit pcf50633_input_probe(struct platform_device *pdev)
{
struct pcf50633_input *input;
- struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
struct input_dev *input_dev;
int ret;
@@ -71,7 +70,7 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, input);
- input->pcf = pdata->pcf;
+ input->pcf = dev_to_pcf50633(pdev->dev.parent);
input->input_dev = input_dev;
input_dev->name = "PCF50633 PMU events";
@@ -85,9 +84,9 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
kfree(input);
return ret;
}
- pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYR,
+ pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYR,
pcf50633_input_irq, input);
- pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYF,
+ pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYF,
pcf50633_input_irq, input);
return 0;
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 21cb755a54fb..ea4e1fd12651 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -127,7 +127,7 @@ static void pcspkr_shutdown(struct platform_device *dev)
pcspkr_event(NULL, EV_SND, SND_BELL, 0);
}
-static struct dev_pm_ops pcspkr_pm_ops = {
+static const struct dev_pm_ops pcspkr_pm_ops = {
.suspend = pcspkr_suspend,
};
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index f5fc9974a111..bdde5c889035 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -27,7 +27,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#define PWR_PWRON_IRQ (1 << 0)
@@ -49,7 +49,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
local_irq_enable();
#endif
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
STS_HW_CONDITIONS);
if (!err) {
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index 67fcd33595de..b79097e3028a 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -233,7 +233,7 @@ static int pcap_ts_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pcap_ts_pm_ops = {
+static const struct dev_pm_ops pcap_ts_pm_ops = {
.suspend = pcap_ts_suspend,
.resume = pcap_ts_resume,
};
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 5a6ae646a636..94b796d84053 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -108,8 +108,7 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
p_dev->io.NumPorts2 = 0;
/* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index f9bdff39cf4a..e5deb15cf40c 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -120,8 +120,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
p_dev->io.IOAddrLines = 5;
/* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index a2f709f53974..c9a30b1c9237 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -137,7 +137,7 @@ static int elsa_cs_probe(struct pcmcia_device *link)
local->cardnr = -1;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.Handler = NULL;
/*
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index af5d393cc2d0..7836ec3c7f86 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -144,7 +144,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
link->priv = local;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.Handler = NULL;
/*
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index ea705394ce2b..b0c5976cbdb3 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -127,7 +127,7 @@ static int teles_probe(struct pcmcia_device *link)
link->priv = local;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.Handler = NULL;
/*
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f2cc13d76810..782f95822eab 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -50,7 +50,7 @@ static ssize_t led_brightness_store(struct device *dev,
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
- if (*after && isspace(*after))
+ if (isspace(*after))
count++;
if (count == size) {
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 3b83406de752..38b3378be442 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -83,7 +83,7 @@ static ssize_t led_delay_on_store(struct device *dev,
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
- if (*after && isspace(*after))
+ if (isspace(*after))
count++;
if (count == size) {
@@ -127,7 +127,7 @@ static ssize_t led_delay_off_store(struct device *dev,
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
- if (*after && isspace(*after))
+ if (isspace(*after))
count++;
if (count == size) {
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6ae388849a3b..fb2b7ef7868e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
/*S:010
* We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
* meanwhile). If that's not the case, we pretend everything in the
* Guest has changed.
*/
- if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
- __get_cpu_var(last_cpu) = cpu;
+ if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+ __get_cpu_var(lg_last_cpu) = cpu;
cpu->last_pages = pages;
cpu->changed = CHANGED_ALL;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2158377a1359..acb3a4e404ff 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -185,11 +185,10 @@ config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
help
- Multipath-IO is the ability of certain devices to address the same
- physical disk over multiple 'IO paths'. The code ensures that such
- paths can be defined and handled at runtime, and ensures that a
- transparent failover to the backup path(s) happens if a IO errors
- arrives on the primary path.
+ MD_MULTIPATH provides a simple multi-path personality for use
+ with the MD framework. It is not under active development. New
+ projects should consider using DM_MULTIPATH which has more
+ features and more testing.
If unsure, say N.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 60e2b322db11..26ac8aad0b19 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -212,7 +212,7 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
*/
/* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, long offset,
+static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
struct page *page,
unsigned long index, int size)
{
@@ -287,27 +287,36 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
+ loff_t offset = mddev->bitmap_info.offset;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
bdev_logical_block_size(rdev->bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
- if (bitmap->offset < 0) {
+ if (mddev->external) {
+ /* Bitmap could be anywhere. */
+ if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) >
+ rdev->data_offset &&
+ rdev->sb_start + offset <
+ rdev->data_offset + mddev->dev_sectors +
+ (PAGE_SIZE/512))
+ goto bad_alignment;
+ } else if (offset < 0) {
/* DATA BITMAP METADATA */
- if (bitmap->offset
+ if (offset
+ (long)(page->index * (PAGE_SIZE/512))
+ size/512 > 0)
/* bitmap runs in to metadata */
goto bad_alignment;
if (rdev->data_offset + mddev->dev_sectors
- > rdev->sb_start + bitmap->offset)
+ > rdev->sb_start + offset)
/* data runs in to bitmap */
goto bad_alignment;
} else if (rdev->sb_start < rdev->data_offset) {
/* METADATA BITMAP DATA */
if (rdev->sb_start
- + bitmap->offset
+ + offset
+ page->index*(PAGE_SIZE/512) + size/512
> rdev->data_offset)
/* bitmap runs in to data */
@@ -316,7 +325,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
/* DATA METADATA BITMAP - no problems */
}
md_super_write(mddev, rdev,
- rdev->sb_start + bitmap->offset
+ rdev->sb_start + offset
+ page->index * (PAGE_SIZE/512),
size,
page);
@@ -488,6 +497,8 @@ void bitmap_update_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
return;
+ if (bitmap->mddev->bitmap_info.external)
+ return;
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->sb_page) { /* no superblock */
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -501,6 +512,9 @@ void bitmap_update_sb(struct bitmap *bitmap)
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
}
+ /* Just in case these have been changed via sysfs: */
+ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
+ sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
kunmap_atomic(sb, KM_USER0);
write_page(bitmap, bitmap->sb_page, 1);
}
@@ -550,7 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
} else {
- bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
+ bitmap->sb_page = read_sb_page(bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
NULL,
0, sizeof(bitmap_super_t));
}
@@ -563,7 +578,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
chunksize = le32_to_cpu(sb->chunksize);
- daemon_sleep = le32_to_cpu(sb->daemon_sleep);
+ daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
/* verify that the bitmap-specific fields are valid */
@@ -576,7 +591,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
reason = "bitmap chunksize too small";
else if ((1 << ffz(~chunksize)) != chunksize)
reason = "bitmap chunksize not a power of 2";
- else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ)
+ else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
reason = "daemon sleep period out of range";
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
@@ -610,10 +625,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
}
success:
/* assign fields using values from superblock */
- bitmap->chunksize = chunksize;
- bitmap->daemon_sleep = daemon_sleep;
- bitmap->daemon_lastrun = jiffies;
- bitmap->max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
bitmap->flags |= le32_to_cpu(sb->state);
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
bitmap->flags |= BITMAP_HOSTENDIAN;
@@ -664,16 +678,26 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
* general bitmap file operations
*/
+/*
+ * on-disk bitmap:
+ *
+ * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
+ * file a page at a time. There's a superblock at the start of the file.
+ */
/* calculate the index of the page that contains this bit */
-static inline unsigned long file_page_index(unsigned long chunk)
+static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
{
- return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT;
+ if (!bitmap->mddev->bitmap_info.external)
+ chunk += sizeof(bitmap_super_t) << 3;
+ return chunk >> PAGE_BIT_SHIFT;
}
/* calculate the (bit) offset of this bit within a page */
-static inline unsigned long file_page_offset(unsigned long chunk)
+static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
{
- return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1);
+ if (!bitmap->mddev->bitmap_info.external)
+ chunk += sizeof(bitmap_super_t) << 3;
+ return chunk & (PAGE_BITS - 1);
}
/*
@@ -686,8 +710,9 @@ static inline unsigned long file_page_offset(unsigned long chunk)
static inline struct page *filemap_get_page(struct bitmap *bitmap,
unsigned long chunk)
{
- if (file_page_index(chunk) >= bitmap->file_pages) return NULL;
- return bitmap->filemap[file_page_index(chunk) - file_page_index(0)];
+ if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL;
+ return bitmap->filemap[file_page_index(bitmap, chunk)
+ - file_page_index(bitmap, 0)];
}
@@ -710,7 +735,7 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
spin_unlock_irqrestore(&bitmap->lock, flags);
while (pages--)
- if (map[pages]->index != 0) /* 0 is sb_page, release it below */
+ if (map[pages] != sb_page) /* sb_page is released below */
free_buffers(map[pages]);
kfree(map);
kfree(attr);
@@ -821,7 +846,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
page = filemap_get_page(bitmap, chunk);
if (!page) return;
- bit = file_page_offset(chunk);
+ bit = file_page_offset(bitmap, chunk);
/* set the bit */
kaddr = kmap_atomic(page, KM_USER0);
@@ -907,7 +932,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
chunks = bitmap->chunks;
file = bitmap->file;
- BUG_ON(!file && !bitmap->offset);
+ BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);
#ifdef INJECT_FAULTS_3
outofdate = 1;
@@ -919,14 +944,17 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
"recovery\n", bmname(bitmap));
bytes = (chunks + 7) / 8;
+ if (!bitmap->mddev->bitmap_info.external)
+ bytes += sizeof(bitmap_super_t);
- num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
- if (file && i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) {
+ if (file && i_size_read(file->f_mapping->host) < bytes) {
printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
bmname(bitmap),
(unsigned long) i_size_read(file->f_mapping->host),
- bytes + sizeof(bitmap_super_t));
+ bytes);
goto err;
}
@@ -947,17 +975,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
for (i = 0; i < chunks; i++) {
int b;
- index = file_page_index(i);
- bit = file_page_offset(i);
+ index = file_page_index(bitmap, i);
+ bit = file_page_offset(bitmap, i);
if (index != oldindex) { /* this is a new page, read it in */
int count;
/* unmap the old page, we're done with it */
if (index == num_pages-1)
- count = bytes + sizeof(bitmap_super_t)
- - index * PAGE_SIZE;
+ count = bytes - index * PAGE_SIZE;
else
count = PAGE_SIZE;
- if (index == 0) {
+ if (index == 0 && bitmap->sb_page) {
/*
* if we're here then the superblock page
* contains some bits (PAGE_SIZE != sizeof sb)
@@ -967,14 +994,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = sizeof(bitmap_super_t);
if (!file)
read_sb_page(bitmap->mddev,
- bitmap->offset,
+ bitmap->mddev->bitmap_info.offset,
page,
index, count);
} else if (file) {
page = read_page(file, index, bitmap, count);
offset = 0;
} else {
- page = read_sb_page(bitmap->mddev, bitmap->offset,
+ page = read_sb_page(bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
NULL,
index, count);
offset = 0;
@@ -1078,23 +1106,32 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
* out to disk
*/
-void bitmap_daemon_work(struct bitmap *bitmap)
+void bitmap_daemon_work(mddev_t *mddev)
{
+ struct bitmap *bitmap;
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
int blocks;
void *paddr;
- if (bitmap == NULL)
+ /* Use a mutex to guard daemon_work against
+ * bitmap_destroy.
+ */
+ mutex_lock(&mddev->bitmap_info.mutex);
+ bitmap = mddev->bitmap;
+ if (bitmap == NULL) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
return;
- if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
+ }
+ if (time_before(jiffies, bitmap->daemon_lastrun
+ + bitmap->mddev->bitmap_info.daemon_sleep))
goto done;
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- return;
+ goto done;
}
bitmap->allclean = 1;
@@ -1142,7 +1179,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
/* We are possibly going to clear some bits, so make
* sure that events_cleared is up-to-date.
*/
- if (bitmap->need_sync) {
+ if (bitmap->need_sync &&
+ bitmap->mddev->bitmap_info.external == 0) {
bitmap_super_t *sb;
bitmap->need_sync = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -1152,7 +1190,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
write_page(bitmap, bitmap->sb_page, 1);
}
spin_lock_irqsave(&bitmap->lock, flags);
- clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
+ if (!bitmap->need_sync)
+ clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
}
bmc = bitmap_get_counter(bitmap,
(sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
@@ -1167,7 +1206,7 @@ void bitmap_daemon_work(struct bitmap *bitmap)
if (*bmc == 2) {
*bmc=1; /* maybe clear the bit next time */
set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
- } else if (*bmc == 1) {
+ } else if (*bmc == 1 && !bitmap->need_sync) {
/* we can clear the bit */
*bmc = 0;
bitmap_count_page(bitmap,
@@ -1177,9 +1216,11 @@ void bitmap_daemon_work(struct bitmap *bitmap)
/* clear the bit */
paddr = kmap_atomic(page, KM_USER0);
if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(j), paddr);
+ clear_bit(file_page_offset(bitmap, j),
+ paddr);
else
- ext2_clear_bit(file_page_offset(j), paddr);
+ ext2_clear_bit(file_page_offset(bitmap, j),
+ paddr);
kunmap_atomic(paddr, KM_USER0);
}
} else
@@ -1202,7 +1243,9 @@ void bitmap_daemon_work(struct bitmap *bitmap)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+ bitmap->mddev->thread->timeout =
+ bitmap->mddev->bitmap_info.daemon_sleep;
+ mutex_unlock(&mddev->bitmap_info.mutex);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1332,6 +1375,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
+ sysfs_notify_dirent(bitmap->sysfs_can_clear);
}
if (!success && ! (*bmc & NEEDED_MASK))
@@ -1470,7 +1514,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
return;
}
if (time_before(jiffies, (bitmap->last_end_sync
- + bitmap->daemon_sleep * HZ)))
+ + bitmap->mddev->bitmap_info.daemon_sleep)))
return;
wait_event(bitmap->mddev->recovery_wait,
atomic_read(&bitmap->mddev->recovery_active) == 0);
@@ -1522,6 +1566,12 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
bitmap_file_set_bit(bitmap, sec);
+ if (sec < bitmap->mddev->recovery_cp)
+ /* We are asserting that the array is dirty,
+ * so move the recovery_cp address back so
+ * that it is obvious that it is dirty
+ */
+ bitmap->mddev->recovery_cp = sec;
}
}
@@ -1531,7 +1581,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
void bitmap_flush(mddev_t *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
- int sleep;
+ long sleep;
if (!bitmap) /* there was no bitmap */
return;
@@ -1539,12 +1589,13 @@ void bitmap_flush(mddev_t *mddev)
/* run the daemon_work three time to ensure everything is flushed
* that can be
*/
- sleep = bitmap->daemon_sleep;
- bitmap->daemon_sleep = 0;
- bitmap_daemon_work(bitmap);
- bitmap_daemon_work(bitmap);
- bitmap_daemon_work(bitmap);
- bitmap->daemon_sleep = sleep;
+ sleep = mddev->bitmap_info.daemon_sleep * 2;
+ bitmap->daemon_lastrun -= sleep;
+ bitmap_daemon_work(mddev);
+ bitmap->daemon_lastrun -= sleep;
+ bitmap_daemon_work(mddev);
+ bitmap->daemon_lastrun -= sleep;
+ bitmap_daemon_work(mddev);
bitmap_update_sb(bitmap);
}
@@ -1574,6 +1625,7 @@ static void bitmap_free(struct bitmap *bitmap)
kfree(bp);
kfree(bitmap);
}
+
void bitmap_destroy(mddev_t *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -1581,10 +1633,15 @@ void bitmap_destroy(mddev_t *mddev)
if (!bitmap) /* there was no bitmap */
return;
+ mutex_lock(&mddev->bitmap_info.mutex);
mddev->bitmap = NULL; /* disconnect from the md device */
+ mutex_unlock(&mddev->bitmap_info.mutex);
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ if (bitmap->sysfs_can_clear)
+ sysfs_put(bitmap->sysfs_can_clear);
+
bitmap_free(bitmap);
}
@@ -1598,16 +1655,17 @@ int bitmap_create(mddev_t *mddev)
sector_t blocks = mddev->resync_max_sectors;
unsigned long chunks;
unsigned long pages;
- struct file *file = mddev->bitmap_file;
+ struct file *file = mddev->bitmap_info.file;
int err;
sector_t start;
+ struct sysfs_dirent *bm;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
- if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */
+ if (!file && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
return 0;
- BUG_ON(file && mddev->bitmap_offset);
+ BUG_ON(file && mddev->bitmap_info.offset);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
@@ -1620,8 +1678,14 @@ int bitmap_create(mddev_t *mddev)
bitmap->mddev = mddev;
+ bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
+ if (bm) {
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
+ sysfs_put(bm);
+ } else
+ bitmap->sysfs_can_clear = NULL;
+
bitmap->file = file;
- bitmap->offset = mddev->bitmap_offset;
if (file) {
get_file(file);
/* As future accesses to this file will use bmap,
@@ -1630,12 +1694,22 @@ int bitmap_create(mddev_t *mddev)
*/
vfs_fsync(file, file->f_dentry, 1);
}
- /* read superblock from bitmap file (this sets bitmap->chunksize) */
- err = bitmap_read_sb(bitmap);
+ /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
+ if (!mddev->bitmap_info.external)
+ err = bitmap_read_sb(bitmap);
+ else {
+ err = 0;
+ if (mddev->bitmap_info.chunksize == 0 ||
+ mddev->bitmap_info.daemon_sleep == 0)
+ /* chunksize and time_base need to be
+ * set first. */
+ err = -EINVAL;
+ }
if (err)
goto error;
- bitmap->chunkshift = ffz(~bitmap->chunksize);
+ bitmap->daemon_lastrun = jiffies;
+ bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);
/* now that chunksize and chunkshift are set, we can use these macros */
chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
@@ -1677,7 +1751,8 @@ int bitmap_create(mddev_t *mddev)
mddev->bitmap = bitmap;
- mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+ mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
+ md_wakeup_thread(mddev->thread);
bitmap_update_sb(bitmap);
@@ -1688,6 +1763,264 @@ int bitmap_create(mddev_t *mddev)
return err;
}
+static ssize_t
+location_show(mddev_t *mddev, char *page)
+{
+ ssize_t len;
+ if (mddev->bitmap_info.file) {
+ len = sprintf(page, "file");
+ } else if (mddev->bitmap_info.offset) {
+ len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
+ } else
+ len = sprintf(page, "none");
+ len += sprintf(page+len, "\n");
+ return len;
+}
+
+static ssize_t
+location_store(mddev_t *mddev, const char *buf, size_t len)
+{
+
+ if (mddev->pers) {
+ if (!mddev->pers->quiesce)
+ return -EBUSY;
+ if (mddev->recovery || mddev->sync_thread)
+ return -EBUSY;
+ }
+
+ if (mddev->bitmap || mddev->bitmap_info.file ||
+ mddev->bitmap_info.offset) {
+ /* bitmap already configured. Only option is to clear it */
+ if (strncmp(buf, "none", 4) != 0)
+ return -EBUSY;
+ if (mddev->pers) {
+ mddev->pers->quiesce(mddev, 1);
+ bitmap_destroy(mddev);
+ mddev->pers->quiesce(mddev, 0);
+ }
+ mddev->bitmap_info.offset = 0;
+ if (mddev->bitmap_info.file) {
+ struct file *f = mddev->bitmap_info.file;
+ mddev->bitmap_info.file = NULL;
+ restore_bitmap_write_access(f);
+ fput(f);
+ }
+ } else {
+ /* No bitmap, OK to set a location */
+ long long offset;
+ if (strncmp(buf, "none", 4) == 0)
+ /* nothing to be done */;
+ else if (strncmp(buf, "file:", 5) == 0) {
+ /* Not supported yet */
+ return -EINVAL;
+ } else {
+ int rv;
+ if (buf[0] == '+')
+ rv = strict_strtoll(buf+1, 10, &offset);
+ else
+ rv = strict_strtoll(buf, 10, &offset);
+ if (rv)
+ return rv;
+ if (offset == 0)
+ return -EINVAL;
+ if (mddev->bitmap_info.external == 0 &&
+ mddev->major_version == 0 &&
+ offset != mddev->bitmap_info.default_offset)
+ return -EINVAL;
+ mddev->bitmap_info.offset = offset;
+ if (mddev->pers) {
+ mddev->pers->quiesce(mddev, 1);
+ rv = bitmap_create(mddev);
+ if (rv) {
+ bitmap_destroy(mddev);
+ mddev->bitmap_info.offset = 0;
+ }
+ mddev->pers->quiesce(mddev, 0);
+ if (rv)
+ return rv;
+ }
+ }
+ }
+ if (!mddev->external) {
+ /* Ensure new bitmap info is stored in
+ * metadata promptly.
+ */
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ }
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_location =
+__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
+
+static ssize_t
+timeout_show(mddev_t *mddev, char *page)
+{
+ ssize_t len;
+ unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
+ unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
+
+ len = sprintf(page, "%lu", secs);
+ if (jifs)
+ len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
+ len += sprintf(page+len, "\n");
+ return len;
+}
+
+static ssize_t
+timeout_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* timeout can be set at any time */
+ unsigned long timeout;
+ int rv = strict_strtoul_scaled(buf, &timeout, 4);
+ if (rv)
+ return rv;
+
+ /* just to make sure we don't overflow... */
+ if (timeout >= LONG_MAX / HZ)
+ return -EINVAL;
+
+ timeout = timeout * HZ / 10000;
+
+ if (timeout >= MAX_SCHEDULE_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT-1;
+ if (timeout < 1)
+ timeout = 1;
+ mddev->bitmap_info.daemon_sleep = timeout;
+ if (mddev->thread) {
+ /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
+ * the bitmap is all clean and we don't need to
+ * adjust the timeout right now
+ */
+ if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
+ mddev->thread->timeout = timeout;
+ md_wakeup_thread(mddev->thread);
+ }
+ }
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_timeout =
+__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
+
+static ssize_t
+backlog_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
+}
+
+static ssize_t
+backlog_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ unsigned long backlog;
+ int rv = strict_strtoul(buf, 10, &backlog);
+ if (rv)
+ return rv;
+ if (backlog > COUNTER_MAX)
+ return -EINVAL;
+ mddev->bitmap_info.max_write_behind = backlog;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_backlog =
+__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
+
+static ssize_t
+chunksize_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
+}
+
+static ssize_t
+chunksize_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* Can only be changed when no bitmap is active */
+ int rv;
+ unsigned long csize;
+ if (mddev->bitmap)
+ return -EBUSY;
+ rv = strict_strtoul(buf, 10, &csize);
+ if (rv)
+ return rv;
+ if (csize < 512 ||
+ !is_power_of_2(csize))
+ return -EINVAL;
+ mddev->bitmap_info.chunksize = csize;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_chunksize =
+__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
+
+static ssize_t metadata_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%s\n", (mddev->bitmap_info.external
+ ? "external" : "internal"));
+}
+
+static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap ||
+ mddev->bitmap_info.file ||
+ mddev->bitmap_info.offset)
+ return -EBUSY;
+ if (strncmp(buf, "external", 8) == 0)
+ mddev->bitmap_info.external = 1;
+ else if (strncmp(buf, "internal", 8) == 0)
+ mddev->bitmap_info.external = 0;
+ else
+ return -EINVAL;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_metadata =
+__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+
+static ssize_t can_clear_show(mddev_t *mddev, char *page)
+{
+ int len;
+ if (mddev->bitmap)
+ len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
+ "false" : "true"));
+ else
+ len = sprintf(page, "\n");
+ return len;
+}
+
+static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap == NULL)
+ return -ENOENT;
+ if (strncmp(buf, "false", 5) == 0)
+ mddev->bitmap->need_sync = 1;
+ else if (strncmp(buf, "true", 4) == 0) {
+ if (mddev->degraded)
+ return -EBUSY;
+ mddev->bitmap->need_sync = 0;
+ } else
+ return -EINVAL;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_can_clear =
+__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+
+static struct attribute *md_bitmap_attrs[] = {
+ &bitmap_location.attr,
+ &bitmap_timeout.attr,
+ &bitmap_backlog.attr,
+ &bitmap_chunksize.attr,
+ &bitmap_metadata.attr,
+ &bitmap_can_clear.attr,
+ NULL
+};
+struct attribute_group md_bitmap_group = {
+ .name = "bitmap",
+ .attrs = md_bitmap_attrs,
+};
+
+
/* the bitmap API -- for raid personalities */
EXPORT_SYMBOL(bitmap_startwrite);
EXPORT_SYMBOL(bitmap_endwrite);
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index e98900671ca9..cb821d76d1b4 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -106,7 +106,7 @@ typedef __u16 bitmap_counter_t;
#define BITMAP_BLOCK_SHIFT 9
/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
+#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
@@ -118,16 +118,6 @@ typedef __u16 bitmap_counter_t;
(CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
-/*
- * on-disk bitmap:
- *
- * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
- * file a page at a time. There's a superblock at the start of the file.
- */
-
-/* map chunks (bits) to file pages - offset by the size of the superblock */
-#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
-
#endif
/*
@@ -209,7 +199,6 @@ struct bitmap {
int counter_bits; /* how many bits per block counter */
/* bitmap chunksize -- how much data does each bit represent? */
- unsigned long chunksize;
unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
unsigned long chunks; /* total number of data chunks for the array */
@@ -226,7 +215,6 @@ struct bitmap {
/* bitmap spinlock */
spinlock_t lock;
- long offset; /* offset from superblock if file is NULL */
struct file *file; /* backing disk file */
struct page *sb_page; /* cached copy of the bitmap file superblock */
struct page **filemap; /* list of cache pages for the file */
@@ -238,7 +226,6 @@ struct bitmap {
int allclean;
- unsigned long max_write_behind; /* write-behind mode */
atomic_t behind_writes;
/*
@@ -246,7 +233,6 @@ struct bitmap {
* file, cleaning up bits and flushing out pages to disk as necessary
*/
unsigned long daemon_lastrun; /* jiffies of last run */
- unsigned long daemon_sleep; /* how many seconds between updates? */
unsigned long last_end_sync; /* when we lasted called end_sync to
* update bitmap with resync progress */
@@ -254,6 +240,7 @@ struct bitmap {
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+ struct sysfs_dirent *sysfs_can_clear;
};
/* the bitmap API */
@@ -282,7 +269,7 @@ void bitmap_close_sync(struct bitmap *bitmap);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(struct bitmap *bitmap);
+void bitmap_daemon_work(mddev_t *mddev);
#endif
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e412980763bd..a93637223c8d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -71,10 +71,21 @@ struct crypt_iv_operations {
int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
const char *opts);
void (*dtr)(struct crypt_config *cc);
- const char *(*status)(struct crypt_config *cc);
+ int (*init)(struct crypt_config *cc);
+ int (*wipe)(struct crypt_config *cc);
int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
+struct iv_essiv_private {
+ struct crypto_cipher *tfm;
+ struct crypto_hash *hash_tfm;
+ u8 *salt;
+};
+
+struct iv_benbi_private {
+ int shift;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -102,8 +113,8 @@ struct crypt_config {
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
union {
- struct crypto_cipher *essiv_tfm;
- int benbi_shift;
+ struct iv_essiv_private essiv;
+ struct iv_benbi_private benbi;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
@@ -147,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
* plain: the initial vector is the 32-bit little-endian version of the sector
* number, padded with zeros if necessary.
*
+ * plain64: the initial vector is the 64-bit little-endian version of the sector
+ * number, padded with zeros if necessary.
+ *
* essiv: "encrypted sector|salt initial vector", the sector number is
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
@@ -169,88 +183,123 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
return 0;
}
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
- const char *opts)
+static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
+ sector_t sector)
{
- struct crypto_cipher *essiv_tfm;
- struct crypto_hash *hash_tfm;
+ memset(iv, 0, cc->iv_size);
+ *(u64 *)iv = cpu_to_le64(sector);
+
+ return 0;
+}
+
+/* Initialise ESSIV - compute salt but no local memory allocations */
+static int crypt_iv_essiv_init(struct crypt_config *cc)
+{
+ struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
struct hash_desc desc;
struct scatterlist sg;
- unsigned int saltsize;
- u8 *salt;
int err;
- if (opts == NULL) {
+ sg_init_one(&sg, cc->key, cc->key_size);
+ desc.tfm = essiv->hash_tfm;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+ if (err)
+ return err;
+
+ return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+ crypto_hash_digestsize(essiv->hash_tfm));
+}
+
+/* Wipe salt and reset key derived from volume key */
+static int crypt_iv_essiv_wipe(struct crypt_config *cc)
+{
+ struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+ unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+
+ memset(essiv->salt, 0, salt_size);
+
+ return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+}
+
+static void crypt_iv_essiv_dtr(struct crypt_config *cc)
+{
+ struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+
+ crypto_free_cipher(essiv->tfm);
+ essiv->tfm = NULL;
+
+ crypto_free_hash(essiv->hash_tfm);
+ essiv->hash_tfm = NULL;
+
+ kzfree(essiv->salt);
+ essiv->salt = NULL;
+}
+
+static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct crypto_cipher *essiv_tfm = NULL;
+ struct crypto_hash *hash_tfm = NULL;
+ u8 *salt = NULL;
+ int err;
+
+ if (!opts) {
ti->error = "Digest algorithm missing for ESSIV mode";
return -EINVAL;
}
- /* Hash the cipher key with the given hash algorithm */
+ /* Allocate hash algorithm */
hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
- return PTR_ERR(hash_tfm);
+ err = PTR_ERR(hash_tfm);
+ goto bad;
}
- saltsize = crypto_hash_digestsize(hash_tfm);
- salt = kmalloc(saltsize, GFP_KERNEL);
- if (salt == NULL) {
+ salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+ if (!salt) {
ti->error = "Error kmallocing salt storage in ESSIV";
- crypto_free_hash(hash_tfm);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto bad;
}
- sg_init_one(&sg, cc->key, cc->key_size);
- desc.tfm = hash_tfm;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
- crypto_free_hash(hash_tfm);
-
- if (err) {
- ti->error = "Error calculating hash in ESSIV";
- kfree(salt);
- return err;
- }
-
- /* Setup the essiv_tfm with the given salt */
+ /* Allocate essiv_tfm */
essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(essiv_tfm)) {
ti->error = "Error allocating crypto tfm for ESSIV";
- kfree(salt);
- return PTR_ERR(essiv_tfm);
+ err = PTR_ERR(essiv_tfm);
+ goto bad;
}
if (crypto_cipher_blocksize(essiv_tfm) !=
crypto_ablkcipher_ivsize(cc->tfm)) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
- crypto_free_cipher(essiv_tfm);
- kfree(salt);
- return -EINVAL;
+ err = -EINVAL;
+ goto bad;
}
- err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
- if (err) {
- ti->error = "Failed to set key for ESSIV cipher";
- crypto_free_cipher(essiv_tfm);
- kfree(salt);
- return err;
- }
- kfree(salt);
- cc->iv_gen_private.essiv_tfm = essiv_tfm;
+ cc->iv_gen_private.essiv.salt = salt;
+ cc->iv_gen_private.essiv.tfm = essiv_tfm;
+ cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
+
return 0;
-}
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
- crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
- cc->iv_gen_private.essiv_tfm = NULL;
+bad:
+ if (essiv_tfm && !IS_ERR(essiv_tfm))
+ crypto_free_cipher(essiv_tfm);
+ if (hash_tfm && !IS_ERR(hash_tfm))
+ crypto_free_hash(hash_tfm);
+ kfree(salt);
+ return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
- crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
return 0;
}
@@ -273,7 +322,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- cc->iv_gen_private.benbi_shift = 9 - log;
+ cc->iv_gen_private.benbi.shift = 9 - log;
return 0;
}
@@ -288,7 +337,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
- val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
return 0;
@@ -305,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
+static struct crypt_iv_operations crypt_iv_plain64_ops = {
+ .generator = crypt_iv_plain64_gen
+};
+
static struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
+ .init = crypt_iv_essiv_init,
+ .wipe = crypt_iv_essiv_wipe,
.generator = crypt_iv_essiv_gen
};
@@ -934,14 +989,14 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- return 0;
+ return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
- return 0;
+ return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
/*
@@ -983,11 +1038,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM;
}
- if (crypt_set_key(cc, argv[1])) {
- ti->error = "Error decoding key";
- goto bad_cipher;
- }
-
/* Compatibility mode for old dm-crypt cipher strings */
if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
chainmode = "cbc";
@@ -1015,6 +1065,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
strcpy(cc->chainmode, chainmode);
cc->tfm = tfm;
+ if (crypt_set_key(cc, argv[1]) < 0) {
+ ti->error = "Error decoding and setting key";
+ goto bad_ivmode;
+ }
+
/*
* Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
* See comments at iv code
@@ -1024,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops = NULL;
else if (strcmp(ivmode, "plain") == 0)
cc->iv_gen_ops = &crypt_iv_plain_ops;
+ else if (strcmp(ivmode, "plain64") == 0)
+ cc->iv_gen_ops = &crypt_iv_plain64_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
@@ -1039,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
goto bad_ivmode;
+ if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
+ cc->iv_gen_ops->init(cc) < 0) {
+ ti->error = "Error initialising IV";
+ goto bad_slab_pool;
+ }
+
cc->iv_size = crypto_ablkcipher_ivsize(tfm);
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
@@ -1085,11 +1148,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_bs;
}
- if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
- ti->error = "Error setting key";
- goto bad_device;
- }
-
if (sscanf(argv[2], "%llu", &tmpll) != 1) {
ti->error = "Invalid iv_offset sector";
goto bad_device;
@@ -1278,6 +1336,7 @@ static void crypt_resume(struct dm_target *ti)
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
+ int ret = -EINVAL;
if (argc < 2)
goto error;
@@ -1287,10 +1346,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
DMWARN("not suspended during key manipulation.");
return -EINVAL;
}
- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
- return crypt_set_key(cc, argv[2]);
- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
+ if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+ ret = crypt_set_key(cc, argv[2]);
+ if (ret)
+ return ret;
+ if (cc->iv_gen_ops && cc->iv_gen_ops->init)
+ ret = cc->iv_gen_ops->init(cc);
+ return ret;
+ }
+ if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+ if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
+ ret = cc->iv_gen_ops->wipe(cc);
+ if (ret)
+ return ret;
+ }
return crypt_wipe_key(cc);
+ }
}
error:
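/*
 * Editorial sketch, not part of the patch: what the new "plain64" IV mode
 * computes, mirroring crypt_iv_plain64_gen() above -- the IV is the 64-bit
 * little-endian sector number, zero-padded to the cipher's IV size ("essiv"
 * then encrypts this value with a key derived by hashing the volume key).
 * Stand-alone user-space demo; plain64_iv() and the sample sector value are
 * illustrative names, not driver symbols.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void plain64_iv(uint8_t *iv, size_t iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);				/* zero-pad the whole IV */
	for (size_t i = 0; i < 8 && i < iv_size; i++)
		iv[i] = (uint8_t)(sector >> (8 * i));	/* little-endian byte order */
}

int main(void)
{
	uint8_t iv[16];

	plain64_iv(iv, sizeof(iv), 0x123456789abcdef0ULL);
	for (size_t i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");	/* prints f0debc9a78563412 followed by eight zero bytes */
	return 0;
}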
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 7dbe652efb5a..2b7907b6dd09 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -172,7 +172,8 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
}
/* Validate the chunk size against the device block size */
- if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+ if (chunk_size %
+ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
@@ -190,6 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
}
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ struct dm_snapshot *snap,
unsigned *args_used,
struct dm_exception_store **store)
{
@@ -198,7 +200,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store *tmp_store;
char persistent;
- if (argc < 3) {
+ if (argc < 2) {
ti->error = "Insufficient exception store arguments";
return -EINVAL;
}
@@ -209,14 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
return -ENOMEM;
}
- persistent = toupper(*argv[1]);
+ persistent = toupper(*argv[0]);
if (persistent == 'P')
type = get_type("P");
else if (persistent == 'N')
type = get_type("N");
else {
ti->error = "Persistent flag is not P or N";
- return -EINVAL;
+ r = -EINVAL;
+ goto bad_type;
}
if (!type) {
@@ -226,32 +229,23 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
tmp_store->type = type;
- tmp_store->ti = ti;
-
- r = dm_get_device(ti, argv[0], 0, 0,
- FMODE_READ | FMODE_WRITE, &tmp_store->cow);
- if (r) {
- ti->error = "Cannot get COW device";
- goto bad_cow;
- }
+ tmp_store->snap = snap;
- r = set_chunk_size(tmp_store, argv[2], &ti->error);
+ r = set_chunk_size(tmp_store, argv[1], &ti->error);
if (r)
- goto bad_ctr;
+ goto bad;
r = type->ctr(tmp_store, 0, NULL);
if (r) {
ti->error = "Exception store type constructor failed";
- goto bad_ctr;
+ goto bad;
}
- *args_used = 3;
+ *args_used = 2;
*store = tmp_store;
return 0;
-bad_ctr:
- dm_put_device(ti, tmp_store->cow);
-bad_cow:
+bad:
put_type(type);
bad_type:
kfree(tmp_store);
@@ -262,7 +256,6 @@ EXPORT_SYMBOL(dm_exception_store_create);
void dm_exception_store_destroy(struct dm_exception_store *store)
{
store->type->dtr(store);
- dm_put_device(store->ti, store->cow);
put_type(store->type);
kfree(store);
}
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 8a223a48802c..e8dfa06af3ba 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -26,7 +26,7 @@ typedef sector_t chunk_t;
* of chunks that follow contiguously. Remaining bits hold the number of the
* chunk within the device.
*/
-struct dm_snap_exception {
+struct dm_exception {
struct list_head hash_list;
chunk_t old_chunk;
@@ -64,17 +64,34 @@ struct dm_exception_store_type {
* Find somewhere to store the next exception.
*/
int (*prepare_exception) (struct dm_exception_store *store,
- struct dm_snap_exception *e);
+ struct dm_exception *e);
/*
* Update the metadata with this exception.
*/
void (*commit_exception) (struct dm_exception_store *store,
- struct dm_snap_exception *e,
+ struct dm_exception *e,
void (*callback) (void *, int success),
void *callback_context);
/*
+ * Returns 0 if the exception store is empty.
+ *
+ * If there are exceptions still to be merged, sets
+ * *last_old_chunk and *last_new_chunk to the most recent
+ * still-to-be-merged chunk and returns the number of
+ * consecutive previous ones.
+ */
+ int (*prepare_merge) (struct dm_exception_store *store,
+ chunk_t *last_old_chunk, chunk_t *last_new_chunk);
+
+ /*
+ * Clear the last n exceptions.
+ * nr_merged must be <= the value returned by prepare_merge.
+ */
+ int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
+
+ /*
* The snapshot is invalid, note this in the metadata.
*/
void (*drop_snapshot) (struct dm_exception_store *store);
@@ -86,19 +103,19 @@ struct dm_exception_store_type {
/*
* Return how full the snapshot is.
*/
- void (*fraction_full) (struct dm_exception_store *store,
- sector_t *numerator,
- sector_t *denominator);
+ void (*usage) (struct dm_exception_store *store,
+ sector_t *total_sectors, sector_t *sectors_allocated,
+ sector_t *metadata_sectors);
/* For internal device-mapper use only. */
struct list_head list;
};
+struct dm_snapshot;
+
struct dm_exception_store {
struct dm_exception_store_type *type;
- struct dm_target *ti;
-
- struct dm_dev *cow;
+ struct dm_snapshot *snap;
/* Size of data blocks saved - must be a power of 2 */
unsigned chunk_size;
@@ -109,6 +126,11 @@ struct dm_exception_store {
};
/*
+ * Obtain the cow device used by a given snapshot.
+ */
+struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
+
+/*
 * Functions to manipulate consecutive chunks
*/
# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
@@ -120,18 +142,25 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
-static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
{
return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
-static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
{
e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
BUG_ON(!dm_consecutive_chunk_count(e));
}
+static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
+{
+ BUG_ON(!dm_consecutive_chunk_count(e));
+
+ e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
+}
+
# else
# define DM_CHUNK_CONSECUTIVE_BITS 0
@@ -140,12 +169,16 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk;
}
-static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
{
return 0;
}
-static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
+{
+}
+
+static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
{
}
@@ -162,7 +195,7 @@ static inline sector_t get_dev_size(struct block_device *bdev)
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
sector_t sector)
{
- return (sector & ~store->chunk_mask) >> store->chunk_shift;
+ return sector >> store->chunk_shift;
}
int dm_exception_store_type_register(struct dm_exception_store_type *type);
@@ -173,6 +206,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
char **error);
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ struct dm_snapshot *snap,
unsigned *args_used,
struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);
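/*
 * Editorial sketch, not part of the patch: how a chunk_t packs a run length
 * into its top bits, as dm_chunk_number()/dm_consecutive_chunk_count() above
 * do when a 64-bit chunk_t is available. NUMBER_BITS stands in for
 * DM_CHUNK_NUMBER_BITS (56 in the driver); all other names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define NUMBER_BITS 56

static uint64_t chunk_number(uint64_t chunk)
{
	return chunk & ((1ULL << NUMBER_BITS) - 1);	/* low bits: chunk index */
}

static unsigned consecutive_count(uint64_t chunk)
{
	return (unsigned)(chunk >> NUMBER_BITS);	/* high bits: extra chunks in the run */
}

int main(void)
{
	uint64_t new_chunk = 1234;		/* a single remapped chunk */

	new_chunk += 1ULL << NUMBER_BITS;	/* dm_consecutive_chunk_count_inc() */
	new_chunk += 1ULL << NUMBER_BITS;	/* ...twice: the run now covers 3 chunks */

	printf("chunk=%llu consecutive=%u\n",
	       (unsigned long long)chunk_number(new_chunk),
	       consecutive_count(new_chunk));	/* prints chunk=1234 consecutive=2 */
	return 0;
}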
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3a2e6a2f8bdd..10f457ca6af2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,6 +5,8 @@
* This file is released under the GPL.
*/
+#include "dm.h"
+
#include <linux/device-mapper.h>
#include <linux/bio.h>
@@ -14,12 +16,19 @@
#include <linux/slab.h>
#include <linux/dm-io.h>
+#define DM_MSG_PREFIX "io"
+
+#define DM_IO_MAX_REGIONS BITS_PER_LONG
+
struct dm_io_client {
mempool_t *pool;
struct bio_set *bios;
};
-/* FIXME: can we shrink this ? */
+/*
+ * Aligning 'struct io' reduces the number of bits required to store
+ * its address. Refer to store_io_and_region_in_bio() below.
+ */
struct io {
unsigned long error_bits;
unsigned long eopnotsupp_bits;
@@ -28,7 +37,9 @@ struct io {
struct dm_io_client *client;
io_notify_fn callback;
void *context;
-};
+} __attribute__((aligned(DM_IO_MAX_REGIONS)));
+
+static struct kmem_cache *_dm_io_cache;
/*
* io contexts are only dynamically allocated for asynchronous
@@ -53,7 +64,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
+ client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
if (!client->pool)
goto bad;
@@ -88,18 +99,29 @@ EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
* We need to keep track of which region a bio is doing io for.
- * In order to save a memory allocation we store this the last
- * bvec which we know is unused (blech).
- * XXX This is ugly and can OOPS with some configs... find another way.
+ * To avoid a memory allocation to store just 5 or 6 bits, we
+ * ensure the 'struct io' pointer is aligned so enough low bits are
+ * always zero and then combine it with the region number directly in
+ * bi_private.
*---------------------------------------------------------------*/
-static inline void bio_set_region(struct bio *bio, unsigned region)
+static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
+ unsigned region)
{
- bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
+ if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
+ DMCRIT("Unaligned struct io pointer %p", io);
+ BUG();
+ }
+
+ bio->bi_private = (void *)((unsigned long)io | region);
}
-static inline unsigned bio_get_region(struct bio *bio)
+static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
+ unsigned *region)
{
- return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
+ unsigned long val = (unsigned long)bio->bi_private;
+
+ *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
+ *region = val & (DM_IO_MAX_REGIONS - 1);
}
/*-----------------------------------------------------------------
@@ -140,10 +162,8 @@ static void endio(struct bio *bio, int error)
/*
* The bio destructor in bio_put() may use the io object.
*/
- io = bio->bi_private;
- region = bio_get_region(bio);
+ retrieve_io_and_region_from_bio(bio, &io, &region);
- bio->bi_max_vecs++;
bio_put(bio);
dec_count(io, region, error);
@@ -243,7 +263,10 @@ static void vm_dp_init(struct dpages *dp, void *data)
static void dm_bio_destructor(struct bio *bio)
{
- struct io *io = bio->bi_private;
+ unsigned region;
+ struct io *io;
+
+ retrieve_io_and_region_from_bio(bio, &io, &region);
bio_free(bio, io->client->bios);
}
@@ -286,26 +309,23 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned num_bvecs;
sector_t remaining = where->count;
- while (remaining) {
+ /*
+ * where->count may be zero if rw holds a write barrier and we
+ * need to send a zero-sized barrier.
+ */
+ do {
/*
- * Allocate a suitably sized-bio: we add an extra
- * bvec for bio_get/set_region() and decrement bi_max_vecs
- * to hide it from bio_add_page().
+ * Allocate a suitably sized bio.
*/
num_bvecs = dm_sector_div_up(remaining,
(PAGE_SIZE >> SECTOR_SHIFT));
- num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
- num_bvecs);
- if (unlikely(num_bvecs > BIO_MAX_PAGES))
- num_bvecs = BIO_MAX_PAGES;
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
- bio->bi_private = io;
bio->bi_destructor = dm_bio_destructor;
- bio->bi_max_vecs--;
- bio_set_region(bio, region);
+ store_io_and_region_in_bio(bio, io, region);
/*
* Try and add as many pages as possible.
@@ -323,7 +343,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
atomic_inc(&io->count);
submit_bio(rw, bio);
- }
+ } while (remaining);
}
static void dispatch_io(int rw, unsigned int num_regions,
@@ -333,6 +353,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
int i;
struct dpages old_pages = *dp;
+ BUG_ON(num_regions > DM_IO_MAX_REGIONS);
+
if (sync)
rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
@@ -342,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count)
+ if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
do_region(rw, i, where + i, dp, io);
}
@@ -357,7 +379,14 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, int rw, struct dpages *dp,
unsigned long *error_bits)
{
- struct io io;
+ /*
+ * gcc <= 4.3 can't do the alignment for stack variables, so we must
+ * align it on our own.
+ * volatile prevents the optimizer from removing or reusing
+ * "io_" field from the stack frame (allowed in ANSI C).
+ */
+ volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
+ struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
WARN_ON(1);
@@ -365,33 +394,33 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
retry:
- io.error_bits = 0;
- io.eopnotsupp_bits = 0;
- atomic_set(&io.count, 1); /* see dispatch_io() */
- io.sleeper = current;
- io.client = client;
+ io->error_bits = 0;
+ io->eopnotsupp_bits = 0;
+ atomic_set(&io->count, 1); /* see dispatch_io() */
+ io->sleeper = current;
+ io->client = client;
- dispatch_io(rw, num_regions, where, dp, &io, 1);
+ dispatch_io(rw, num_regions, where, dp, io, 1);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&io.count))
+ if (!atomic_read(&io->count))
break;
io_schedule();
}
set_current_state(TASK_RUNNING);
- if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
+ if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
rw &= ~(1 << BIO_RW_BARRIER);
goto retry;
}
if (error_bits)
- *error_bits = io.error_bits;
+ *error_bits = io->error_bits;
- return io.error_bits ? -EIO : 0;
+ return io->error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
@@ -472,3 +501,18 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
+
+int __init dm_io_init(void)
+{
+ _dm_io_cache = KMEM_CACHE(io, 0);
+ if (!_dm_io_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dm_io_exit(void)
+{
+ kmem_cache_destroy(_dm_io_cache);
+ _dm_io_cache = NULL;
+}
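/*
 * Editorial sketch, not part of the patch: the pointer-tagging scheme used by
 * store_io_and_region_in_bio()/retrieve_io_and_region_from_bio() above.
 * Because 'struct io' is aligned to DM_IO_MAX_REGIONS (a power of two), the
 * low bits of its address are always zero and can carry the region number in
 * bi_private without a separate allocation. User-space demo; MAX_REGIONS,
 * io_demo, pack() and unpack() are illustrative names.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_REGIONS 64	/* stands in for DM_IO_MAX_REGIONS (BITS_PER_LONG) */

struct io_demo {
	unsigned long error_bits;	/* placeholder for the real 'struct io' fields */
} __attribute__((aligned(MAX_REGIONS)));

static void *pack(struct io_demo *io, unsigned region)
{
	assert((uintptr_t)io % MAX_REGIONS == 0);	/* alignment frees the low bits */
	assert(region < MAX_REGIONS);
	return (void *)((uintptr_t)io | region);	/* region rides in the low bits */
}

static void unpack(void *bi_private, struct io_demo **io, unsigned *region)
{
	uintptr_t val = (uintptr_t)bi_private;

	*io = (struct io_demo *)(val & ~(uintptr_t)(MAX_REGIONS - 1));
	*region = (unsigned)(val & (MAX_REGIONS - 1));
}

int main(void)
{
	static struct io_demo io;	/* static storage honours the alignment */
	struct io_demo *out;
	unsigned region;

	unpack(pack(&io, 5), &out, &region);
	printf("match=%d region=%u\n", out == &io, region);	/* match=1 region=5 */
	return 0;
}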
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index a67942931582..1d669322b27c 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -56,6 +56,11 @@ static void dm_hash_remove_all(int keep_open_devices);
*/
static DECLARE_RWSEM(_hash_lock);
+/*
+ * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
+ */
+static DEFINE_MUTEX(dm_hash_cells_mutex);
+
static void init_buckets(struct list_head *buckets)
{
unsigned int i;
@@ -206,7 +211,9 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
}
dm_get(md);
+ mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(md, cell);
+ mutex_unlock(&dm_hash_cells_mutex);
up_write(&_hash_lock);
return 0;
@@ -224,9 +231,11 @@ static void __hash_remove(struct hash_cell *hc)
/* remove from the dev hash */
list_del(&hc->uuid_list);
list_del(&hc->name_list);
+ mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(hc->md, NULL);
+ mutex_unlock(&dm_hash_cells_mutex);
- table = dm_get_table(hc->md);
+ table = dm_get_live_table(hc->md);
if (table) {
dm_table_event(table);
dm_table_put(table);
@@ -321,13 +330,15 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
*/
list_del(&hc->name_list);
old_name = hc->name;
+ mutex_lock(&dm_hash_cells_mutex);
hc->name = new_name;
+ mutex_unlock(&dm_hash_cells_mutex);
list_add(&hc->name_list, _name_buckets + hash_str(new_name));
/*
* Wake up any dm event waiters.
*/
- table = dm_get_table(hc->md);
+ table = dm_get_live_table(hc->md);
if (table) {
dm_table_event(table);
dm_table_put(table);
@@ -512,8 +523,6 @@ static int list_versions(struct dm_ioctl *param, size_t param_size)
return 0;
}
-
-
static int check_name(const char *name)
{
if (strchr(name, '/')) {
@@ -525,6 +534,40 @@ static int check_name(const char *name)
}
/*
+ * On successful return, the caller must not attempt to acquire
+ * _hash_lock without first calling dm_table_put, because dm_table_destroy
+ * waits for this dm_table_put and could be called under this lock.
+ */
+static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
+{
+ struct hash_cell *hc;
+ struct dm_table *table = NULL;
+
+ down_read(&_hash_lock);
+ hc = dm_get_mdptr(md);
+ if (!hc || hc->md != md) {
+ DMWARN("device has been removed from the dev hash table.");
+ goto out;
+ }
+
+ table = hc->new_map;
+ if (table)
+ dm_table_get(table);
+
+out:
+ up_read(&_hash_lock);
+
+ return table;
+}
+
+static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
+ struct dm_ioctl *param)
+{
+ return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
+ dm_get_inactive_table(md) : dm_get_live_table(md);
+}
+
+/*
* Fills in a dm_ioctl structure, ready for sending back to
* userland.
*/
@@ -536,7 +579,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
DM_ACTIVE_PRESENT_FLAG);
- if (dm_suspended(md))
+ if (dm_suspended_md(md))
param->flags |= DM_SUSPEND_FLAG;
param->dev = huge_encode_dev(disk_devt(disk));
@@ -548,18 +591,30 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
*/
param->open_count = dm_open_count(md);
- if (get_disk_ro(disk))
- param->flags |= DM_READONLY_FLAG;
-
param->event_nr = dm_get_event_nr(md);
+ param->target_count = 0;
- table = dm_get_table(md);
+ table = dm_get_live_table(md);
if (table) {
- param->flags |= DM_ACTIVE_PRESENT_FLAG;
- param->target_count = dm_table_get_num_targets(table);
+ if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
+ if (get_disk_ro(disk))
+ param->flags |= DM_READONLY_FLAG;
+ param->target_count = dm_table_get_num_targets(table);
+ }
dm_table_put(table);
- } else
- param->target_count = 0;
+
+ param->flags |= DM_ACTIVE_PRESENT_FLAG;
+ }
+
+ if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
+ table = dm_get_inactive_table(md);
+ if (table) {
+ if (!(dm_table_get_mode(table) & FMODE_WRITE))
+ param->flags |= DM_READONLY_FLAG;
+ param->target_count = dm_table_get_num_targets(table);
+ dm_table_put(table);
+ }
+ }
return 0;
}
@@ -634,9 +689,9 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
* Sneakily write in both the name and the uuid
* while we have the cell.
*/
- strncpy(param->name, hc->name, sizeof(param->name));
+ strlcpy(param->name, hc->name, sizeof(param->name));
if (hc->uuid)
- strncpy(param->uuid, hc->uuid, sizeof(param->uuid)-1);
+ strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
else
param->uuid[0] = '\0';
@@ -784,7 +839,7 @@ static int do_suspend(struct dm_ioctl *param)
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended(md))
+ if (!dm_suspended_md(md))
r = dm_suspend(md, suspend_flags);
if (!r)
@@ -800,7 +855,7 @@ static int do_resume(struct dm_ioctl *param)
unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct hash_cell *hc;
struct mapped_device *md;
- struct dm_table *new_map;
+ struct dm_table *new_map, *old_map = NULL;
down_write(&_hash_lock);
@@ -826,14 +881,14 @@ static int do_resume(struct dm_ioctl *param)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended(md))
+ if (!dm_suspended_md(md))
dm_suspend(md, suspend_flags);
- r = dm_swap_table(md, new_map);
- if (r) {
+ old_map = dm_swap_table(md, new_map);
+ if (IS_ERR(old_map)) {
dm_table_destroy(new_map);
dm_put(md);
- return r;
+ return PTR_ERR(old_map);
}
if (dm_table_get_mode(new_map) & FMODE_WRITE)
@@ -842,9 +897,11 @@ static int do_resume(struct dm_ioctl *param)
set_disk_ro(dm_disk(md), 1);
}
- if (dm_suspended(md))
+ if (dm_suspended_md(md))
r = dm_resume(md);
+ if (old_map)
+ dm_table_destroy(old_map);
if (!r) {
dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
@@ -982,7 +1039,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
if (r)
goto out;
- table = dm_get_table(md);
+ table = dm_get_live_or_inactive_table(md, param);
if (table) {
retrieve_status(table, param, param_size);
dm_table_put(table);
@@ -1215,7 +1272,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
if (r)
goto out;
- table = dm_get_table(md);
+ table = dm_get_live_or_inactive_table(md, param);
if (table) {
retrieve_deps(table, param, param_size);
dm_table_put(table);
@@ -1244,13 +1301,13 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
if (r)
goto out;
- table = dm_get_table(md);
+ table = dm_get_live_or_inactive_table(md, param);
if (table) {
retrieve_status(table, param, param_size);
dm_table_put(table);
}
- out:
+out:
dm_put(md);
return r;
}
@@ -1288,10 +1345,15 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out;
}
- table = dm_get_table(md);
+ table = dm_get_live_table(md);
if (!table)
goto out_argv;
+ if (dm_deleting_md(md)) {
+ r = -ENXIO;
+ goto out_table;
+ }
+
ti = dm_table_find_target(table, tmsg->sector);
if (!dm_target_is_valid(ti)) {
DMWARN("Target message sector outside device.");
@@ -1303,6 +1365,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
r = -EINVAL;
}
+ out_table:
dm_table_put(table);
out_argv:
kfree(argv);
@@ -1582,8 +1645,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
if (!md)
return -ENXIO;
- dm_get(md);
- down_read(&_hash_lock);
+ mutex_lock(&dm_hash_cells_mutex);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
r = -ENXIO;
@@ -1596,8 +1658,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
strcpy(uuid, hc->uuid ? : "");
out:
- up_read(&_hash_lock);
- dm_put(md);
+ mutex_unlock(&dm_hash_cells_mutex);
return r;
}
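/*
 * Editorial sketch, not part of the patch: why the name/uuid copies above
 * switch from strncpy() to strlcpy(). With a source at least as long as the
 * buffer, strncpy() writes no terminating NUL; strlcpy() always terminates
 * and truncates. strlcpy() is not part of ISO C, so a minimal stand-in
 * (my_strlcpy) is included to keep the demo self-contained.
 */
#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';	/* always NUL-terminated, truncating if needed */
	}
	return len;			/* length the untruncated copy would have had */
}

int main(void)
{
	char with_strncpy[8], with_strlcpy[8];

	strncpy(with_strncpy, "0123456789", sizeof(with_strncpy));	/* no NUL written */
	my_strlcpy(with_strlcpy, "0123456789", sizeof(with_strlcpy));	/* "0123456" */

	printf("%.8s / %s\n", with_strncpy, with_strlcpy);
	return 0;
}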
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3e3fc06cb861..addf83475040 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -450,7 +450,10 @@ static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
- push(&kc->pages_jobs, job);
+ if (unlikely(!job->source.count))
+ push(&kc->complete_jobs, job);
+ else
+ push(&kc->pages_jobs, job);
wake(kc);
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 9443896ede07..7035582786fb 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -145,8 +145,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
EXPORT_SYMBOL(dm_dirty_log_type_unregister);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned int argc, char **argv)
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned int argc, char **argv)
{
struct dm_dirty_log_type *type;
struct dm_dirty_log *log;
@@ -161,6 +162,7 @@ struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
return NULL;
}
+ log->flush_callback_fn = flush_callback_fn;
log->type = type;
if (type->ctr(log, ti, argc, argv)) {
kfree(log);
@@ -208,7 +210,9 @@ struct log_header {
struct log_c {
struct dm_target *ti;
- int touched;
+ int touched_dirtied;
+ int touched_cleaned;
+ int flush_failed;
uint32_t region_size;
unsigned int region_count;
region_t sync_count;
@@ -233,6 +237,7 @@ struct log_c {
* Disk log fields
*/
int log_dev_failed;
+ int log_dev_flush_failed;
struct dm_dev *log_dev;
struct log_header header;
@@ -253,14 +258,14 @@ static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
ext2_set_bit(bit, (unsigned long *) bs);
- l->touched = 1;
+ l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
ext2_clear_bit(bit, (unsigned long *) bs);
- l->touched = 1;
+ l->touched_dirtied = 1;
}
/*----------------------------------------------------------------
@@ -287,6 +292,19 @@ static int rw_header(struct log_c *lc, int rw)
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
+static int flush_header(struct log_c *lc)
+{
+ struct dm_io_region null_location = {
+ .bdev = lc->header_location.bdev,
+ .sector = 0,
+ .count = 0,
+ };
+
+ lc->io_req.bi_rw = WRITE_BARRIER;
+
+ return dm_io(&lc->io_req, 1, &null_location, NULL);
+}
+
static int read_header(struct log_c *log)
{
int r;
@@ -378,7 +396,9 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
}
lc->ti = ti;
- lc->touched = 0;
+ lc->touched_dirtied = 0;
+ lc->touched_cleaned = 0;
+ lc->flush_failed = 0;
lc->region_size = region_size;
lc->region_count = region_count;
lc->sync = sync;
@@ -406,6 +426,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
} else {
lc->log_dev = dev;
lc->log_dev_failed = 0;
+ lc->log_dev_flush_failed = 0;
lc->header_location.bdev = lc->log_dev->bdev;
lc->header_location.sector = 0;
@@ -614,6 +635,11 @@ static int disk_resume(struct dm_dirty_log *log)
/* write the new header */
r = rw_header(lc, WRITE);
+ if (!r) {
+ r = flush_header(lc);
+ if (r)
+ lc->log_dev_flush_failed = 1;
+ }
if (r) {
DMWARN("%s: Failed to write header on dirty region log device",
lc->log_dev->name);
@@ -656,18 +682,40 @@ static int core_flush(struct dm_dirty_log *log)
static int disk_flush(struct dm_dirty_log *log)
{
- int r;
- struct log_c *lc = (struct log_c *) log->context;
+ int r, i;
+ struct log_c *lc = log->context;
/* only write if the log has changed */
- if (!lc->touched)
+ if (!lc->touched_cleaned && !lc->touched_dirtied)
return 0;
+ if (lc->touched_cleaned && log->flush_callback_fn &&
+ log->flush_callback_fn(lc->ti)) {
+ /*
+ * At this point it is impossible to determine which
+ * regions are clean and which are dirty (without
+ * re-reading the log off disk). So mark all of them
+ * dirty.
+ */
+ lc->flush_failed = 1;
+ for (i = 0; i < lc->region_count; i++)
+ log_clear_bit(lc, lc->clean_bits, i);
+ }
+
r = rw_header(lc, WRITE);
if (r)
fail_log_device(lc);
- else
- lc->touched = 0;
+ else {
+ if (lc->touched_dirtied) {
+ r = flush_header(lc);
+ if (r) {
+ lc->log_dev_flush_failed = 1;
+ fail_log_device(lc);
+ } else
+ lc->touched_dirtied = 0;
+ }
+ lc->touched_cleaned = 0;
+ }
return r;
}
@@ -681,7 +729,8 @@ static void core_mark_region(struct dm_dirty_log *log, region_t region)
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
- log_set_bit(lc, lc->clean_bits, region);
+ if (likely(!lc->flush_failed))
+ log_set_bit(lc, lc->clean_bits, region);
}
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
@@ -762,7 +811,9 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status,
switch(status) {
case STATUSTYPE_INFO:
DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
- lc->log_dev_failed ? 'D' : 'A');
+ lc->log_dev_flush_failed ? 'F' :
+ lc->log_dev_failed ? 'D' :
+ 'A');
break;
case STATUSTYPE_TABLE:
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index dce971dbdfa3..e81345a1d08f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -93,6 +93,10 @@ struct multipath {
* can resubmit bios on error.
*/
mempool_t *mpio_pool;
+
+ struct mutex work_mutex;
+
+ unsigned suspended; /* Don't create new I/O internally when set. */
};
/*
@@ -198,6 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
+ mutex_init(&m->work_mutex);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -885,13 +890,18 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
return r;
}
-static void multipath_dtr(struct dm_target *ti)
+static void flush_multipath_work(void)
{
- struct multipath *m = (struct multipath *) ti->private;
-
flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
flush_scheduled_work();
+}
+
+static void multipath_dtr(struct dm_target *ti)
+{
+ struct multipath *m = ti->private;
+
+ flush_multipath_work();
free_multipath(m);
}
@@ -1261,6 +1271,16 @@ static void multipath_presuspend(struct dm_target *ti)
queue_if_no_path(m, 0, 1);
}
+static void multipath_postsuspend(struct dm_target *ti)
+{
+ struct multipath *m = ti->private;
+
+ mutex_lock(&m->work_mutex);
+ m->suspended = 1;
+ flush_multipath_work();
+ mutex_unlock(&m->work_mutex);
+}
+
/*
* Restore the queue_if_no_path setting.
*/
@@ -1269,6 +1289,10 @@ static void multipath_resume(struct dm_target *ti)
struct multipath *m = (struct multipath *) ti->private;
unsigned long flags;
+ mutex_lock(&m->work_mutex);
+ m->suspended = 0;
+ mutex_unlock(&m->work_mutex);
+
spin_lock_irqsave(&m->lock, flags);
m->queue_if_no_path = m->saved_queue_if_no_path;
spin_unlock_irqrestore(&m->lock, flags);
@@ -1397,51 +1421,71 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
- int r;
+ int r = -EINVAL;
struct dm_dev *dev;
struct multipath *m = (struct multipath *) ti->private;
action_fn action;
+ mutex_lock(&m->work_mutex);
+
+ if (m->suspended) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ if (dm_suspended(ti)) {
+ r = -EBUSY;
+ goto out;
+ }
+
if (argc == 1) {
- if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
- return queue_if_no_path(m, 1, 0);
- else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
- return queue_if_no_path(m, 0, 0);
+ if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
+ r = queue_if_no_path(m, 1, 0);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
+ r = queue_if_no_path(m, 0, 0);
+ goto out;
+ }
}
- if (argc != 2)
- goto error;
+ if (argc != 2) {
+ DMWARN("Unrecognised multipath message received.");
+ goto out;
+ }
- if (!strnicmp(argv[0], MESG_STR("disable_group")))
- return bypass_pg_num(m, argv[1], 1);
- else if (!strnicmp(argv[0], MESG_STR("enable_group")))
- return bypass_pg_num(m, argv[1], 0);
- else if (!strnicmp(argv[0], MESG_STR("switch_group")))
- return switch_pg_num(m, argv[1]);
- else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+ if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
+ r = bypass_pg_num(m, argv[1], 1);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
+ r = bypass_pg_num(m, argv[1], 0);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
+ r = switch_pg_num(m, argv[1]);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
action = reinstate_path;
else if (!strnicmp(argv[0], MESG_STR("fail_path")))
action = fail_path;
- else
- goto error;
+ else {
+ DMWARN("Unrecognised multipath message received.");
+ goto out;
+ }
r = dm_get_device(ti, argv[1], ti->begin, ti->len,
dm_table_get_mode(ti->table), &dev);
if (r) {
DMWARN("message: error getting device %s",
argv[1]);
- return -EINVAL;
+ goto out;
}
r = action_dev(m, dev, action);
dm_put_device(ti, dev);
+out:
+ mutex_unlock(&m->work_mutex);
return r;
-
-error:
- DMWARN("Unrecognised multipath message received.");
- return -EINVAL;
}
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -1567,13 +1611,14 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 1, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
.map_rq = multipath_map,
.rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
+ .postsuspend = multipath_postsuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index cc9dc79b0784..ad779bd13aec 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -35,6 +35,7 @@ static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
*---------------------------------------------------------------*/
enum dm_raid1_error {
DM_RAID1_WRITE_ERROR,
+ DM_RAID1_FLUSH_ERROR,
DM_RAID1_SYNC_ERROR,
DM_RAID1_READ_ERROR
};
@@ -57,6 +58,7 @@ struct mirror_set {
struct bio_list reads;
struct bio_list writes;
struct bio_list failures;
+ struct bio_list holds; /* bios are waiting until suspend */
struct dm_region_hash *rh;
struct dm_kcopyd_client *kcopyd_client;
@@ -67,6 +69,7 @@ struct mirror_set {
region_t nr_regions;
int in_sync;
int log_failure;
+ int leg_failure;
atomic_t suspend;
atomic_t default_mirror; /* Default mirror */
@@ -179,6 +182,17 @@ static void set_default_mirror(struct mirror *m)
atomic_set(&ms->default_mirror, m - m0);
}
+static struct mirror *get_valid_mirror(struct mirror_set *ms)
+{
+ struct mirror *m;
+
+ for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+ if (!atomic_read(&m->error_count))
+ return m;
+
+ return NULL;
+}
+
/* fail_mirror
* @m: mirror device to fail
* @error_type: one of the enum's, DM_RAID1_*_ERROR
@@ -198,6 +212,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
struct mirror_set *ms = m->ms;
struct mirror *new;
+ ms->leg_failure = 1;
+
/*
* error_count is used for nothing more than a
* simple way to tell if a device has encountered
@@ -224,19 +240,50 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
goto out;
}
- for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
- if (!atomic_read(&new->error_count)) {
- set_default_mirror(new);
- break;
- }
-
- if (unlikely(new == ms->mirror + ms->nr_mirrors))
+ new = get_valid_mirror(ms);
+ if (new)
+ set_default_mirror(new);
+ else
DMWARN("All sides of mirror have failed.");
out:
schedule_work(&ms->trigger_event);
}
+static int mirror_flush(struct dm_target *ti)
+{
+ struct mirror_set *ms = ti->private;
+ unsigned long error_bits;
+
+ unsigned int i;
+ struct dm_io_region io[ms->nr_mirrors];
+ struct mirror *m;
+ struct dm_io_request io_req = {
+ .bi_rw = WRITE_BARRIER,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.bvec = NULL,
+ .client = ms->io_client,
+ };
+
+ for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
+ io[i].bdev = m->dev->bdev;
+ io[i].sector = 0;
+ io[i].count = 0;
+ }
+
+ error_bits = -1;
+ dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
+ if (unlikely(error_bits != 0)) {
+ for (i = 0; i < ms->nr_mirrors; i++)
+ if (test_bit(i, &error_bits))
+ fail_mirror(ms->mirror + i,
+ DM_RAID1_FLUSH_ERROR);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*-----------------------------------------------------------------
* Recovery.
*
@@ -396,6 +443,8 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
+ if (unlikely(!bio->bi_size))
+ return 0;
return m->offset + (bio->bi_sector - m->ms->ti->begin);
}
@@ -413,6 +462,27 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
io->count = bio->bi_size >> 9;
}
+static void hold_bio(struct mirror_set *ms, struct bio *bio)
+{
+ /*
+ * If device is suspended, complete the bio.
+ */
+ if (atomic_read(&ms->suspend)) {
+ if (dm_noflush_suspending(ms->ti))
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ else
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ /*
+ * Hold bio until the suspend is complete.
+ */
+ spin_lock_irq(&ms->lock);
+ bio_list_add(&ms->holds, bio);
+ spin_unlock_irq(&ms->lock);
+}
+
/*-----------------------------------------------------------------
* Reads
*---------------------------------------------------------------*/
@@ -511,7 +581,6 @@ static void write_callback(unsigned long error, void *context)
unsigned i, ret = 0;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
- int uptodate = 0;
int should_wake = 0;
unsigned long flags;
@@ -524,36 +593,27 @@ static void write_callback(unsigned long error, void *context)
* This way we handle both writes to SYNC and NOSYNC
* regions with the same code.
*/
- if (likely(!error))
- goto out;
+ if (likely(!error)) {
+ bio_endio(bio, ret);
+ return;
+ }
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error))
fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
- else
- uptodate = 1;
- if (unlikely(!uptodate)) {
- DMERR("All replicated volumes dead, failing I/O");
- /* None of the writes succeeded, fail the I/O. */
- ret = -EIO;
- } else if (errors_handled(ms)) {
- /*
- * Need to raise event. Since raising
- * events can block, we need to do it in
- * the main thread.
- */
- spin_lock_irqsave(&ms->lock, flags);
- if (!ms->failures.head)
- should_wake = 1;
- bio_list_add(&ms->failures, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
- if (should_wake)
- wakeup_mirrord(ms);
- return;
- }
-out:
- bio_endio(bio, ret);
+ /*
+ * Need to raise event. Since raising
+ * events can block, we need to do it in
+ * the main thread.
+ */
+ spin_lock_irqsave(&ms->lock, flags);
+ if (!ms->failures.head)
+ should_wake = 1;
+ bio_list_add(&ms->failures, bio);
+ spin_unlock_irqrestore(&ms->lock, flags);
+ if (should_wake)
+ wakeup_mirrord(ms);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -562,7 +622,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct dm_io_region io[ms->nr_mirrors], *dest = io;
struct mirror *m;
struct dm_io_request io_req = {
- .bi_rw = WRITE,
+ .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
.mem.type = DM_IO_BVEC,
.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
.notify.fn = write_callback,
@@ -603,6 +663,11 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
+ if (unlikely(bio_empty_barrier(bio))) {
+ bio_list_add(&sync, bio);
+ continue;
+ }
+
region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->is_remote_recovering &&
@@ -672,8 +737,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
- map_bio(get_default_mirror(ms), bio);
- generic_make_request(bio);
+ if (unlikely(ms->leg_failure) && errors_handled(ms))
+ hold_bio(ms, bio);
+ else {
+ map_bio(get_default_mirror(ms), bio);
+ generic_make_request(bio);
+ }
}
}
@@ -681,20 +750,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
struct bio *bio;
- if (!failures->head)
- return;
-
- if (!ms->log_failure) {
- while ((bio = bio_list_pop(failures))) {
- ms->in_sync = 0;
- dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
- }
+ if (likely(!failures->head))
return;
- }
/*
* If the log has failed, unattempted writes are being
- * put on the failures list. We can't issue those writes
+ * put on the holds list. We can't issue those writes
* until a log has been marked, so we must store them.
*
* If a 'noflush' suspend is in progress, we can requeue
@@ -709,23 +770,27 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
* for us to treat them the same and requeue them
* as well.
*/
- if (dm_noflush_suspending(ms->ti)) {
- while ((bio = bio_list_pop(failures)))
- bio_endio(bio, DM_ENDIO_REQUEUE);
- return;
- }
+ while ((bio = bio_list_pop(failures))) {
+ if (!ms->log_failure) {
+ ms->in_sync = 0;
+ dm_rh_mark_nosync(ms->rh, bio);
+ }
- if (atomic_read(&ms->suspend)) {
- while ((bio = bio_list_pop(failures)))
+ /*
+ * If all the legs are dead, fail the I/O.
+ * If we have been told to handle errors, hold the bio
+ * and wait for userspace to deal with the problem.
+ * Otherwise pretend that the I/O succeeded. (This would
+ * be wrong if the failed leg returned after reboot and
+ * got replicated back to the good legs.)
+ */
+ if (!get_valid_mirror(ms))
bio_endio(bio, -EIO);
- return;
+ else if (errors_handled(ms))
+ hold_bio(ms, bio);
+ else
+ bio_endio(bio, 0);
}
-
- spin_lock_irq(&ms->lock);
- bio_list_merge(&ms->failures, failures);
- spin_unlock_irq(&ms->lock);
-
- delayed_wake(ms);
}
static void trigger_event(struct work_struct *work)
@@ -784,12 +849,17 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
}
spin_lock_init(&ms->lock);
+ bio_list_init(&ms->reads);
+ bio_list_init(&ms->writes);
+ bio_list_init(&ms->failures);
+ bio_list_init(&ms->holds);
ms->ti = ti;
ms->nr_mirrors = nr_mirrors;
ms->nr_regions = dm_sector_div_up(ti->len, region_size);
ms->in_sync = 0;
ms->log_failure = 0;
+ ms->leg_failure = 0;
atomic_set(&ms->suspend, 0);
atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
@@ -889,7 +959,8 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
return NULL;
}
- dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
+ dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
+ argv + 2);
if (!dl) {
ti->error = "Error creating mirror dirty log";
return NULL;
@@ -995,6 +1066,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = ms;
ti->split_io = dm_rh_get_region_size(ms->rh);
+ ti->num_flush_requests = 1;
ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
if (!ms->kmirrord_wq) {
@@ -1122,7 +1194,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- dm_rh_dec(ms->rh, map_context->ll);
+ if (likely(!bio_empty_barrier(bio)))
+ dm_rh_dec(ms->rh, map_context->ll);
return error;
}
@@ -1180,6 +1253,9 @@ static void mirror_presuspend(struct dm_target *ti)
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ struct bio_list holds;
+ struct bio *bio;
+
atomic_set(&ms->suspend, 1);
/*
@@ -1202,6 +1278,22 @@ static void mirror_presuspend(struct dm_target *ti)
* we know that all of our I/O has been pushed.
*/
flush_workqueue(ms->kmirrord_wq);
+
+ /*
+ * Now that ms->suspend is set and the workqueue flushed, no more
+ * entries can be added to the ms->holds list, so process it.
+ *
+ * Bios can still arrive concurrently with or after this
+ * presuspend function, but they cannot join the hold list
+ * because ms->suspend is set.
+ */
+ spin_lock_irq(&ms->lock);
+ holds = ms->holds;
+ bio_list_init(&ms->holds);
+ spin_unlock_irq(&ms->lock);
+
+ while ((bio = bio_list_pop(&holds)))
+ hold_bio(ms, bio);
}
static void mirror_postsuspend(struct dm_target *ti)
@@ -1244,7 +1336,8 @@ static char device_status_char(struct mirror *m)
if (!atomic_read(&(m->error_count)))
return 'A';
- return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
+ return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+ (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
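/*
 * Editorial sketch, not part of the patch: the per-bio decision that
 * do_failures() above now makes for writes that failed on at least one leg.
 * The enum and classify_failed_write() are illustrative stand-ins; the three
 * outcomes map onto bio_endio(-EIO), hold_bio() and bio_endio(0) in the
 * driver.
 */
#include <stdio.h>

enum failed_write_action {
	FAIL_BIO,	/* no leg holds the data: fail the write            */
	HOLD_BIO,	/* park the bio until userspace repairs the mirror  */
	COMPLETE_BIO,	/* at least one leg took the write: report success  */
};

static enum failed_write_action
classify_failed_write(int have_valid_mirror, int errors_are_handled)
{
	if (!have_valid_mirror)
		return FAIL_BIO;
	if (errors_are_handled)
		return HOLD_BIO;
	/* risky if the failed leg comes back after a reboot, as noted above */
	return COMPLETE_BIO;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_failed_write(0, 1),	/* 0: FAIL_BIO     */
	       classify_failed_write(1, 1),	/* 1: HOLD_BIO     */
	       classify_failed_write(1, 0));	/* 2: COMPLETE_BIO */
	return 0;
}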
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 36dbe29f2fd6..5f19ceb6fe91 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -79,6 +79,11 @@ struct dm_region_hash {
struct list_head recovered_regions;
struct list_head failed_recovered_regions;
+ /*
+ * If there was a barrier failure no regions can be marked clean.
+ */
+ int barrier_failure;
+
void *context;
sector_t target_begin;
@@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create(
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
INIT_LIST_HEAD(&rh->failed_recovered_regions);
+ rh->barrier_failure = 0;
rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
sizeof(struct dm_region));
@@ -377,8 +383,6 @@ static void complete_resync_work(struct dm_region *reg, int success)
/* dm_rh_mark_nosync
* @ms
* @bio
- * @done
- * @error
*
* The bio was written on some mirror(s) but failed on other mirror(s).
* We can successfully endio the bio but should avoid the region being
@@ -386,8 +390,7 @@ static void complete_resync_work(struct dm_region *reg, int success)
*
* This function is _not_ safe in interrupt context!
*/
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error)
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
unsigned long flags;
struct dm_dirty_log *log = rh->log;
@@ -395,6 +398,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
region_t region = dm_rh_bio_to_region(rh, bio);
int recovering = 0;
+ if (bio_empty_barrier(bio)) {
+ rh->barrier_failure = 1;
+ return;
+ }
+
/* We must inform the log that the sync count has changed. */
log->type->set_region_sync(log, region, 0);
@@ -419,7 +427,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
BUG_ON(!list_empty(&reg->list));
spin_unlock_irqrestore(&rh->region_lock, flags);
- bio_endio(bio, error);
if (recovering)
complete_resync_work(reg, 0);
}
@@ -515,8 +522,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
struct bio *bio;
- for (bio = bios->head; bio; bio = bio->bi_next)
+ for (bio = bios->head; bio; bio = bio->bi_next) {
+ if (bio_empty_barrier(bio))
+ continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ }
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
@@ -544,7 +554,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
*/
/* do nothing for DM_RH_NOSYNC */
- if (reg->state == DM_RH_RECOVERING) {
+ if (unlikely(rh->barrier_failure)) {
+ /*
+ * If a write barrier failed some time ago, we
+ * don't know whether or not this write made it
+ * to the disk, so we must resync the device.
+ */
+ reg->state = DM_RH_NOSYNC;
+ } else if (reg->state == DM_RH_RECOVERING) {
list_add_tail(&reg->list, &rh->quiesced_regions);
} else if (reg->state == DM_RH_DIRTY) {
reg->state = DM_RH_CLEAN;
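/*
 * Editorial sketch, not part of the patch: the state decision dm_rh_dec()
 * above makes when the last pending write on a region completes, including
 * the new barrier_failure rule. The enum mirrors the DM_RH_* states; the
 * function name and the simplification (recovering regions are also queued
 * for recovery in the driver) are illustrative.
 */
#include <stdio.h>

enum demo_rh_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

static enum demo_rh_state
state_after_last_pending_write(enum demo_rh_state cur, int barrier_failure)
{
	if (barrier_failure)
		return RH_NOSYNC;	/* a failed barrier: writes may be lost, resync */
	if (cur == RH_DIRTY)
		return RH_CLEAN;	/* all writes finished: safe to mark clean */
	return cur;			/* RECOVERING stays recovering; NOSYNC stays */
}

int main(void)
{
	printf("%d %d\n",
	       state_after_last_pending_write(RH_DIRTY, 0),	/* 0: RH_CLEAN  */
	       state_after_last_pending_write(RH_DIRTY, 1));	/* 2: RH_NOSYNC */
	return 0;
}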
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 0c746420c008..7d08879689ac 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -55,6 +55,8 @@
*/
#define SNAPSHOT_DISK_VERSION 1
+#define NUM_SNAPSHOT_HDR_CHUNKS 1
+
struct disk_header {
uint32_t magic;
@@ -120,7 +122,22 @@ struct pstore {
/*
* The next free chunk for an exception.
+ *
+ * When creating exceptions, all the chunks here and above are
+ * free. It holds the next chunk to be allocated. On rare
+ * occasions (e.g. after a system crash) holes can be left in
+ * the exception store because chunks can be committed out of
+ * order.
+ *
+ * When merging exceptions, it does not necessarily mean all the
+ * chunks here and above are free. It holds the value it would
+ * have held if all chunks had been committed in order of
+ * allocation. Consequently the value may occasionally be
+ * slightly too low, but since it's only used for 'status' and
+ * it can never reach its minimum value too early, this doesn't
+ * matter.
*/
+
chunk_t next_free;
/*
@@ -214,7 +231,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
int metadata)
{
struct dm_io_region where = {
- .bdev = ps->store->cow->bdev,
+ .bdev = dm_snap_cow(ps->store->snap)->bdev,
.sector = ps->store->chunk_size * chunk,
.count = ps->store->chunk_size,
};
@@ -294,7 +311,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*/
if (!ps->store->chunk_size) {
ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
- bdev_logical_block_size(ps->store->cow->bdev) >> 9);
+ bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
+ bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
chunk_size_supplied = 0;
@@ -408,6 +426,15 @@ static void write_exception(struct pstore *ps,
e->new_chunk = cpu_to_le64(de->new_chunk);
}
+static void clear_exception(struct pstore *ps, uint32_t index)
+{
+ struct disk_exception *e = get_exception(ps, index);
+
+ /* clear it */
+ e->old_chunk = 0;
+ e->new_chunk = 0;
+}
+
/*
* Registers the exceptions that are present in the current area.
* 'full' is filled in to indicate if the area has been
@@ -489,11 +516,23 @@ static struct pstore *get_info(struct dm_exception_store *store)
return (struct pstore *) store->context;
}
-static void persistent_fraction_full(struct dm_exception_store *store,
- sector_t *numerator, sector_t *denominator)
+static void persistent_usage(struct dm_exception_store *store,
+ sector_t *total_sectors,
+ sector_t *sectors_allocated,
+ sector_t *metadata_sectors)
{
- *numerator = get_info(store)->next_free * store->chunk_size;
- *denominator = get_dev_size(store->cow->bdev);
+ struct pstore *ps = get_info(store);
+
+ *sectors_allocated = ps->next_free * store->chunk_size;
+ *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
+
+ /*
+ * First chunk is the fixed header.
+ * Then there are (ps->current_area + 1) metadata chunks, each one
+ * separated from the next by ps->exceptions_per_area data chunks.
+ */
+ *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
+ store->chunk_size;
}
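For illustration only, the arithmetic above can be checked with a small userspace sketch; every number below is invented and nothing is taken from a real exception store.

#include <stdio.h>
#include <stdint.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

int main(void)
{
	/* Invented example: metadata areas 0..2 in use, 16-sector chunks,
	 * next_free pointing at chunk 250. */
	uint64_t current_area = 2;
	uint64_t chunk_size = 16;	/* sectors per chunk */
	uint64_t next_free = 250;	/* next chunk to be allocated */

	uint64_t sectors_allocated = next_free * chunk_size;
	uint64_t metadata_sectors =
		(current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * chunk_size;

	/* 4000 allocated sectors, 64 of them metadata */
	printf("allocated=%llu metadata=%llu\n",
	       (unsigned long long)sectors_allocated,
	       (unsigned long long)metadata_sectors);
	return 0;
}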
static void persistent_dtr(struct dm_exception_store *store)
@@ -552,44 +591,40 @@ static int persistent_read_metadata(struct dm_exception_store *store,
ps->current_area = 0;
zero_memory_area(ps);
r = zero_disk_area(ps, 0);
- if (r) {
+ if (r)
DMWARN("zero_disk_area(0) failed");
- return r;
- }
- } else {
- /*
- * Sanity checks.
- */
- if (ps->version != SNAPSHOT_DISK_VERSION) {
- DMWARN("unable to handle snapshot disk version %d",
- ps->version);
- return -EINVAL;
- }
+ return r;
+ }
+ /*
+ * Sanity checks.
+ */
+ if (ps->version != SNAPSHOT_DISK_VERSION) {
+ DMWARN("unable to handle snapshot disk version %d",
+ ps->version);
+ return -EINVAL;
+ }
- /*
- * Metadata are valid, but snapshot is invalidated
- */
- if (!ps->valid)
- return 1;
+ /*
+ * Metadata are valid, but snapshot is invalidated
+ */
+ if (!ps->valid)
+ return 1;
- /*
- * Read the metadata.
- */
- r = read_exceptions(ps, callback, callback_context);
- if (r)
- return r;
- }
+ /*
+ * Read the metadata.
+ */
+ r = read_exceptions(ps, callback, callback_context);
- return 0;
+ return r;
}
static int persistent_prepare_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e)
+ struct dm_exception *e)
{
struct pstore *ps = get_info(store);
uint32_t stride;
chunk_t next_free;
- sector_t size = get_dev_size(store->cow->bdev);
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
/* Is there enough room ? */
if (size < ((ps->next_free + 1) * store->chunk_size))
@@ -611,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
}
static void persistent_commit_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e,
+ struct dm_exception *e,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -672,6 +707,85 @@ static void persistent_commit_exception(struct dm_exception_store *store,
ps->callback_count = 0;
}
+static int persistent_prepare_merge(struct dm_exception_store *store,
+ chunk_t *last_old_chunk,
+ chunk_t *last_new_chunk)
+{
+ struct pstore *ps = get_info(store);
+ struct disk_exception de;
+ int nr_consecutive;
+ int r;
+
+ /*
+ * When current area is empty, move back to preceding area.
+ */
+ if (!ps->current_committed) {
+ /*
+ * Have we finished?
+ */
+ if (!ps->current_area)
+ return 0;
+
+ ps->current_area--;
+ r = area_io(ps, READ);
+ if (r < 0)
+ return r;
+ ps->current_committed = ps->exceptions_per_area;
+ }
+
+ read_exception(ps, ps->current_committed - 1, &de);
+ *last_old_chunk = de.old_chunk;
+ *last_new_chunk = de.new_chunk;
+
+ /*
+ * Find number of consecutive chunks within the current area,
+ * working backwards.
+ */
+ for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
+ nr_consecutive++) {
+ read_exception(ps, ps->current_committed - 1 - nr_consecutive,
+ &de);
+ if (de.old_chunk != *last_old_chunk - nr_consecutive ||
+ de.new_chunk != *last_new_chunk - nr_consecutive)
+ break;
+ }
+
+ return nr_consecutive;
+}
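The backwards scan for a run of consecutive exceptions can be modelled outside the kernel on a plain array; the exception list below is made up purely to show the idea and is not the on-disk format.

#include <stdio.h>
#include <stdint.h>

struct toy_exception { uint64_t old_chunk, new_chunk; };

/* Count how many committed exceptions, walking backwards from the last
 * one, form a run whose old and new chunk numbers both decrease by one
 * per step - the same property persistent_prepare_merge() looks for. */
static int count_consecutive(const struct toy_exception *e, int committed)
{
	uint64_t last_old = e[committed - 1].old_chunk;
	uint64_t last_new = e[committed - 1].new_chunk;
	int n;

	for (n = 1; n < committed; n++) {
		const struct toy_exception *prev = &e[committed - 1 - n];

		if (prev->old_chunk != last_old - n ||
		    prev->new_chunk != last_new - n)
			break;
	}
	return n;
}

int main(void)
{
	/* Invented exceptions: only the last three are consecutive. */
	struct toy_exception e[] = {
		{ 10, 3 }, { 40, 5 }, { 41, 6 }, { 42, 7 },
	};

	printf("consecutive run length: %d\n", count_consecutive(e, 4));
	return 0;
}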
+
+static int persistent_commit_merge(struct dm_exception_store *store,
+ int nr_merged)
+{
+ int r, i;
+ struct pstore *ps = get_info(store);
+
+ BUG_ON(nr_merged > ps->current_committed);
+
+ for (i = 0; i < nr_merged; i++)
+ clear_exception(ps, ps->current_committed - 1 - i);
+
+ r = area_io(ps, WRITE);
+ if (r < 0)
+ return r;
+
+ ps->current_committed -= nr_merged;
+
+ /*
+ * At this stage, only persistent_usage() uses ps->next_free, so
+ * we make no attempt to keep ps->next_free strictly accurate
+ * as exceptions may have been committed out-of-order originally.
+ * Once a snapshot has become merging, we set it to the value it
+ * would have held had all the exceptions been committed in order.
+ *
+ * ps->current_area does not get reduced by prepare_merge() until
+ * after commit_merge() has removed the nr_merged previous exceptions.
+ */
+ ps->next_free = (area_location(ps, ps->current_area) - 1) +
+ (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+
+ return 0;
+}
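A worked example of the next_free calculation above, assuming area_location() places area N at NUM_SNAPSHOT_HDR_CHUNKS + N * (exceptions_per_area + 1), consistent with the layout described in the persistent_usage() comment; all values are invented.

#include <stdio.h>
#include <stdint.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

/* Assumed layout: header chunk, then each metadata area followed by
 * exceptions_per_area data chunks. */
static uint64_t area_location(uint64_t area, uint64_t exceptions_per_area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + area * (exceptions_per_area + 1);
}

int main(void)
{
	/* Invented example: 256 exceptions per area, merging has left
	 * 100 committed exceptions in area 2. */
	uint64_t exceptions_per_area = 256;
	uint64_t current_area = 2;
	uint64_t current_committed = 100;

	uint64_t next_free =
		(area_location(current_area, exceptions_per_area) - 1) +
		(current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;

	printf("next_free would be chunk %llu\n",
	       (unsigned long long)next_free);
	return 0;
}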
+
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
struct pstore *ps = get_info(store);
@@ -697,7 +811,7 @@ static int persistent_ctr(struct dm_exception_store *store,
ps->area = NULL;
ps->zero_area = NULL;
ps->header_area = NULL;
- ps->next_free = 2; /* skipping the header and first area */
+ ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
ps->current_committed = 0;
ps->callback_count = 0;
@@ -726,8 +840,7 @@ static unsigned persistent_status(struct dm_exception_store *store,
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- DMEMIT(" %s P %llu", store->cow->name,
- (unsigned long long)store->chunk_size);
+ DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
}
return sz;
@@ -741,8 +854,10 @@ static struct dm_exception_store_type _persistent_type = {
.read_metadata = persistent_read_metadata,
.prepare_exception = persistent_prepare_exception,
.commit_exception = persistent_commit_exception,
+ .prepare_merge = persistent_prepare_merge,
+ .commit_merge = persistent_commit_merge,
.drop_snapshot = persistent_drop_snapshot,
- .fraction_full = persistent_fraction_full,
+ .usage = persistent_usage,
.status = persistent_status,
};
@@ -754,8 +869,10 @@ static struct dm_exception_store_type _persistent_compat_type = {
.read_metadata = persistent_read_metadata,
.prepare_exception = persistent_prepare_exception,
.commit_exception = persistent_commit_exception,
+ .prepare_merge = persistent_prepare_merge,
+ .commit_merge = persistent_commit_merge,
.drop_snapshot = persistent_drop_snapshot,
- .fraction_full = persistent_fraction_full,
+ .usage = persistent_usage,
.status = persistent_status,
};
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index cde5aa558e6d..a0898a66a2f8 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -36,10 +36,10 @@ static int transient_read_metadata(struct dm_exception_store *store,
}
static int transient_prepare_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e)
+ struct dm_exception *e)
{
struct transient_c *tc = store->context;
- sector_t size = get_dev_size(store->cow->bdev);
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
if (size < (tc->next_free + store->chunk_size))
return -1;
@@ -51,7 +51,7 @@ static int transient_prepare_exception(struct dm_exception_store *store,
}
static void transient_commit_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e,
+ struct dm_exception *e,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -59,11 +59,14 @@ static void transient_commit_exception(struct dm_exception_store *store,
callback(callback_context, 1);
}
-static void transient_fraction_full(struct dm_exception_store *store,
- sector_t *numerator, sector_t *denominator)
+static void transient_usage(struct dm_exception_store *store,
+ sector_t *total_sectors,
+ sector_t *sectors_allocated,
+ sector_t *metadata_sectors)
{
- *numerator = ((struct transient_c *) store->context)->next_free;
- *denominator = get_dev_size(store->cow->bdev);
+ *sectors_allocated = ((struct transient_c *) store->context)->next_free;
+ *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
+ *metadata_sectors = 0;
}
static int transient_ctr(struct dm_exception_store *store,
@@ -91,8 +94,7 @@ static unsigned transient_status(struct dm_exception_store *store,
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- DMEMIT(" %s N %llu", store->cow->name,
- (unsigned long long)store->chunk_size);
+ DMEMIT(" N %llu", (unsigned long long)store->chunk_size);
}
return sz;
@@ -106,7 +108,7 @@ static struct dm_exception_store_type _transient_type = {
.read_metadata = transient_read_metadata,
.prepare_exception = transient_prepare_exception,
.commit_exception = transient_commit_exception,
- .fraction_full = transient_fraction_full,
+ .usage = transient_usage,
.status = transient_status,
};
@@ -118,7 +120,7 @@ static struct dm_exception_store_type _transient_compat_type = {
.read_metadata = transient_read_metadata,
.prepare_exception = transient_prepare_exception,
.commit_exception = transient_commit_exception,
- .fraction_full = transient_fraction_full,
+ .usage = transient_usage,
.status = transient_status,
};
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 3a3ba46e6d4b..ee8eb283650d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -25,6 +25,11 @@
#define DM_MSG_PREFIX "snapshots"
+static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
+
+#define dm_target_is_snapshot_merge(ti) \
+ ((ti)->type->name == dm_snapshot_merge_target_name)
+
/*
* The percentage increment we will wake up users at
*/
@@ -49,7 +54,7 @@
#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
(DM_TRACKED_CHUNK_HASH_SIZE - 1))
-struct exception_table {
+struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
struct list_head *table;
@@ -59,22 +64,31 @@ struct dm_snapshot {
struct rw_semaphore lock;
struct dm_dev *origin;
+ struct dm_dev *cow;
+
+ struct dm_target *ti;
/* List of snapshots per Origin */
struct list_head list;
- /* You can't use a snapshot if this is 0 (e.g. if full) */
+ /*
+ * You can't use a snapshot if this is 0 (e.g. if full).
+ * A snapshot-merge target never clears this.
+ */
int valid;
/* Origin writes don't trigger exceptions until this is set */
int active;
+ /* Whether or not owning mapped_device is suspended */
+ int suspended;
+
mempool_t *pending_pool;
atomic_t pending_exceptions_count;
- struct exception_table pending;
- struct exception_table complete;
+ struct dm_exception_table pending;
+ struct dm_exception_table complete;
/*
* pe_lock protects all pending_exception operations and access
@@ -95,8 +109,51 @@ struct dm_snapshot {
mempool_t *tracked_chunk_pool;
spinlock_t tracked_chunk_lock;
struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
+ /*
+ * The merge operation failed if this flag is set.
+ * Failure modes are handled as follows:
+ * - I/O error reading the header
+ * => don't load the target; abort.
+ * - Header does not have "valid" flag set
+ * => use the origin; forget about the snapshot.
+ * - I/O error when reading exceptions
+ * => don't load the target; abort.
+ * (We can't use the intermediate origin state.)
+ * - I/O error while merging
+ * => stop merging; set merge_failed; process I/O normally.
+ */
+ int merge_failed;
+
+ /* Wait for events based on state_bits */
+ unsigned long state_bits;
+
+ /* Range of chunks currently being merged. */
+ chunk_t first_merging_chunk;
+ int num_merging_chunks;
+
+ /*
+ * Incoming bios that overlap with chunks being merged must wait
+ * for them to be committed.
+ */
+ struct bio_list bios_queued_during_merge;
};
+/*
+ * state_bits:
+ * RUNNING_MERGE - Merge operation is in progress.
+ * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
+ * cleared afterwards.
+ */
+#define RUNNING_MERGE 0
+#define SHUTDOWN_MERGE 1
+
+struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+{
+ return s->cow;
+}
+EXPORT_SYMBOL(dm_snap_cow);
+
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
@@ -116,7 +173,7 @@ static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
}
struct dm_snap_pending_exception {
- struct dm_snap_exception e;
+ struct dm_exception e;
/*
* Origin buffers waiting for this to complete are held
@@ -125,28 +182,6 @@ struct dm_snap_pending_exception {
struct bio_list origin_bios;
struct bio_list snapshot_bios;
- /*
- * Short-term queue of pending exceptions prior to submission.
- */
- struct list_head list;
-
- /*
- * The primary pending_exception is the one that holds
- * the ref_count and the list of origin_bios for a
- * group of pending_exceptions. It is always last to get freed.
- * These fields get set up when writing to the origin.
- */
- struct dm_snap_pending_exception *primary_pe;
-
- /*
- * Number of pending_exceptions processing this chunk.
- * When this drops to zero we must complete the origin bios.
- * If incrementing or decrementing this, hold pe->snap->lock for
- * the sibling concerned and not pe->primary_pe->snap->lock unless
- * they are the same.
- */
- atomic_t ref_count;
-
/* Pointer back to snapshot context */
struct dm_snapshot *snap;
@@ -222,6 +257,16 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
}
/*
+ * This conflicting I/O is extremely improbable in the caller,
+ * so msleep(1) is sufficient and there is no need for a wait queue.
+ */
+static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
+{
+ while (__chunk_is_tracked(s, chunk))
+ msleep(1);
+}
+
+/*
* One of these per registered origin, held in the snapshot_origins hash
*/
struct origin {
@@ -243,6 +288,10 @@ struct origin {
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;
+static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
+static uint64_t _pending_exceptions_done_count;
+
static int init_origin_hash(void)
{
int i;
@@ -291,22 +340,144 @@ static void __insert_origin(struct origin *o)
}
/*
+ * _origins_lock must be held when calling this function.
+ * Returns number of snapshots registered using the supplied cow device, plus:
+ * snap_src - a snapshot suitable for use as a source of exception handover
+ * snap_dest - a snapshot capable of receiving exception handover.
+ * snap_merge - an existing snapshot-merge target linked to the same origin.
+ * There can be at most one snapshot-merge target. The parameter is optional.
+ *
+ * Possible return values and states of snap_src and snap_dest.
+ * 0: NULL, NULL - first new snapshot
+ * 1: snap_src, NULL - normal snapshot
+ * 2: snap_src, snap_dest - waiting for handover
+ * 2: snap_src, NULL - handed over, waiting for old to be deleted
+ * 1: NULL, snap_dest - source got destroyed without handover
+ */
+static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
+ struct dm_snapshot **snap_src,
+ struct dm_snapshot **snap_dest,
+ struct dm_snapshot **snap_merge)
+{
+ struct dm_snapshot *s;
+ struct origin *o;
+ int count = 0;
+ int active;
+
+ o = __lookup_origin(snap->origin->bdev);
+ if (!o)
+ goto out;
+
+ list_for_each_entry(s, &o->snapshots, list) {
+ if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
+ *snap_merge = s;
+ if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
+ continue;
+
+ down_read(&s->lock);
+ active = s->active;
+ up_read(&s->lock);
+
+ if (active) {
+ if (snap_src)
+ *snap_src = s;
+ } else if (snap_dest)
+ *snap_dest = s;
+
+ count++;
+ }
+
+out:
+ return count;
+}
+
+/*
+ * On success, returns 1 if this snapshot is a handover destination,
+ * otherwise returns 0.
+ */
+static int __validate_exception_handover(struct dm_snapshot *snap)
+{
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+ struct dm_snapshot *snap_merge = NULL;
+
+ /* Does snapshot need exceptions handed over to it? */
+ if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
+ &snap_merge) == 2) ||
+ snap_dest) {
+ snap->ti->error = "Snapshot cow pairing for exception "
+ "table handover failed";
+ return -EINVAL;
+ }
+
+ /*
+ * If no snap_src was found, snap cannot become a handover
+ * destination.
+ */
+ if (!snap_src)
+ return 0;
+
+ /*
+ * Non-snapshot-merge handover?
+ */
+ if (!dm_target_is_snapshot_merge(snap->ti))
+ return 1;
+
+ /*
+ * Do not allow more than one merging snapshot.
+ */
+ if (snap_merge) {
+ snap->ti->error = "A snapshot is already merging.";
+ return -EINVAL;
+ }
+
+ if (!snap_src->store->type->prepare_merge ||
+ !snap_src->store->type->commit_merge) {
+ snap->ti->error = "Snapshot exception store does not "
+ "support snapshot-merge.";
+ return -EINVAL;
+ }
+
+ return 1;
+}
+
+static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
+{
+ struct dm_snapshot *l;
+
+ /* Sort the list according to chunk size, largest first, smallest last */
+ list_for_each_entry(l, &o->snapshots, list)
+ if (l->store->chunk_size < s->store->chunk_size)
+ break;
+ list_add_tail(&s->list, &l->list);
+}
+
+/*
* Make a note of the snapshot and its origin so we can look it
* up when the origin has a write on it.
+ *
+ * Also validate snapshot exception store handovers.
+ * On success, returns 1 if this registration is a handover destination,
+ * otherwise returns 0.
*/
static int register_snapshot(struct dm_snapshot *snap)
{
- struct dm_snapshot *l;
- struct origin *o, *new_o;
+ struct origin *o, *new_o = NULL;
struct block_device *bdev = snap->origin->bdev;
+ int r = 0;
new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
if (!new_o)
return -ENOMEM;
down_write(&_origins_lock);
- o = __lookup_origin(bdev);
+ r = __validate_exception_handover(snap);
+ if (r < 0) {
+ kfree(new_o);
+ goto out;
+ }
+
+ o = __lookup_origin(bdev);
if (o)
kfree(new_o);
else {
@@ -320,14 +491,27 @@ static int register_snapshot(struct dm_snapshot *snap)
__insert_origin(o);
}
- /* Sort the list according to chunk size, largest-first smallest-last */
- list_for_each_entry(l, &o->snapshots, list)
- if (l->store->chunk_size < snap->store->chunk_size)
- break;
- list_add_tail(&snap->list, &l->list);
+ __insert_snapshot(o, snap);
+
+out:
+ up_write(&_origins_lock);
+
+ return r;
+}
+
+/*
+ * Move snapshot to correct place in list according to chunk size.
+ */
+static void reregister_snapshot(struct dm_snapshot *s)
+{
+ struct block_device *bdev = s->origin->bdev;
+
+ down_write(&_origins_lock);
+
+ list_del(&s->list);
+ __insert_snapshot(__lookup_origin(bdev), s);
up_write(&_origins_lock);
- return 0;
}
static void unregister_snapshot(struct dm_snapshot *s)
@@ -338,7 +522,7 @@ static void unregister_snapshot(struct dm_snapshot *s)
o = __lookup_origin(s->origin->bdev);
list_del(&s->list);
- if (list_empty(&o->snapshots)) {
+ if (o && list_empty(&o->snapshots)) {
list_del(&o->hash_list);
kfree(o);
}
@@ -351,8 +535,8 @@ static void unregister_snapshot(struct dm_snapshot *s)
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
-static int init_exception_table(struct exception_table *et, uint32_t size,
- unsigned hash_shift)
+static int dm_exception_table_init(struct dm_exception_table *et,
+ uint32_t size, unsigned hash_shift)
{
unsigned int i;
@@ -368,10 +552,11 @@ static int init_exception_table(struct exception_table *et, uint32_t size,
return 0;
}
-static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
+static void dm_exception_table_exit(struct dm_exception_table *et,
+ struct kmem_cache *mem)
{
struct list_head *slot;
- struct dm_snap_exception *ex, *next;
+ struct dm_exception *ex, *next;
int i, size;
size = et->hash_mask + 1;
@@ -385,19 +570,12 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
vfree(et->table);
}
-static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
return (chunk >> et->hash_shift) & et->hash_mask;
}
-static void insert_exception(struct exception_table *eh,
- struct dm_snap_exception *e)
-{
- struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
- list_add(&e->hash_list, l);
-}
-
-static void remove_exception(struct dm_snap_exception *e)
+static void dm_remove_exception(struct dm_exception *e)
{
list_del(&e->hash_list);
}
@@ -406,11 +584,11 @@ static void remove_exception(struct dm_snap_exception *e)
* Return the exception data for a sector, or NULL if not
* remapped.
*/
-static struct dm_snap_exception *lookup_exception(struct exception_table *et,
- chunk_t chunk)
+static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
+ chunk_t chunk)
{
struct list_head *slot;
- struct dm_snap_exception *e;
+ struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
list_for_each_entry (e, slot, hash_list)
@@ -421,9 +599,9 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
return NULL;
}
-static struct dm_snap_exception *alloc_exception(void)
+static struct dm_exception *alloc_completed_exception(void)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
e = kmem_cache_alloc(exception_cache, GFP_NOIO);
if (!e)
@@ -432,7 +610,7 @@ static struct dm_snap_exception *alloc_exception(void)
return e;
}
-static void free_exception(struct dm_snap_exception *e)
+static void free_completed_exception(struct dm_exception *e)
{
kmem_cache_free(exception_cache, e);
}
@@ -457,12 +635,11 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
atomic_dec(&s->pending_exceptions_count);
}
-static void insert_completed_exception(struct dm_snapshot *s,
- struct dm_snap_exception *new_e)
+static void dm_insert_exception(struct dm_exception_table *eh,
+ struct dm_exception *new_e)
{
- struct exception_table *eh = &s->complete;
struct list_head *l;
- struct dm_snap_exception *e = NULL;
+ struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -478,7 +655,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
dm_consecutive_chunk_count(e) + 1)) {
dm_consecutive_chunk_count_inc(e);
- free_exception(new_e);
+ free_completed_exception(new_e);
return;
}
@@ -488,7 +665,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
dm_consecutive_chunk_count_inc(e);
e->old_chunk--;
e->new_chunk--;
- free_exception(new_e);
+ free_completed_exception(new_e);
return;
}
@@ -507,9 +684,9 @@ out:
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
struct dm_snapshot *s = context;
- struct dm_snap_exception *e;
+ struct dm_exception *e;
- e = alloc_exception();
+ e = alloc_completed_exception();
if (!e)
return -ENOMEM;
@@ -518,11 +695,30 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
- insert_completed_exception(s, e);
+ dm_insert_exception(&s->complete, e);
return 0;
}
+#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
+
+/*
+ * Return a minimum chunk size of all snapshots that have the specified origin.
+ * Return zero if the origin has no snapshots.
+ */
+static sector_t __minimum_chunk_size(struct origin *o)
+{
+ struct dm_snapshot *snap;
+ unsigned chunk_size = 0;
+
+ if (o)
+ list_for_each_entry(snap, &o->snapshots, list)
+ chunk_size = min_not_zero(chunk_size,
+ snap->store->chunk_size);
+
+ return chunk_size;
+}
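The min_not_zero() guard matters because an origin can hold snapshots whose chunk_size is still zero (unset, e.g. while waiting for handover); a tiny userspace check of the macro, with invented sizes:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

int main(void)
{
	/* Invented chunk sizes in sectors; 0 means "unset" and is skipped. */
	unsigned sizes[] = { 0, 32, 8, 16 };
	unsigned chunk_size = 0;
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		chunk_size = min_not_zero(chunk_size, sizes[i]);

	printf("minimum non-zero chunk size: %u\n", chunk_size); /* 8 */
	return 0;
}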
+
/*
* Hard coded magic.
*/
@@ -546,16 +742,18 @@ static int init_hash_tables(struct dm_snapshot *s)
* Calculate based on the size of the original volume or
* the COW volume...
*/
- cow_dev_size = get_dev_size(s->store->cow->bdev);
+ cow_dev_size = get_dev_size(s->cow->bdev);
origin_dev_size = get_dev_size(s->origin->bdev);
max_buckets = calc_max_buckets();
hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
hash_size = min(hash_size, max_buckets);
+ if (hash_size < 64)
+ hash_size = 64;
hash_size = rounddown_pow_of_two(hash_size);
- if (init_exception_table(&s->complete, hash_size,
- DM_CHUNK_CONSECUTIVE_BITS))
+ if (dm_exception_table_init(&s->complete, hash_size,
+ DM_CHUNK_CONSECUTIVE_BITS))
return -ENOMEM;
/*
@@ -566,14 +764,284 @@ static int init_hash_tables(struct dm_snapshot *s)
if (hash_size < 64)
hash_size = 64;
- if (init_exception_table(&s->pending, hash_size, 0)) {
- exit_exception_table(&s->complete, exception_cache);
+ if (dm_exception_table_init(&s->pending, hash_size, 0)) {
+ dm_exception_table_exit(&s->complete, exception_cache);
return -ENOMEM;
}
return 0;
}
+static void merge_shutdown(struct dm_snapshot *s)
+{
+ clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&s->state_bits, RUNNING_MERGE);
+}
+
+static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
+{
+ s->first_merging_chunk = 0;
+ s->num_merging_chunks = 0;
+
+ return bio_list_get(&s->bios_queued_during_merge);
+}
+
+/*
+ * Remove one chunk from the index of completed exceptions.
+ */
+static int __remove_single_exception_chunk(struct dm_snapshot *s,
+ chunk_t old_chunk)
+{
+ struct dm_exception *e;
+
+ e = dm_lookup_exception(&s->complete, old_chunk);
+ if (!e) {
+ DMERR("Corruption detected: exception for block %llu is "
+ "on disk but not in memory",
+ (unsigned long long)old_chunk);
+ return -EINVAL;
+ }
+
+ /*
+ * If this is the only chunk using this exception, remove exception.
+ */
+ if (!dm_consecutive_chunk_count(e)) {
+ dm_remove_exception(e);
+ free_completed_exception(e);
+ return 0;
+ }
+
+ /*
+ * The chunk may be either at the beginning or the end of a
+ * group of consecutive chunks - never in the middle. We are
+ * removing chunks in the opposite order to that in which they
+ * were added, so this should always be true.
+ * Decrement the consecutive chunk counter and adjust the
+ * starting point if necessary.
+ */
+ if (old_chunk == e->old_chunk) {
+ e->old_chunk++;
+ e->new_chunk++;
+ } else if (old_chunk != e->old_chunk +
+ dm_consecutive_chunk_count(e)) {
+ DMERR("Attempt to merge block %llu from the "
+ "middle of a chunk range [%llu - %llu]",
+ (unsigned long long)old_chunk,
+ (unsigned long long)e->old_chunk,
+ (unsigned long long)
+ e->old_chunk + dm_consecutive_chunk_count(e));
+ return -EINVAL;
+ }
+
+ dm_consecutive_chunk_count_dec(e);
+
+ return 0;
+}
+
+static void flush_bios(struct bio *bio);
+
+static int remove_single_exception_chunk(struct dm_snapshot *s)
+{
+ struct bio *b = NULL;
+ int r;
+ chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
+
+ down_write(&s->lock);
+
+ /*
+ * Process chunks (and associated exceptions) in reverse order
+ * so that dm_consecutive_chunk_count_dec() accounting works.
+ */
+ do {
+ r = __remove_single_exception_chunk(s, old_chunk);
+ if (r)
+ goto out;
+ } while (old_chunk-- > s->first_merging_chunk);
+
+ b = __release_queued_bios_after_merge(s);
+
+out:
+ up_write(&s->lock);
+ if (b)
+ flush_bios(b);
+
+ return r;
+}
+
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+ sector_t sector, unsigned chunk_size);
+
+static void merge_callback(int read_err, unsigned long write_err,
+ void *context);
+
+static uint64_t read_pending_exceptions_done_count(void)
+{
+ uint64_t pending_exceptions_done;
+
+ spin_lock(&_pending_exceptions_done_spinlock);
+ pending_exceptions_done = _pending_exceptions_done_count;
+ spin_unlock(&_pending_exceptions_done_spinlock);
+
+ return pending_exceptions_done;
+}
+
+static void increment_pending_exceptions_done_count(void)
+{
+ spin_lock(&_pending_exceptions_done_spinlock);
+ _pending_exceptions_done_count++;
+ spin_unlock(&_pending_exceptions_done_spinlock);
+
+ wake_up_all(&_pending_exceptions_done);
+}
+
+static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+{
+ int i, linear_chunks;
+ chunk_t old_chunk, new_chunk;
+ struct dm_io_region src, dest;
+ sector_t io_size;
+ uint64_t previous_count;
+
+ BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
+ if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
+ goto shut;
+
+ /*
+ * valid flag never changes during merge, so no lock required.
+ */
+ if (!s->valid) {
+ DMERR("Snapshot is invalid: can't merge");
+ goto shut;
+ }
+
+ linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+ &new_chunk);
+ if (linear_chunks <= 0) {
+ if (linear_chunks < 0) {
+ DMERR("Read error in exception store: "
+ "shutting down merge");
+ down_write(&s->lock);
+ s->merge_failed = 1;
+ up_write(&s->lock);
+ }
+ goto shut;
+ }
+
+ /* Adjust old_chunk and new_chunk to reflect start of linear region */
+ old_chunk = old_chunk + 1 - linear_chunks;
+ new_chunk = new_chunk + 1 - linear_chunks;
+
+ /*
+ * Use one (potentially large) I/O to copy all 'linear_chunks'
+ * from the exception store to the origin
+ */
+ io_size = linear_chunks * s->store->chunk_size;
+
+ dest.bdev = s->origin->bdev;
+ dest.sector = chunk_to_sector(s->store, old_chunk);
+ dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
+
+ src.bdev = s->cow->bdev;
+ src.sector = chunk_to_sector(s->store, new_chunk);
+ src.count = dest.count;
+
+ /*
+ * Reallocate any exceptions needed in other snapshots then
+ * wait for the pending exceptions to complete.
+ * Each time any pending exception (globally on the system)
+ * completes we are woken and repeat the process to find out
+ * if we can proceed. While this may not seem a particularly
+ * efficient algorithm, it is not expected to have any
+ * significant impact on performance.
+ */
+ previous_count = read_pending_exceptions_done_count();
+ while (origin_write_extent(s, dest.sector, io_size)) {
+ wait_event(_pending_exceptions_done,
+ (read_pending_exceptions_done_count() !=
+ previous_count));
+ /* Retry after the wait, until all exceptions are done. */
+ previous_count = read_pending_exceptions_done_count();
+ }
+
+ down_write(&s->lock);
+ s->first_merging_chunk = old_chunk;
+ s->num_merging_chunks = linear_chunks;
+ up_write(&s->lock);
+
+ /* Wait until writes to all 'linear_chunks' drain */
+ for (i = 0; i < linear_chunks; i++)
+ __check_for_conflicting_io(s, old_chunk + i);
+
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+ return;
+
+shut:
+ merge_shutdown(s);
+}
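The source and destination region set-up above is plain arithmetic and can be sketched in userspace; this assumes chunk_to_sector() is a shift by chunk_shift, and every number below is invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented geometry: 8-sector chunks (chunk_shift = 3), a
	 * 1000-sector origin, and a run of 4 linear chunks to merge. */
	uint64_t chunk_size = 8, chunk_shift = 3;
	uint64_t origin_size = 1000;
	uint64_t old_chunk = 120, new_chunk = 45;
	int linear_chunks = 4;

	/* Rewind to the start of the linear region, as the merge code does. */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	uint64_t io_size = linear_chunks * chunk_size;
	uint64_t dest_sector = old_chunk << chunk_shift;
	uint64_t dest_count = io_size;

	/* Clamp the copy so it never runs past the end of the origin. */
	if (origin_size - dest_sector < dest_count)
		dest_count = origin_size - dest_sector;

	printf("copy %llu sectors: cow chunk %llu -> origin sector %llu\n",
	       (unsigned long long)dest_count,
	       (unsigned long long)new_chunk,
	       (unsigned long long)dest_sector);
	return 0;
}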
+
+static void error_bios(struct bio *bio);
+
+static void merge_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct dm_snapshot *s = context;
+ struct bio *b = NULL;
+
+ if (read_err || write_err) {
+ if (read_err)
+ DMERR("Read error: shutting down merge.");
+ else
+ DMERR("Write error: shutting down merge.");
+ goto shut;
+ }
+
+ if (s->store->type->commit_merge(s->store,
+ s->num_merging_chunks) < 0) {
+ DMERR("Write error in exception store: shutting down merge");
+ goto shut;
+ }
+
+ if (remove_single_exception_chunk(s) < 0)
+ goto shut;
+
+ snapshot_merge_next_chunks(s);
+
+ return;
+
+shut:
+ down_write(&s->lock);
+ s->merge_failed = 1;
+ b = __release_queued_bios_after_merge(s);
+ up_write(&s->lock);
+ error_bios(b);
+
+ merge_shutdown(s);
+}
+
+static void start_merge(struct dm_snapshot *s)
+{
+ if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
+ snapshot_merge_next_chunks(s);
+}
+
+static int wait_schedule(void *ptr)
+{
+ schedule();
+
+ return 0;
+}
+
+/*
+ * Stop the merging process and wait until it finishes.
+ */
+static void stop_merge(struct dm_snapshot *s)
+{
+ set_bit(SHUTDOWN_MERGE, &s->state_bits);
+ wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
+ TASK_UNINTERRUPTIBLE);
+ clear_bit(SHUTDOWN_MERGE, &s->state_bits);
+}
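A single-threaded userspace model of the RUNNING_MERGE/SHUTDOWN_MERGE bits may clarify the protocol; the kernel versions use test_and_set_bit(), clear_bit_unlock() and wait_on_bit(), and the actual waiting is omitted from this sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1

static atomic_ulong state_bits;

/* Only one merge may run at a time: the first caller to set the bit wins. */
static bool try_start_merge(void)
{
	unsigned long old = atomic_fetch_or(&state_bits, 1UL << RUNNING_MERGE);

	return !(old & (1UL << RUNNING_MERGE));
}

/* The merge worker checks this between chunks and shuts down when set. */
static bool shutdown_requested(void)
{
	return atomic_load(&state_bits) & (1UL << SHUTDOWN_MERGE);
}

static void request_shutdown(void)
{
	atomic_fetch_or(&state_bits, 1UL << SHUTDOWN_MERGE);
}

int main(void)
{
	printf("started: %d\n", try_start_merge());        /* 1 */
	printf("started again: %d\n", try_start_merge());  /* 0, already running */
	request_shutdown();
	printf("shutdown requested: %d\n", shutdown_requested()); /* 1 */
	return 0;
}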
+
/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
@@ -582,50 +1050,73 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_snapshot *s;
int i;
int r = -EINVAL;
- char *origin_path;
- struct dm_exception_store *store;
- unsigned args_used;
+ char *origin_path, *cow_path;
+ unsigned args_used, num_flush_requests = 1;
+ fmode_t origin_mode = FMODE_READ;
if (argc != 4) {
ti->error = "requires exactly 4 arguments";
r = -EINVAL;
- goto bad_args;
+ goto bad;
+ }
+
+ if (dm_target_is_snapshot_merge(ti)) {
+ num_flush_requests = 2;
+ origin_mode = FMODE_WRITE;
}
origin_path = argv[0];
argv++;
argc--;
- r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ti->error = "Cannot allocate snapshot context private "
+ "structure";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cow_path = argv[0];
+ argv++;
+ argc--;
+
+ r = dm_get_device(ti, cow_path, 0, 0,
+ FMODE_READ | FMODE_WRITE, &s->cow);
+ if (r) {
+ ti->error = "Cannot get COW device";
+ goto bad_cow;
+ }
+
+ r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
if (r) {
ti->error = "Couldn't create exception store";
r = -EINVAL;
- goto bad_args;
+ goto bad_store;
}
argv += args_used;
argc -= args_used;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (!s) {
- ti->error = "Cannot allocate snapshot context private "
- "structure";
- r = -ENOMEM;
- goto bad_snap;
- }
-
- r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
+ r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
if (r) {
ti->error = "Cannot get origin device";
goto bad_origin;
}
- s->store = store;
+ s->ti = ti;
s->valid = 1;
s->active = 0;
+ s->suspended = 0;
atomic_set(&s->pending_exceptions_count, 0);
init_rwsem(&s->lock);
+ INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
+ s->state_bits = 0;
+ s->merge_failed = 0;
+ s->first_merging_chunk = 0;
+ s->num_merging_chunks = 0;
+ bio_list_init(&s->bios_queued_during_merge);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -659,39 +1150,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&s->tracked_chunk_lock);
- /* Metadata must only be loaded into one table at once */
+ bio_list_init(&s->queued_bios);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
+
+ ti->private = s;
+ ti->num_flush_requests = num_flush_requests;
+
+ /* Add snapshot to the list of snapshots for this origin */
+ /* Exceptions aren't triggered till snapshot_resume() is called */
+ r = register_snapshot(s);
+ if (r == -ENOMEM) {
+ ti->error = "Snapshot origin struct allocation failed";
+ goto bad_load_and_register;
+ } else if (r < 0) {
+ /* invalid handover, register_snapshot has set ti->error */
+ goto bad_load_and_register;
+ }
+
+ /*
+ * Metadata must only be loaded into one table at once, so skip this
+ * if metadata will be handed over during resume.
+ * Chunk size will be set during the handover - set it to zero to
+ * ensure it's ignored.
+ */
+ if (r > 0) {
+ s->store->chunk_size = 0;
+ return 0;
+ }
+
r = s->store->type->read_metadata(s->store, dm_add_exception,
(void *)s);
if (r < 0) {
ti->error = "Failed to read snapshot metadata";
- goto bad_load_and_register;
+ goto bad_read_metadata;
} else if (r > 0) {
s->valid = 0;
DMWARN("Snapshot is marked invalid.");
}
- bio_list_init(&s->queued_bios);
- INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
- goto bad_load_and_register;
- }
-
- /* Add snapshot to the list of snapshots for this origin */
- /* Exceptions aren't triggered till snapshot_resume() is called */
- if (register_snapshot(s)) {
- r = -EINVAL;
- ti->error = "Cannot register snapshot origin";
- goto bad_load_and_register;
+ goto bad_read_metadata;
}
-
- ti->private = s;
ti->split_io = s->store->chunk_size;
- ti->num_flush_requests = 1;
return 0;
+bad_read_metadata:
+ unregister_snapshot(s);
+
bad_load_and_register:
mempool_destroy(s->tracked_chunk_pool);
@@ -702,19 +1209,22 @@ bad_pending_pool:
dm_kcopyd_client_destroy(s->kcopyd_client);
bad_kcopyd:
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
+ dm_exception_table_exit(&s->pending, pending_cache);
+ dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
dm_put_device(ti, s->origin);
bad_origin:
- kfree(s);
+ dm_exception_store_destroy(s->store);
-bad_snap:
- dm_exception_store_destroy(store);
+bad_store:
+ dm_put_device(ti, s->cow);
+
+bad_cow:
+ kfree(s);
-bad_args:
+bad:
return r;
}
@@ -723,8 +1233,39 @@ static void __free_exceptions(struct dm_snapshot *s)
dm_kcopyd_client_destroy(s->kcopyd_client);
s->kcopyd_client = NULL;
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
+ dm_exception_table_exit(&s->pending, pending_cache);
+ dm_exception_table_exit(&s->complete, exception_cache);
+}
+
+static void __handover_exceptions(struct dm_snapshot *snap_src,
+ struct dm_snapshot *snap_dest)
+{
+ union {
+ struct dm_exception_table table_swap;
+ struct dm_exception_store *store_swap;
+ } u;
+
+ /*
+ * Swap all snapshot context information between the two instances.
+ */
+ u.table_swap = snap_dest->complete;
+ snap_dest->complete = snap_src->complete;
+ snap_src->complete = u.table_swap;
+
+ u.store_swap = snap_dest->store;
+ snap_dest->store = snap_src->store;
+ snap_src->store = u.store_swap;
+
+ snap_dest->store->snap = snap_dest;
+ snap_src->store->snap = snap_src;
+
+ snap_dest->ti->split_io = snap_dest->store->chunk_size;
+ snap_dest->valid = snap_src->valid;
+
+ /*
+ * Set source invalid to ensure it receives no further I/O.
+ */
+ snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
@@ -733,9 +1274,24 @@ static void snapshot_dtr(struct dm_target *ti)
int i;
#endif
struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
flush_workqueue(ksnapd);
+ down_read(&_origins_lock);
+ /* Check whether exception handover must be cancelled */
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest && (s == snap_src)) {
+ down_write(&snap_dest->lock);
+ snap_dest->valid = 0;
+ up_write(&snap_dest->lock);
+ DMERR("Cancelling snapshot handover.");
+ }
+ up_read(&_origins_lock);
+
+ if (dm_target_is_snapshot_merge(ti))
+ stop_merge(s);
+
/* Prevent further origin writes from using this snapshot. */
/* After this returns there can be no new kcopyd jobs. */
unregister_snapshot(s);
@@ -763,6 +1319,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ dm_put_device(ti, s->cow);
+
kfree(s);
}
@@ -795,6 +1353,26 @@ static void flush_queued_bios(struct work_struct *work)
flush_bios(queued_bios);
}
+static int do_origin(struct dm_dev *origin, struct bio *bio);
+
+/*
+ * Flush a list of buffers.
+ */
+static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
+{
+ struct bio *n;
+ int r;
+
+ while (bio) {
+ n = bio->bi_next;
+ bio->bi_next = NULL;
+ r = do_origin(s->origin, bio);
+ if (r == DM_MAPIO_REMAPPED)
+ generic_make_request(bio);
+ bio = n;
+ }
+}
+
/*
* Error a list of buffers.
*/
@@ -825,45 +1403,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
s->valid = 0;
- dm_table_event(s->store->ti->table);
-}
-
-static void get_pending_exception(struct dm_snap_pending_exception *pe)
-{
- atomic_inc(&pe->ref_count);
-}
-
-static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
-{
- struct dm_snap_pending_exception *primary_pe;
- struct bio *origin_bios = NULL;
-
- primary_pe = pe->primary_pe;
-
- /*
- * If this pe is involved in a write to the origin and
- * it is the last sibling to complete then release
- * the bios for the original write to the origin.
- */
- if (primary_pe &&
- atomic_dec_and_test(&primary_pe->ref_count)) {
- origin_bios = bio_list_get(&primary_pe->origin_bios);
- free_pending_exception(primary_pe);
- }
-
- /*
- * Free the pe if it's not linked to an origin write or if
- * it's not itself a primary pe.
- */
- if (!primary_pe || primary_pe != pe)
- free_pending_exception(pe);
-
- return origin_bios;
+ dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
@@ -877,7 +1422,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
goto out;
}
- e = alloc_exception();
+ e = alloc_completed_exception();
if (!e) {
down_write(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
@@ -888,28 +1433,27 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
down_write(&s->lock);
if (!s->valid) {
- free_exception(e);
+ free_completed_exception(e);
error = 1;
goto out;
}
- /*
- * Check for conflicting reads. This is extremely improbable,
- * so msleep(1) is sufficient and there is no need for a wait queue.
- */
- while (__chunk_is_tracked(s, pe->e.old_chunk))
- msleep(1);
+ /* Check for conflicting reads */
+ __check_for_conflicting_io(s, pe->e.old_chunk);
/*
* Add a proper exception, and remove the
* in-flight exception from the list.
*/
- insert_completed_exception(s, e);
+ dm_insert_exception(&s->complete, e);
out:
- remove_exception(&pe->e);
+ dm_remove_exception(&pe->e);
snapshot_bios = bio_list_get(&pe->snapshot_bios);
- origin_bios = put_pending_exception(pe);
+ origin_bios = bio_list_get(&pe->origin_bios);
+ free_pending_exception(pe);
+
+ increment_pending_exceptions_done_count();
up_write(&s->lock);
@@ -919,7 +1463,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
else
flush_bios(snapshot_bios);
- flush_bios(origin_bios);
+ retry_origin_bios(s, origin_bios);
}
static void commit_callback(void *context, int success)
@@ -963,7 +1507,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
- dest.bdev = s->store->cow->bdev;
+ dest.bdev = s->cow->bdev;
dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
dest.count = src.count;
@@ -975,7 +1519,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
- struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
+ struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
if (!e)
return NULL;
@@ -1006,8 +1550,6 @@ __find_pending_exception(struct dm_snapshot *s,
pe->e.old_chunk = chunk;
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
- pe->primary_pe = NULL;
- atomic_set(&pe->ref_count, 0);
pe->started = 0;
if (s->store->type->prepare_exception(s->store, &pe->e)) {
@@ -1015,16 +1557,15 @@ __find_pending_exception(struct dm_snapshot *s,
return NULL;
}
- get_pending_exception(pe);
- insert_exception(&s->pending, &pe->e);
+ dm_insert_exception(&s->pending, &pe->e);
return pe;
}
-static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
- bio->bi_bdev = s->store->cow->bdev;
+ bio->bi_bdev = s->cow->bdev;
bio->bi_sector = chunk_to_sector(s->store,
dm_chunk_number(e->new_chunk) +
(chunk - e->old_chunk)) +
@@ -1035,14 +1576,14 @@ static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
static int snapshot_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
if (unlikely(bio_empty_barrier(bio))) {
- bio->bi_bdev = s->store->cow->bdev;
+ bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
}
@@ -1063,7 +1604,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
/* If the block is already remapped - use that, else remap it */
- e = lookup_exception(&s->complete, chunk);
+ e = dm_lookup_exception(&s->complete, chunk);
if (e) {
remap_exception(s, e, bio, chunk);
goto out_unlock;
@@ -1087,7 +1628,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
goto out_unlock;
}
- e = lookup_exception(&s->complete, chunk);
+ e = dm_lookup_exception(&s->complete, chunk);
if (e) {
free_pending_exception(pe);
remap_exception(s, e, bio, chunk);
@@ -1125,6 +1666,78 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
return r;
}
+/*
+ * A snapshot-merge target behaves like a combination of a snapshot
+ * target and a snapshot-origin target. It only generates new
+ * exceptions in other snapshots and not in the one that is being
+ * merged.
+ *
+ * For each chunk, if there is an existing exception, it is used to
+ * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
+ * which in turn might generate exceptions in other snapshots.
+ * If merging is currently taking place on the chunk in question, the
+ * I/O is deferred by adding it to s->bios_queued_during_merge.
+ */
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct dm_exception *e;
+ struct dm_snapshot *s = ti->private;
+ int r = DM_MAPIO_REMAPPED;
+ chunk_t chunk;
+
+ if (unlikely(bio_empty_barrier(bio))) {
+ if (!map_context->flush_request)
+ bio->bi_bdev = s->origin->bdev;
+ else
+ bio->bi_bdev = s->cow->bdev;
+ map_context->ptr = NULL;
+ return DM_MAPIO_REMAPPED;
+ }
+
+ chunk = sector_to_chunk(s->store, bio->bi_sector);
+
+ down_write(&s->lock);
+
+ /* Full merging snapshots are redirected to the origin */
+ if (!s->valid)
+ goto redirect_to_origin;
+
+ /* If the block is already remapped - use that */
+ e = dm_lookup_exception(&s->complete, chunk);
+ if (e) {
+ /* Queue writes overlapping with chunks being merged */
+ if (bio_rw(bio) == WRITE &&
+ chunk >= s->first_merging_chunk &&
+ chunk < (s->first_merging_chunk +
+ s->num_merging_chunks)) {
+ bio->bi_bdev = s->origin->bdev;
+ bio_list_add(&s->bios_queued_during_merge, bio);
+ r = DM_MAPIO_SUBMITTED;
+ goto out_unlock;
+ }
+
+ remap_exception(s, e, bio, chunk);
+
+ if (bio_rw(bio) == WRITE)
+ map_context->ptr = track_chunk(s, chunk);
+ goto out_unlock;
+ }
+
+redirect_to_origin:
+ bio->bi_bdev = s->origin->bdev;
+
+ if (bio_rw(bio) == WRITE) {
+ up_write(&s->lock);
+ return do_origin(s->origin, bio);
+ }
+
+out_unlock:
+ up_write(&s->lock);
+
+ return r;
+}
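The routing decision above can be summarised in a few lines; the chunk numbers and merge window below are invented, and read handling and chunk tracking are left out of this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decide where a write to chunk 'c' goes while chunks
 * [first, first + num) are being merged. */
static const char *route_write(uint64_t c, bool remapped,
			       uint64_t first, uint64_t num)
{
	if (!remapped)
		return "origin (may trigger exceptions in other snapshots)";
	if (c >= first && c < first + num)
		return "queued until the merge of this chunk commits";
	return "cow device (existing exception)";
}

int main(void)
{
	/* Invented chunk numbers; chunks 20..23 are being merged. */
	printf("%s\n", route_write(10, false, 20, 4));
	printf("%s\n", route_write(21, true, 20, 4));
	printf("%s\n", route_write(50, true, 20, 4));
	return 0;
}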
+
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
int error, union map_info *map_context)
{
@@ -1137,40 +1750,135 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
return 0;
}
+static void snapshot_merge_presuspend(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ stop_merge(s);
+}
+
+static void snapshot_postsuspend(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ down_write(&s->lock);
+ s->suspended = 1;
+ up_write(&s->lock);
+}
+
+static int snapshot_preresume(struct dm_target *ti)
+{
+ int r = 0;
+ struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+ down_read(&snap_src->lock);
+ if (s == snap_src) {
+ DMERR("Unable to resume snapshot source until "
+ "handover completes.");
+ r = -EINVAL;
+ } else if (!snap_src->suspended) {
+ DMERR("Unable to perform snapshot handover until "
+ "source is suspended.");
+ r = -EINVAL;
+ }
+ up_read(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+ return r;
+}
+
static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ __handover_exceptions(snap_src, snap_dest);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+ /* Now that we have the correct chunk size, reregister */
+ reregister_snapshot(s);
down_write(&s->lock);
s->active = 1;
+ s->suspended = 0;
up_write(&s->lock);
}
+static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+{
+ sector_t min_chunksize;
+
+ down_read(&_origins_lock);
+ min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
+ up_read(&_origins_lock);
+
+ return min_chunksize;
+}
+
+static void snapshot_merge_resume(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ /*
+ * Hand over exceptions from the existing snapshot.
+ */
+ snapshot_resume(ti);
+
+ /*
+ * snapshot-merge acts as an origin, so set ti->split_io
+ */
+ ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+
+ start_merge(s);
+}
+
static int snapshot_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
- down_write(&snap->lock);
-
switch (type) {
case STATUSTYPE_INFO:
+
+ down_write(&snap->lock);
+
if (!snap->valid)
DMEMIT("Invalid");
+ else if (snap->merge_failed)
+ DMEMIT("Merge failed");
else {
- if (snap->store->type->fraction_full) {
- sector_t numerator, denominator;
- snap->store->type->fraction_full(snap->store,
- &numerator,
- &denominator);
- DMEMIT("%llu/%llu",
- (unsigned long long)numerator,
- (unsigned long long)denominator);
+ if (snap->store->type->usage) {
+ sector_t total_sectors, sectors_allocated,
+ metadata_sectors;
+ snap->store->type->usage(snap->store,
+ &total_sectors,
+ &sectors_allocated,
+ &metadata_sectors);
+ DMEMIT("%llu/%llu %llu",
+ (unsigned long long)sectors_allocated,
+ (unsigned long long)total_sectors,
+ (unsigned long long)metadata_sectors);
}
else
DMEMIT("Unknown");
}
+
+ up_write(&snap->lock);
+
break;
case STATUSTYPE_TABLE:
@@ -1179,14 +1887,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
* to make private copies if the output is to
* make sense.
*/
- DMEMIT("%s", snap->origin->name);
+ DMEMIT("%s %s", snap->origin->name, snap->cow->name);
snap->store->type->status(snap->store, type, result + sz,
maxlen - sz);
break;
}
- up_write(&snap->lock);
-
return 0;
}
@@ -1202,17 +1908,36 @@ static int snapshot_iterate_devices(struct dm_target *ti,
/*-----------------------------------------------------------------
* Origin methods
*---------------------------------------------------------------*/
-static int __origin_write(struct list_head *snapshots, struct bio *bio)
+
+/*
+ * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
+ * supplied bio was ignored. The caller may submit it immediately.
+ * (No remapping actually occurs as the origin is always a direct linear
+ * map.)
+ *
+ * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
+ * and any supplied bio is added to a list to be submitted once all
+ * the necessary exceptions exist.
+ */
+static int __origin_write(struct list_head *snapshots, sector_t sector,
+ struct bio *bio)
{
- int r = DM_MAPIO_REMAPPED, first = 0;
+ int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
- struct dm_snap_exception *e;
- struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
+ struct dm_exception *e;
+ struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe_to_start_now = NULL;
+ struct dm_snap_pending_exception *pe_to_start_last = NULL;
chunk_t chunk;
- LIST_HEAD(pe_queue);
/* Do all the snapshots on this origin */
list_for_each_entry (snap, snapshots, list) {
+ /*
+ * Don't make new exceptions in a merging snapshot
+ * because it has effectively been deleted
+ */
+ if (dm_target_is_snapshot_merge(snap->ti))
+ continue;
down_write(&snap->lock);
@@ -1221,24 +1946,21 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
goto next_snapshot;
/* Nothing to do if writing beyond end of snapshot */
- if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
+ if (sector >= dm_table_get_size(snap->ti->table))
goto next_snapshot;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
- chunk = sector_to_chunk(snap->store, bio->bi_sector);
+ chunk = sector_to_chunk(snap->store, sector);
/*
* Check exception table to see if block
* is already remapped in this snapshot
* and trigger an exception if not.
- *
- * ref_count is initialised to 1 so pending_complete()
- * won't destroy the primary_pe while we're inside this loop.
*/
- e = lookup_exception(&snap->complete, chunk);
+ e = dm_lookup_exception(&snap->complete, chunk);
if (e)
goto next_snapshot;
@@ -1253,7 +1975,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
goto next_snapshot;
}
- e = lookup_exception(&snap->complete, chunk);
+ e = dm_lookup_exception(&snap->complete, chunk);
if (e) {
free_pending_exception(pe);
goto next_snapshot;
@@ -1266,59 +1988,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
}
}
- if (!primary_pe) {
- /*
- * Either every pe here has same
- * primary_pe or none has one yet.
- */
- if (pe->primary_pe)
- primary_pe = pe->primary_pe;
- else {
- primary_pe = pe;
- first = 1;
- }
-
- bio_list_add(&primary_pe->origin_bios, bio);
+ r = DM_MAPIO_SUBMITTED;
- r = DM_MAPIO_SUBMITTED;
- }
+ /*
+ * If an origin bio was supplied, queue it to wait for the
+ * completion of this exception, and start this one last,
+ * at the end of the function.
+ */
+ if (bio) {
+ bio_list_add(&pe->origin_bios, bio);
+ bio = NULL;
- if (!pe->primary_pe) {
- pe->primary_pe = primary_pe;
- get_pending_exception(primary_pe);
+ if (!pe->started) {
+ pe->started = 1;
+ pe_to_start_last = pe;
+ }
}
if (!pe->started) {
pe->started = 1;
- list_add_tail(&pe->list, &pe_queue);
+ pe_to_start_now = pe;
}
next_snapshot:
up_write(&snap->lock);
- }
- if (!primary_pe)
- return r;
-
- /*
- * If this is the first time we're processing this chunk and
- * ref_count is now 1 it means all the pending exceptions
- * got completed while we were in the loop above, so it falls to
- * us here to remove the primary_pe and submit any origin_bios.
- */
-
- if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
- flush_bios(bio_list_get(&primary_pe->origin_bios));
- free_pending_exception(primary_pe);
- /* If we got here, pe_queue is necessarily empty. */
- return r;
+ if (pe_to_start_now) {
+ start_copy(pe_to_start_now);
+ pe_to_start_now = NULL;
+ }
}
/*
- * Now that we have a complete pe list we can start the copying.
+ * Submit the exception against which the bio is queued last,
+ * to give the other exceptions a head start.
*/
- list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
- start_copy(pe);
+ if (pe_to_start_last)
+ start_copy(pe_to_start_last);
return r;
}
@@ -1334,13 +2040,48 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio);
+ r = __origin_write(&o->snapshots, bio->bi_sector, bio);
up_read(&_origins_lock);
return r;
}
/*
+ * Trigger exceptions in all non-merging snapshots.
+ *
+ * The chunk size of the merging snapshot may be larger than the chunk
+ * size of some other snapshot, so we may need to reallocate multiple
+ * chunks in other snapshots.
+ *
+ * We scan all the overlapping exceptions in the other snapshots.
+ * Returns 1 if anything was reallocated and must be waited for,
+ * otherwise returns 0.
+ *
+ * size must be a multiple of merging_snap's chunk_size.
+ */
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+ sector_t sector, unsigned size)
+{
+ int must_wait = 0;
+ sector_t n;
+ struct origin *o;
+
+ /*
+ * The origin's __minimum_chunk_size() got stored in split_io
+ * by snapshot_merge_resume().
+ */
+ down_read(&_origins_lock);
+ o = __lookup_origin(merging_snap->origin->bdev);
+ for (n = 0; n < size; n += merging_snap->ti->split_io)
+ if (__origin_write(&o->snapshots, sector + n, NULL) ==
+ DM_MAPIO_SUBMITTED)
+ must_wait = 1;
+ up_read(&_origins_lock);
+
+ return must_wait;
+}
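Stepping by split_io, the minimum chunk size over all snapshots on the origin, guarantees every affected chunk in every snapshot is visited at least once; a trivial sketch with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented values: a 64-sector extent is being merged back and the
	 * smallest snapshot chunk on this origin is 16 sectors. */
	uint64_t size = 64;	/* extent size in sectors */
	uint64_t split_io = 16;	/* minimum chunk size over all snapshots */
	uint64_t sector = 4096;	/* invented start sector of the extent */
	uint64_t n;

	/* One __origin_write()-style check per potential chunk boundary. */
	for (n = 0; n < size; n += split_io)
		printf("trigger exceptions for sector %llu\n",
		       (unsigned long long)(sector + n));
	return 0;
}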
+
+/*
* Origin: maps a linear range of a device, with hooks for snapshotting.
*/
@@ -1391,8 +2132,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
/*
* Set the target "split_io" field to the minimum of all the snapshots'
* chunk sizes.
@@ -1400,19 +2139,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
static void origin_resume(struct dm_target *ti)
{
struct dm_dev *dev = ti->private;
- struct dm_snapshot *snap;
- struct origin *o;
- unsigned chunk_size = 0;
-
- down_read(&_origins_lock);
- o = __lookup_origin(dev->bdev);
- if (o)
- list_for_each_entry (snap, &o->snapshots, list)
- chunk_size = min_not_zero(chunk_size,
- snap->store->chunk_size);
- up_read(&_origins_lock);
- ti->split_io = chunk_size;
+ ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1455,17 +2183,35 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 7, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_map,
.end_io = snapshot_end_io,
+ .postsuspend = snapshot_postsuspend,
+ .preresume = snapshot_preresume,
.resume = snapshot_resume,
.status = snapshot_status,
.iterate_devices = snapshot_iterate_devices,
};
+static struct target_type merge_target = {
+ .name = dm_snapshot_merge_target_name,
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+ .map = snapshot_merge_map,
+ .end_io = snapshot_end_io,
+ .presuspend = snapshot_merge_presuspend,
+ .postsuspend = snapshot_postsuspend,
+ .preresume = snapshot_preresume,
+ .resume = snapshot_merge_resume,
+ .status = snapshot_status,
+ .iterate_devices = snapshot_iterate_devices,
+};
+
static int __init dm_snapshot_init(void)
{
int r;
@@ -1477,7 +2223,7 @@ static int __init dm_snapshot_init(void)
}
r = dm_register_target(&snapshot_target);
- if (r) {
+ if (r < 0) {
DMERR("snapshot target register failed %d", r);
goto bad_register_snapshot_target;
}
@@ -1485,34 +2231,40 @@ static int __init dm_snapshot_init(void)
r = dm_register_target(&origin_target);
if (r < 0) {
DMERR("Origin target register failed %d", r);
- goto bad1;
+ goto bad_register_origin_target;
+ }
+
+ r = dm_register_target(&merge_target);
+ if (r < 0) {
+ DMERR("Merge target register failed %d", r);
+ goto bad_register_merge_target;
}
r = init_origin_hash();
if (r) {
DMERR("init_origin_hash failed.");
- goto bad2;
+ goto bad_origin_hash;
}
- exception_cache = KMEM_CACHE(dm_snap_exception, 0);
+ exception_cache = KMEM_CACHE(dm_exception, 0);
if (!exception_cache) {
DMERR("Couldn't create exception cache.");
r = -ENOMEM;
- goto bad3;
+ goto bad_exception_cache;
}
pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
if (!pending_cache) {
DMERR("Couldn't create pending cache.");
r = -ENOMEM;
- goto bad4;
+ goto bad_pending_cache;
}
tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
if (!tracked_chunk_cache) {
DMERR("Couldn't create cache to track chunks in use.");
r = -ENOMEM;
- goto bad5;
+ goto bad_tracked_chunk_cache;
}
ksnapd = create_singlethread_workqueue("ksnapd");
@@ -1526,19 +2278,21 @@ static int __init dm_snapshot_init(void)
bad_pending_pool:
kmem_cache_destroy(tracked_chunk_cache);
-bad5:
+bad_tracked_chunk_cache:
kmem_cache_destroy(pending_cache);
-bad4:
+bad_pending_cache:
kmem_cache_destroy(exception_cache);
-bad3:
+bad_exception_cache:
exit_origin_hash();
-bad2:
+bad_origin_hash:
+ dm_unregister_target(&merge_target);
+bad_register_merge_target:
dm_unregister_target(&origin_target);
-bad1:
+bad_register_origin_target:
dm_unregister_target(&snapshot_target);
-
bad_register_snapshot_target:
dm_exception_store_exit();
+
return r;
}
@@ -1548,6 +2302,7 @@ static void __exit dm_snapshot_exit(void)
dm_unregister_target(&snapshot_target);
dm_unregister_target(&origin_target);
+ dm_unregister_target(&merge_target);
exit_origin_hash();
kmem_cache_destroy(pending_cache);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 4b045903a4e2..f53392df7b97 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -59,7 +59,7 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
{
- sprintf(buf, "%d\n", dm_suspended(md));
+ sprintf(buf, "%d\n", dm_suspended_md(md));
return strlen(buf);
}
@@ -80,12 +80,20 @@ static struct sysfs_ops dm_sysfs_ops = {
};
/*
+ * The sysfs structure is embedded in the md struct; nothing to do here
+ */
+static void dm_sysfs_release(struct kobject *kobj)
+{
+}
+
+/*
* dm kobject is embedded in mapped_device structure
* no need to define release function here
*/
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
+ .release = dm_sysfs_release
};
/*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1a6cb3c7822e..be625475cf6d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
@@ -237,6 +238,9 @@ void dm_table_destroy(struct dm_table *t)
{
unsigned int i;
+ if (!t)
+ return;
+
while (atomic_read(&t->holders))
msleep(1);
smp_mb();
@@ -600,11 +604,8 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return -ENOMEM;
while (1) {
- start = end;
-
/* Skip whitespace */
- while (*start && isspace(*start))
- start++;
+ start = skip_spaces(end);
if (!*start)
break; /* success, we hit the end */
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 6f65883aef12..c7c555a8c7b2 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -139,14 +139,13 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
list_del_init(&event->elist);
/*
- * Need to call dm_copy_name_and_uuid from here for now.
- * Context of previous var adds and locking used for
- * hash_cell not compatable.
+ * When a device is being removed this copy fails and we
+ * discard these unsent events.
*/
if (dm_copy_name_and_uuid(event->md, event->name,
event->uuid)) {
- DMERR("%s: dm_copy_name_and_uuid() failed",
- __func__);
+ DMINFO("%s: skipping sending uevent for lost device",
+ __func__);
goto uevent_free;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 724efc63904d..3167480b532c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -143,9 +143,19 @@ struct mapped_device {
int barrier_error;
/*
+ * Protect barrier_error from concurrent endio processing
+ * in request-based dm.
+ */
+ spinlock_t barrier_error_lock;
+
+ /*
* Processing queue (flush/barriers)
*/
struct workqueue_struct *wq;
+ struct work_struct barrier_work;
+
+ /* A pointer to the currently processing pre/post flush request */
+ struct request *flush_request;
/*
* The current mapping.
@@ -178,9 +188,6 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- /* marker of flush suspend for request-based dm */
- struct request suspend_rq;
-
/* For saving the address of __make_request for request based dm */
make_request_fn *saved_make_request_fn;
@@ -275,6 +282,7 @@ static int (*_inits[])(void) __initdata = {
dm_target_init,
dm_linear_init,
dm_stripe_init,
+ dm_io_init,
dm_kcopyd_init,
dm_interface_init,
};
@@ -284,6 +292,7 @@ static void (*_exits[])(void) = {
dm_target_exit,
dm_linear_exit,
dm_stripe_exit,
+ dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
};
@@ -320,6 +329,11 @@ static void __exit dm_exit(void)
/*
* Block device functions
*/
+int dm_deleting_md(struct mapped_device *md)
+{
+ return test_bit(DMF_DELETING, &md->flags);
+}
+
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
@@ -331,7 +345,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
goto out;
if (test_bit(DMF_FREEING, &md->flags) ||
- test_bit(DMF_DELETING, &md->flags)) {
+ dm_deleting_md(md)) {
md = NULL;
goto out;
}
@@ -388,7 +402,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
struct dm_target *tgt;
int r = -ENOTTY;
@@ -401,7 +415,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
tgt = dm_table_get_target(map, 0);
- if (dm_suspended(md)) {
+ if (dm_suspended_md(md)) {
r = -EAGAIN;
goto out;
}
@@ -430,9 +444,10 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
mempool_free(tio, md->tio_pool);
}
-static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
+static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
+ gfp_t gfp_mask)
{
- return mempool_alloc(md->tio_pool, GFP_ATOMIC);
+ return mempool_alloc(md->tio_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
@@ -450,6 +465,12 @@ static void free_bio_info(struct dm_rq_clone_bio_info *info)
mempool_free(info, info->tio->md->io_pool);
}
+static int md_in_flight(struct mapped_device *md)
+{
+ return atomic_read(&md->pending[READ]) +
+ atomic_read(&md->pending[WRITE]);
+}
+
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
@@ -512,7 +533,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
* function to access the md->map field, and make sure they call
* dm_table_put() when finished.
*/
-struct dm_table *dm_get_table(struct mapped_device *md)
+struct dm_table *dm_get_live_table(struct mapped_device *md)
{
struct dm_table *t;
unsigned long flags;
@@ -716,28 +737,38 @@ static void end_clone_bio(struct bio *clone, int error)
blk_update_request(tio->orig, 0, nr_bytes);
}
+static void store_barrier_error(struct mapped_device *md, int error)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&md->barrier_error_lock, flags);
+ /*
+ * Basically, the first error is taken, but:
+ * -EOPNOTSUPP supersedes any I/O error.
+ * Requeue request supersedes any I/O error but -EOPNOTSUPP.
+ */
+ if (!md->barrier_error || error == -EOPNOTSUPP ||
+ (md->barrier_error != -EOPNOTSUPP &&
+ error == DM_ENDIO_REQUEUE))
+ md->barrier_error = error;
+ spin_unlock_irqrestore(&md->barrier_error_lock, flags);
+}
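
For illustration only (not part of the patch): the precedence rule encoded above — the first error is kept, a requeue overrides a plain I/O error, and -EOPNOTSUPP overrides everything — can be restated as a small userspace program. DM_ENDIO_REQUEUE is hard-coded below as an assumed value purely for the demonstration:

#include <errno.h>
#include <stdio.h>

#define DM_ENDIO_REQUEUE 1  /* assumed value, for illustration only */

/* Mirrors the precedence test in store_barrier_error() above. */
static void store(int *stored, int error)
{
    if (!*stored || error == -EOPNOTSUPP ||
        (*stored != -EOPNOTSUPP && error == DM_ENDIO_REQUEUE))
        *stored = error;
}

int main(void)
{
    int barrier_error = 0;

    store(&barrier_error, -EIO);             /* first error is recorded   */
    store(&barrier_error, DM_ENDIO_REQUEUE); /* requeue overrides -EIO    */
    store(&barrier_error, -EOPNOTSUPP);      /* -EOPNOTSUPP overrides all */
    printf("final barrier_error = %d\n", barrier_error);
    return 0;
}
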
+
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
* Or do dm_get() before calling this function and dm_put() later.
*/
-static void rq_completed(struct mapped_device *md, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
- int wakeup_waiters = 0;
- struct request_queue *q = md->queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!queue_in_flight(q))
- wakeup_waiters = 1;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- if (wakeup_waiters)
+ if (!md_in_flight(md))
wake_up(&md->wait);
if (run_queue)
- blk_run_queue(q);
+ blk_run_queue(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -753,6 +784,44 @@ static void free_rq_clone(struct request *clone)
free_rq_tio(tio);
}
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ int rw = rq_data_dir(clone);
+ int run_queue = 1;
+ bool is_barrier = blk_barrier_rq(clone);
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (blk_pc_request(rq) && !is_barrier) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ free_rq_clone(clone);
+
+ if (unlikely(is_barrier)) {
+ if (unlikely(error))
+ store_barrier_error(md, error);
+ run_queue = 0;
+ } else
+ blk_end_request_all(rq, error);
+
+ rq_completed(md, rw, run_queue);
+}
+
static void dm_unprep_request(struct request *rq)
{
struct request *clone = rq->special;
@@ -768,12 +837,23 @@ static void dm_unprep_request(struct request *rq)
*/
void dm_requeue_unmapped_request(struct request *clone)
{
+ int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request_queue *q = rq->q;
unsigned long flags;
+ if (unlikely(blk_barrier_rq(clone))) {
+ /*
+ * Barrier clones share an original request.
+ * Leave it to dm_end_request(), which handles this special
+ * case.
+ */
+ dm_end_request(clone, DM_ENDIO_REQUEUE);
+ return;
+ }
+
dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
@@ -782,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone)
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
- rq_completed(md, 0);
+ rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
@@ -815,34 +895,28 @@ static void start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-/*
- * Complete the clone and the original request.
- * Must be called without queue lock.
- */
-static void dm_end_request(struct request *clone, int error)
+static void dm_done(struct request *clone, int error, bool mapped)
{
+ int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
- struct request *rq = tio->orig;
+ dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
- if (blk_pc_request(rq)) {
- rq->errors = clone->errors;
- rq->resid_len = clone->resid_len;
+ if (mapped && rq_end_io)
+ r = rq_end_io(tio->ti, clone, error, &tio->info);
- if (rq->sense)
- /*
- * We are using the sense buffer of the original
- * request.
- * So setting the length of the sense data is enough.
- */
- rq->sense_len = clone->sense_len;
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, r);
+ else if (r == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (r == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ else {
+ DMWARN("unimplemented target endio return value: %d", r);
+ BUG();
}
-
- free_rq_clone(clone);
-
- blk_end_request_all(rq, error);
-
- rq_completed(md, 1);
}
/*
@@ -850,27 +924,14 @@ static void dm_end_request(struct request *clone, int error)
*/
static void dm_softirq_done(struct request *rq)
{
+ bool mapped = true;
struct request *clone = rq->completion_data;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
- int error = tio->error;
- if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
- error = rq_end_io(tio->ti, clone, error, &tio->info);
+ if (rq->cmd_flags & REQ_FAILED)
+ mapped = false;
- if (error <= 0)
- /* The target wants to complete the I/O */
- dm_end_request(clone, error);
- else if (error == DM_ENDIO_INCOMPLETE)
- /* The target will handle the I/O */
- return;
- else if (error == DM_ENDIO_REQUEUE)
- /* The target wants to requeue the I/O */
- dm_requeue_unmapped_request(clone);
- else {
- DMWARN("unimplemented target endio return value: %d", error);
- BUG();
- }
+ dm_done(clone, tio->error, mapped);
}
/*
@@ -882,6 +943,19 @@ static void dm_complete_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
+ if (unlikely(blk_barrier_rq(clone))) {
+ /*
+ * Barrier clones share an original request, so we can't use
+ * softirq_done with the original.
+ * Pass the clone to dm_done() directly in this special case.
+ * It is safe (even if clone->q->queue_lock is held here)
+ * because there is no I/O dispatching during the completion
+ * of a barrier clone.
+ */
+ dm_done(clone, error, true);
+ return;
+ }
+
tio->error = error;
rq->completion_data = clone;
blk_complete_request(rq);
@@ -898,6 +972,17 @@ void dm_kill_unmapped_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
+ if (unlikely(blk_barrier_rq(clone))) {
+ /*
+ * Barrier clones share an original request.
+ * Leave it to dm_end_request(), which handles this special
+ * case.
+ */
+ BUG_ON(error > 0);
+ dm_end_request(clone, error);
+ return;
+ }
+
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(clone, error);
}
@@ -1214,7 +1299,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
struct clone_info ci;
int error = 0;
- ci.map = dm_get_table(md);
+ ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
bio_io_error(bio);
@@ -1255,7 +1340,7 @@ static int dm_merge_bvec(struct request_queue *q,
struct bio_vec *biovec)
{
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
sector_t max_sectors;
int max_size = 0;
@@ -1352,11 +1437,6 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
return md->saved_make_request_fn(q, bio); /* call __make_request() */
}
@@ -1375,6 +1455,25 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return _dm_request(q, bio);
}
+/*
+ * Mark this request as a flush request, so that dm_request_fn() can
+ * recognize it.
+ */
+static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
+{
+ rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ rq->cmd[0] = REQ_LB_OP_FLUSH;
+}
+
+static bool dm_rq_is_flush_request(struct request *rq)
+{
+ if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+ rq->cmd[0] == REQ_LB_OP_FLUSH)
+ return true;
+ else
+ return false;
+}
+
void dm_dispatch_request(struct request *rq)
{
int r;
@@ -1420,25 +1519,54 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
static int setup_clone(struct request *clone, struct request *rq,
struct dm_rq_target_io *tio)
{
- int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
- dm_rq_bio_constructor, tio);
+ int r;
- if (r)
- return r;
+ if (dm_rq_is_flush_request(rq)) {
+ blk_rq_init(NULL, clone);
+ clone->cmd_type = REQ_TYPE_FS;
+ clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
+ } else {
+ r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+ dm_rq_bio_constructor, tio);
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->buffer = rq->buffer;
+ }
- clone->cmd = rq->cmd;
- clone->cmd_len = rq->cmd_len;
- clone->sense = rq->sense;
- clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
return 0;
}
-static int dm_rq_flush_suspending(struct mapped_device *md)
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+ gfp_t gfp_mask)
{
- return !md->suspend_rq.special;
+ struct request *clone;
+ struct dm_rq_target_io *tio;
+
+ tio = alloc_rq_tio(md, gfp_mask);
+ if (!tio)
+ return NULL;
+
+ tio->md = md;
+ tio->ti = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ clone = &tio->clone;
+ if (setup_clone(clone, rq, tio)) {
+ /* -ENOMEM */
+ free_rq_tio(tio);
+ return NULL;
+ }
+
+ return clone;
}
/*
@@ -1447,39 +1575,19 @@ static int dm_rq_flush_suspending(struct mapped_device *md)
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
- struct dm_rq_target_io *tio;
struct request *clone;
- if (unlikely(rq == &md->suspend_rq)) {
- if (dm_rq_flush_suspending(md))
- return BLKPREP_OK;
- else
- /* The flush suspend was interrupted */
- return BLKPREP_KILL;
- }
+ if (unlikely(dm_rq_is_flush_request(rq)))
+ return BLKPREP_OK;
if (unlikely(rq->special)) {
DMWARN("Already has something in rq->special.");
return BLKPREP_KILL;
}
- tio = alloc_rq_tio(md); /* Only one for each original request */
- if (!tio)
- /* -ENOMEM */
- return BLKPREP_DEFER;
-
- tio->md = md;
- tio->ti = NULL;
- tio->orig = rq;
- tio->error = 0;
- memset(&tio->info, 0, sizeof(tio->info));
-
- clone = &tio->clone;
- if (setup_clone(clone, rq, tio)) {
- /* -ENOMEM */
- free_rq_tio(tio);
+ clone = clone_rq(rq, md, GFP_ATOMIC);
+ if (!clone)
return BLKPREP_DEFER;
- }
rq->special = clone;
rq->cmd_flags |= REQ_DONTPREP;
@@ -1487,11 +1595,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_OK;
}
-static void map_request(struct dm_target *ti, struct request *rq,
+static void map_request(struct dm_target *ti, struct request *clone,
struct mapped_device *md)
{
int r;
- struct request *clone = rq->special;
struct dm_rq_target_io *tio = clone->end_io_data;
/*
@@ -1511,6 +1618,8 @@ static void map_request(struct dm_target *ti, struct request *rq,
break;
case DM_MAPIO_REMAPPED:
/* The target has remapped the I/O so dispatch it */
+ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ blk_rq_pos(tio->orig));
dm_dispatch_request(clone);
break;
case DM_MAPIO_REQUEUE:
@@ -1536,29 +1645,26 @@ static void map_request(struct dm_target *ti, struct request *rq,
static void dm_request_fn(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
- struct request *rq;
+ struct request *rq, *clone;
/*
- * For noflush suspend, check blk_queue_stopped() to immediately
- * quit I/O dispatching.
+ * For suspend, check blk_queue_stopped() and increment
+ * ->pending while holding a single queue_lock, so that the
+ * number of in-flight I/Os is not incremented after the queue
+ * is stopped in dm_suspend().
*/
while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
goto plug_and_out;
- if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend maker */
- if (queue_in_flight(q))
- /* Not quiet yet. Wait more */
- goto plug_and_out;
-
- /* This device should be quiet now */
- __stop_queue(q);
+ if (unlikely(dm_rq_is_flush_request(rq))) {
+ BUG_ON(md->flush_request);
+ md->flush_request = rq;
blk_start_request(rq);
- __blk_end_request_all(rq, 0);
- wake_up(&md->wait);
+ queue_work(md->wq, &md->barrier_work);
goto out;
}
@@ -1567,8 +1673,11 @@ static void dm_request_fn(struct request_queue *q)
goto plug_and_out;
blk_start_request(rq);
+ clone = rq->special;
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+
spin_unlock(q->queue_lock);
- map_request(ti, rq, md);
+ map_request(ti, clone, md);
spin_lock_irq(q->queue_lock);
}
@@ -1595,7 +1704,7 @@ static int dm_lld_busy(struct request_queue *q)
{
int r;
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
r = 1;
@@ -1610,7 +1719,7 @@ static int dm_lld_busy(struct request_queue *q)
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
if (map) {
if (dm_request_based(md))
@@ -1628,7 +1737,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
struct dm_table *map;
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
if (map) {
/*
* Request-based dm cares about only own queue for
@@ -1725,6 +1834,7 @@ out:
static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
+static void dm_rq_barrier_work(struct work_struct *work);
/*
* Allocate and initialise a blank device with a given minor.
@@ -1754,6 +1864,7 @@ static struct mapped_device *alloc_dev(int minor)
init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
spin_lock_init(&md->deferred_lock);
+ spin_lock_init(&md->barrier_error_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
@@ -1788,6 +1899,8 @@ static struct mapped_device *alloc_dev(int minor)
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ dm_rq_prepare_flush);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -1797,6 +1910,7 @@ static struct mapped_device *alloc_dev(int minor)
atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
+ INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
init_waitqueue_head(&md->eventq);
md->disk->major = _major;
@@ -1921,9 +2035,13 @@ static void __set_size(struct mapped_device *md, sector_t size)
mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
-static int __bind(struct mapped_device *md, struct dm_table *t,
- struct queue_limits *limits)
+/*
+ * Returns old map, which caller must destroy.
+ */
+static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ struct queue_limits *limits)
{
+ struct dm_table *old_map;
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
@@ -1938,11 +2056,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
__set_size(md, size);
- if (!size) {
- dm_table_destroy(t);
- return 0;
- }
-
dm_table_event_callback(t, event_callback, md);
/*
@@ -1958,26 +2071,31 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
write_lock_irqsave(&md->map_lock, flags);
+ old_map = md->map;
md->map = t;
dm_table_set_restrictions(t, q, limits);
write_unlock_irqrestore(&md->map_lock, flags);
- return 0;
+ return old_map;
}
-static void __unbind(struct mapped_device *md)
+/*
+ * Returns unbound table for the caller to free.
+ */
+static struct dm_table *__unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
unsigned long flags;
if (!map)
- return;
+ return NULL;
dm_table_event_callback(map, NULL, NULL);
write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
write_unlock_irqrestore(&md->map_lock, flags);
- dm_table_destroy(map);
+
+ return map;
}
/*
@@ -2059,18 +2177,18 @@ void dm_put(struct mapped_device *md)
BUG_ON(test_bit(DMF_FREEING, &md->flags));
if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
idr_replace(&_minor_idr, MINOR_ALLOCED,
MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- if (!dm_suspended(md)) {
+ if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
dm_sysfs_exit(md);
dm_table_put(map);
- __unbind(md);
+ dm_table_destroy(__unbind(md));
free_dev(md);
}
}
@@ -2080,8 +2198,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
- struct request_queue *q = md->queue;
- unsigned long flags;
dm_unplug_all(md->queue);
@@ -2091,15 +2207,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
set_current_state(interruptible);
smp_mb();
- if (dm_request_based(md)) {
- spin_lock_irqsave(q->queue_lock, flags);
- if (!queue_in_flight(q) && blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- break;
- }
- spin_unlock_irqrestore(q->queue_lock, flags);
- } else if (!atomic_read(&md->pending[0]) &&
- !atomic_read(&md->pending[1]))
+ if (!md_in_flight(md))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2194,98 +2302,106 @@ static void dm_queue_flush(struct mapped_device *md)
queue_work(md->wq, &md->work);
}
-/*
- * Swap in a new table (destroying old one).
- */
-int dm_swap_table(struct mapped_device *md, struct dm_table *table)
+static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
{
- struct queue_limits limits;
- int r = -EINVAL;
+ struct dm_rq_target_io *tio = clone->end_io_data;
- mutex_lock(&md->suspend_lock);
+ tio->info.flush_request = flush_nr;
+}
- /* device must be suspended */
- if (!dm_suspended(md))
- goto out;
+/* Issue barrier requests to targets and wait for their completion. */
+static int dm_rq_barrier(struct mapped_device *md)
+{
+ int i, j;
+ struct dm_table *map = dm_get_live_table(md);
+ unsigned num_targets = dm_table_get_num_targets(map);
+ struct dm_target *ti;
+ struct request *clone;
- r = dm_calculate_queue_limits(table, &limits);
- if (r)
- goto out;
+ md->barrier_error = 0;
- /* cannot change the device type, once a table is bound */
- if (md->map &&
- (dm_table_get_type(md->map) != dm_table_get_type(table))) {
- DMWARN("can't change the device type after a table is bound");
- goto out;
+ for (i = 0; i < num_targets; i++) {
+ ti = dm_table_get_target(map, i);
+ for (j = 0; j < ti->num_flush_requests; j++) {
+ clone = clone_rq(md->flush_request, md, GFP_NOIO);
+ dm_rq_set_flush_nr(clone, j);
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+ map_request(ti, clone, md);
+ }
}
- __unbind(md);
- r = __bind(md, table, &limits);
-
-out:
- mutex_unlock(&md->suspend_lock);
- return r;
-}
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+ dm_table_put(map);
-static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
-{
- md->suspend_rq.special = (void *)0x1;
+ return md->barrier_error;
}
-static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
+static void dm_rq_barrier_work(struct work_struct *work)
{
+ int error;
+ struct mapped_device *md = container_of(work, struct mapped_device,
+ barrier_work);
struct request_queue *q = md->queue;
+ struct request *rq;
unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- if (!noflush)
- dm_rq_invalidate_suspend_marker(md);
- __start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
+ /*
+ * Hold an md reference here and release it only at the end, so
+ * that the md cannot be deleted by a device opener while the
+ * barrier request completes.
+ */
+ dm_get(md);
-static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
-{
- struct request *rq = &md->suspend_rq;
- struct request_queue *q = md->queue;
+ error = dm_rq_barrier(md);
- if (noflush)
- stop_queue(q);
- else {
- blk_rq_init(q, rq);
- blk_insert_request(q, rq, 0, NULL);
- }
+ rq = md->flush_request;
+ md->flush_request = NULL;
+
+ if (error == DM_ENDIO_REQUEUE) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, rq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else
+ blk_end_request_all(rq, error);
+
+ blk_run_queue(q);
+
+ dm_put(md);
}
-static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
+/*
+ * Swap in a new table, returning the old one for the caller to destroy.
+ */
+struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- int r = 1;
- struct request *rq = &md->suspend_rq;
- struct request_queue *q = md->queue;
- unsigned long flags;
+ struct dm_table *map = ERR_PTR(-EINVAL);
+ struct queue_limits limits;
+ int r;
- if (noflush)
- return r;
+ mutex_lock(&md->suspend_lock);
- /* The marker must be protected by queue lock if it is in use */
- spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(rq->ref_count)) {
- /*
- * This can happen, when the previous flush suspend was
- * interrupted, the marker is still in the queue and
- * this flush suspend has been invoked, because we don't
- * remove the marker at the time of suspend interruption.
- * We have only one marker per mapped_device, so we can't
- * start another flush suspend while it is in use.
- */
- BUG_ON(!rq->special); /* The marker should be invalidated */
- DMWARN("Invalidating the previous flush suspend is still in"
- " progress. Please retry later.");
- r = 0;
+ /* device must be suspended */
+ if (!dm_suspended_md(md))
+ goto out;
+
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r) {
+ map = ERR_PTR(r);
+ goto out;
}
- spin_unlock_irqrestore(q->queue_lock, flags);
- return r;
+ /* cannot change the device type, once a table is bound */
+ if (md->map &&
+ (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+ DMWARN("can't change the device type after a table is bound");
+ goto out;
+ }
+
+ map = __bind(md, table, &limits);
+
+out:
+ mutex_unlock(&md->suspend_lock);
+ return map;
}
/*
@@ -2330,49 +2446,11 @@ static void unlock_fs(struct mapped_device *md)
/*
* Suspend mechanism in request-based dm.
*
- * After the suspend starts, further incoming requests are kept in
- * the request_queue and deferred.
- * Remaining requests in the request_queue at the start of suspend are flushed
- * if it is flush suspend.
- * The suspend completes when the following conditions have been satisfied,
- * so wait for it:
- * 1. q->in_flight is 0 (which means no in_flight request)
- * 2. queue has been stopped (which means no request dispatching)
- *
+ * 1. Flush all I/Os by lock_fs() if needed.
+ * 2. Stop dispatching any I/O by stopping the request_queue.
+ * 3. Wait for all in-flight I/Os to be completed or requeued.
*
- * Noflush suspend
- * ---------------
- * Noflush suspend doesn't need to dispatch remaining requests.
- * So stop the queue immediately. Then, wait for all in_flight requests
- * to be completed or requeued.
- *
- * To abort noflush suspend, start the queue.
- *
- *
- * Flush suspend
- * -------------
- * Flush suspend needs to dispatch remaining requests. So stop the queue
- * after the remaining requests are completed. (Requeued request must be also
- * re-dispatched and completed. Until then, we can't stop the queue.)
- *
- * During flushing the remaining requests, further incoming requests are also
- * inserted to the same queue. To distinguish which requests are to be
- * flushed, we insert a marker request to the queue at the time of starting
- * flush suspend, like a barrier.
- * The dispatching is blocked when the marker is found on the top of the queue.
- * And the queue is stopped when all in_flight requests are completed, since
- * that means the remaining requests are completely flushed.
- * Then, the marker is removed from the queue.
- *
- * To abort flush suspend, we also need to take care of the marker, not only
- * starting the queue.
- * We don't remove the marker forcibly from the queue since it's against
- * the block-layer manner. Instead, we put a invalidated mark on the marker.
- * When the invalidated marker is found on the top of the queue, it is
- * immediately removed from the queue, so it doesn't block dispatching.
- * Because we have only one marker per mapped_device, we can't start another
- * flush suspend until the invalidated marker is removed from the queue.
- * So fail and return with -EBUSY in such a case.
+ * To abort suspend, start the request_queue.
*/
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
@@ -2383,17 +2461,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
mutex_lock(&md->suspend_lock);
- if (dm_suspended(md)) {
+ if (dm_suspended_md(md)) {
r = -EINVAL;
goto out_unlock;
}
- if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
- r = -EBUSY;
- goto out_unlock;
- }
-
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
@@ -2406,8 +2479,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
dm_table_presuspend_targets(map);
/*
- * Flush I/O to the device. noflush supersedes do_lockfs,
- * because lock_fs() needs to flush I/Os.
+ * Flush I/O to the device.
+ * Any I/O submitted after lock_fs() may not be flushed.
+ * noflush takes precedence over do_lockfs.
+ * (lock_fs() flushes I/Os and waits for them to complete.)
*/
if (!noflush && do_lockfs) {
r = lock_fs(md);
@@ -2436,10 +2511,15 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
up_write(&md->io_lock);
- flush_workqueue(md->wq);
-
+ /*
+ * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
+ * can be kicked until md->queue is stopped. So stop md->queue before
+ * flushing md->wq.
+ */
if (dm_request_based(md))
- dm_rq_start_suspend(md, noflush);
+ stop_queue(md->queue);
+
+ flush_workqueue(md->wq);
/*
* At this point no more requests are entering target request routines.
@@ -2458,7 +2538,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
dm_queue_flush(md);
if (dm_request_based(md))
- dm_rq_abort_suspend(md, noflush);
+ start_queue(md->queue);
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
@@ -2470,10 +2550,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
* requests are being added to md->deferred list.
*/
- dm_table_postsuspend_targets(map);
-
set_bit(DMF_SUSPENDED, &md->flags);
+ dm_table_postsuspend_targets(map);
+
out:
dm_table_put(map);
@@ -2488,10 +2568,10 @@ int dm_resume(struct mapped_device *md)
struct dm_table *map = NULL;
mutex_lock(&md->suspend_lock);
- if (!dm_suspended(md))
+ if (!dm_suspended_md(md))
goto out;
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
if (!map || !dm_table_get_size(map))
goto out;
@@ -2592,18 +2672,29 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
return NULL;
if (test_bit(DMF_FREEING, &md->flags) ||
- test_bit(DMF_DELETING, &md->flags))
+ dm_deleting_md(md))
return NULL;
dm_get(md);
return md;
}
-int dm_suspended(struct mapped_device *md)
+int dm_suspended_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED, &md->flags);
}
+int dm_suspended(struct dm_target *ti)
+{
+ struct mapped_device *md = dm_table_get_md(ti->table);
+ int r = dm_suspended_md(md);
+
+ dm_put(md);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_suspended);
+
int dm_noflush_suspending(struct dm_target *ti)
{
struct mapped_device *md = dm_table_get_md(ti->table);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a7663eba17e2..8dadaa5bc396 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -89,6 +89,16 @@ int dm_target_iterate(void (*iter_func)(struct target_type *tt,
int dm_split_args(int *argc, char ***argvp, char *input);
/*
+ * Is this mapped_device being deleted?
+ */
+int dm_deleting_md(struct mapped_device *md);
+
+/*
+ * Is this mapped_device suspended?
+ */
+int dm_suspended_md(struct mapped_device *md);
+
+/*
* The device-mapper can be driven through one of two interfaces;
* ioctl or filesystem, depending which patch you have applied.
*/
@@ -118,6 +128,9 @@ int dm_lock_for_deletion(struct mapped_device *md);
void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie);
+int dm_io_init(void);
+void dm_io_exit(void);
+
int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 87d88dbb667f..713acd02ab39 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -360,6 +360,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 1ceceb334d5e..00435bd20699 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -292,7 +292,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -383,6 +383,7 @@ static void linear_exit (void)
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5f154ef1e4be..f4f5f82f9f53 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,11 +39,13 @@
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
@@ -68,6 +70,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
+ * count by 2 for every hour elapsed between read errors.
+ */
+#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
@@ -213,12 +221,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
return 0;
}
rcu_read_lock();
- if (mddev->suspended) {
+ if (mddev->suspended || mddev->barrier) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
+ if (!mddev->suspended && !mddev->barrier)
break;
rcu_read_unlock();
schedule();
@@ -260,10 +268,110 @@ static void mddev_resume(mddev_t *mddev)
int mddev_congested(mddev_t *mddev, int bits)
{
+ if (mddev->barrier)
+ return 1;
return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
+/*
+ * Generic barrier handling for md
+ */
+
+#define POST_REQUEST_BARRIER ((void*)1)
+
+static void md_end_barrier(struct bio *bio, int err)
+{
+ mdk_rdev_t *rdev = bio->bi_private;
+ mddev_t *mddev = rdev->mddev;
+ if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
+ set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
+
+ rdev_dec_pending(rdev, mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ if (mddev->barrier == POST_REQUEST_BARRIER) {
+ /* This was a post-request barrier */
+ mddev->barrier = NULL;
+ wake_up(&mddev->sb_wait);
+ } else
+ /* The pre-request barrier has finished */
+ schedule_work(&mddev->barrier_work);
+ }
+ bio_put(bio);
+}
+
+static void submit_barriers(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ /* Take two references, one is dropped
+ * when the request finishes, one after
+ * we re-take the rcu_read_lock
+ */
+ struct bio *bi;
+ atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ bi = bio_alloc(GFP_KERNEL, 0);
+ bi->bi_end_io = md_end_barrier;
+ bi->bi_private = rdev;
+ bi->bi_bdev = rdev->bdev;
+ atomic_inc(&mddev->flush_pending);
+ submit_bio(WRITE_BARRIER, bi);
+ rcu_read_lock();
+ rdev_dec_pending(rdev, mddev);
+ }
+ rcu_read_unlock();
+}
+
+static void md_submit_barrier(struct work_struct *ws)
+{
+ mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
+ struct bio *bio = mddev->barrier;
+
+ atomic_set(&mddev->flush_pending, 1);
+
+ if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+ bio_endio(bio, -EOPNOTSUPP);
+ else if (bio->bi_size == 0)
+ /* an empty barrier - all done */
+ bio_endio(bio, 0);
+ else {
+ bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ if (mddev->pers->make_request(mddev->queue, bio))
+ generic_make_request(bio);
+ mddev->barrier = POST_REQUEST_BARRIER;
+ submit_barriers(mddev);
+ }
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ mddev->barrier = NULL;
+ wake_up(&mddev->sb_wait);
+ }
+}
+
+void md_barrier_request(mddev_t *mddev, struct bio *bio)
+{
+ spin_lock_irq(&mddev->write_lock);
+ wait_event_lock_irq(mddev->sb_wait,
+ !mddev->barrier,
+ mddev->write_lock, /*nothing*/);
+ mddev->barrier = bio;
+ spin_unlock_irq(&mddev->write_lock);
+
+ atomic_set(&mddev->flush_pending, 1);
+ INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+
+ submit_barriers(mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending))
+ schedule_work(&mddev->barrier_work);
+}
+EXPORT_SYMBOL(md_barrier_request);
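
For illustration only (not part of the patch): md_barrier_request() and md_end_barrier() rely on flush_pending starting at 1, so the "all done" step cannot fire while barriers are still being submitted; the submitter drops that extra reference last. A hedged userspace sketch of the same counting pattern, with a hypothetical number of member devices, is:

#include <stdatomic.h>
#include <stdio.h>

/* Counter starts at 1: the completion step cannot run until the
 * submitter drops its extra reference after the submit loop. */
static atomic_int flush_pending;

static void barrier_done(void)
{
    if (atomic_fetch_sub(&flush_pending, 1) == 1)
        printf("all barriers completed, run next stage\n");
}

int main(void)
{
    int i, ndisks = 3;  /* hypothetical number of member devices */

    atomic_store(&flush_pending, 1);
    for (i = 0; i < ndisks; i++) {
        atomic_fetch_add(&flush_pending, 1);
        /* In the kernel this is submit_bio(); its completion calls
         * md_end_barrier(), modelled here by barrier_done(). */
        barrier_done();
    }
    barrier_done();     /* drop the initial reference */
    return 0;
}
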
static inline mddev_t *mddev_get(mddev_t *mddev)
{
@@ -363,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit)
mutex_init(&new->open_mutex);
mutex_init(&new->reconfig_mutex);
+ mutex_init(&new->bitmap_info.mutex);
INIT_LIST_HEAD(&new->disks);
INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer);
@@ -370,6 +479,7 @@ static mddev_t * mddev_find(dev_t unit)
atomic_set(&new->openers, 0);
atomic_set(&new->active_io, 0);
spin_lock_init(&new->write_lock);
+ atomic_set(&new->flush_pending, 0);
init_waitqueue_head(&new->sb_wait);
init_waitqueue_head(&new->recovery_wait);
new->reshape_position = MaxSector;
@@ -748,7 +858,7 @@ struct super_type {
*/
int md_check_no_bitmap(mddev_t *mddev)
{
- if (!mddev->bitmap_file && !mddev->bitmap_offset)
+ if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
@@ -876,8 +986,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = sb->size * 2;
mddev->events = ev1;
- mddev->bitmap_offset = 0;
- mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
@@ -911,8 +1021,9 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
- mddev->bitmap_file == NULL)
- mddev->bitmap_offset = mddev->default_bitmap_offset;
+ mddev->bitmap_info.file == NULL)
+ mddev->bitmap_info.offset =
+ mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling */
@@ -1029,7 +1140,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
- if (mddev->bitmap && mddev->bitmap_file == NULL)
+ if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
@@ -1107,7 +1218,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
- if (rdev->mddev->bitmap_offset)
+ if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev->bdev);
if (!num_sectors || num_sectors > rdev->sb_start)
@@ -1286,8 +1397,8 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
- mddev->bitmap_offset = 0;
- mddev->default_bitmap_offset = 1024 >> 9;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1295,8 +1406,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
- mddev->bitmap_file == NULL )
- mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
+ mddev->bitmap_info.file == NULL )
+ mddev->bitmap_info.offset =
+ (__s32)le32_to_cpu(sb->bitmap_offset);
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
@@ -1390,19 +1502,17 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
- if (mddev->bitmap && mddev->bitmap_file == NULL) {
- sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
+ if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
+ sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
- if (rdev->recovery_offset > 0) {
- sb->feature_map |=
- cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
- sb->recovery_offset =
- cpu_to_le64(rdev->recovery_offset);
- }
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+ sb->recovery_offset =
+ cpu_to_le64(rdev->recovery_offset);
}
if (mddev->reshape_position != MaxSector) {
@@ -1436,7 +1546,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
- else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
+ else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -1458,7 +1568,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
- } else if (rdev->mddev->bitmap_offset) {
+ } else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
@@ -1826,15 +1936,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
uuid = sb->set_uuid;
printk(KERN_INFO
- "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
- ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+ "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
"md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15],
+ uuid,
sb->set_name,
(unsigned long long)le64_to_cpu(sb->ctime)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
@@ -1843,8 +1949,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
- "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
- ":%02x%02x%02x%02x%02x%02x\n"
+ "md: Dev:%08x UUID: %pU\n"
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
"md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
@@ -1857,10 +1962,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
(unsigned long long)le64_to_cpu(sb->super_offset),
(unsigned long long)le64_to_cpu(sb->recovery_offset),
le32_to_cpu(sb->dev_number),
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15],
+ uuid,
sb->devflags,
(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
(unsigned long long)le64_to_cpu(sb->events),
@@ -2442,12 +2544,49 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
+
+static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
+{
+ unsigned long long recovery_start = rdev->recovery_offset;
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ recovery_start == MaxSector)
+ return sprintf(page, "none\n");
+
+ return sprintf(page, "%llu\n", recovery_start);
+}
+
+static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ unsigned long long recovery_start;
+
+ if (cmd_match(buf, "none"))
+ recovery_start = MaxSector;
+ else if (strict_strtoull(buf, 10, &recovery_start))
+ return -EINVAL;
+
+ if (rdev->mddev->pers &&
+ rdev->raid_disk >= 0)
+ return -EBUSY;
+
+ rdev->recovery_offset = recovery_start;
+ if (recovery_start == MaxSector)
+ set_bit(In_sync, &rdev->flags);
+ else
+ clear_bit(In_sync, &rdev->flags);
+ return len;
+}
+
+static struct rdev_sysfs_entry rdev_recovery_start =
+__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_size.attr,
+ &rdev_recovery_start.attr,
NULL,
};
static ssize_t
@@ -2549,6 +2688,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
rdev->flags = 0;
rdev->data_offset = 0;
rdev->sb_events = 0;
+ rdev->last_read_error.tv_sec = 0;
+ rdev->last_read_error.tv_nsec = 0;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
@@ -2659,6 +2800,47 @@ static void analyze_sbs(mddev_t * mddev)
}
}
+/* Read a fixed-point number.
+ * Numbers in sysfs attributes should be in "standard" units where
+ * possible, so time should be in seconds.
+ * However, we internally use a much smaller unit such as
+ * milliseconds or jiffies.
+ * This function takes a decimal number with a possible fractional
+ * component, and produces an integer which is the result of
+ * multiplying that number by 10^'scale',
+ * all without any floating-point arithmetic.
+ */
+int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
+{
+ unsigned long result = 0;
+ long decimals = -1;
+ while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
+ if (*cp == '.')
+ decimals = 0;
+ else if (decimals < scale) {
+ unsigned int value;
+ value = *cp - '0';
+ result = result * 10 + value;
+ if (decimals >= 0)
+ decimals++;
+ }
+ cp++;
+ }
+ if (*cp == '\n')
+ cp++;
+ if (*cp)
+ return -EINVAL;
+ if (decimals < 0)
+ decimals = 0;
+ while (decimals < scale) {
+ result *= 10;
+ decimals++;
+ }
+ *res = result;
+ return 0;
+}
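
For illustration only (not part of the patch): a userspace restatement of the fixed-point parse above shows the intended behaviour — "1.273" with scale 3 yields 1273, and fractional digits beyond the scale are dropped. The kernel version remains the authoritative one; this is just a sketch:

#include <ctype.h>
#include <stdio.h>

/* Userspace restatement of strict_strtoul_scaled() for illustration. */
static int parse_scaled(const char *cp, unsigned long *res, int scale)
{
    unsigned long result = 0;
    long decimals = -1;

    while (isdigit((unsigned char)*cp) || (*cp == '.' && decimals < 0)) {
        if (*cp == '.')
            decimals = 0;
        else if (decimals < scale) {
            result = result * 10 + (*cp - '0');
            if (decimals >= 0)
                decimals++;
        }
        cp++;
    }
    if (*cp == '\n')
        cp++;
    if (*cp)
        return -1;
    if (decimals < 0)
        decimals = 0;
    while (decimals < scale) {
        result *= 10;
        decimals++;
    }
    *res = result;
    return 0;
}

int main(void)
{
    unsigned long ms;

    /* "1.273" seconds with scale 3 becomes 1273 milliseconds. */
    if (!parse_scaled("1.273\n", &ms, 3))
        printf("1.273 -> %lu\n", ms);
    /* Fractional digits beyond the scale are simply dropped. */
    if (!parse_scaled("0.0015\n", &ms, 3))
        printf("0.0015 -> %lu\n", ms);
    return 0;
}
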
+
+
static void md_safemode_timeout(unsigned long data);
static ssize_t
@@ -2670,31 +2852,10 @@ safe_delay_show(mddev_t *mddev, char *page)
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
- int scale=1;
- int dot=0;
- int i;
unsigned long msec;
- char buf[30];
- /* remove a period, and count digits after it */
- if (len >= sizeof(buf))
- return -EINVAL;
- strlcpy(buf, cbuf, sizeof(buf));
- for (i=0; i<len; i++) {
- if (dot) {
- if (isdigit(buf[i])) {
- buf[i-1] = buf[i];
- scale *= 10;
- }
- buf[i] = 0;
- } else if (buf[i] == '.') {
- dot=1;
- buf[i] = 0;
- }
- }
- if (strict_strtoul(buf, 10, &msec) < 0)
+ if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
- msec = (msec * 1000) / scale;
if (msec == 0)
mddev->safemode_delay = 0;
else {
@@ -2970,7 +3131,9 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers)
return -EBUSY;
- if (!*buf || (*e && *e != '\n'))
+ if (cmd_match(buf, "none"))
+ n = MaxSector;
+ else if (!*buf || (*e && *e != '\n'))
return -EINVAL;
mddev->recovery_cp = n;
@@ -3166,6 +3329,29 @@ static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
+max_corrected_read_errors_show(mddev_t *mddev, char *page) {
+ return sprintf(page, "%d\n",
+ atomic_read(&mddev->max_corr_read_errors));
+}
+
+static ssize_t
+max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+
+ if (*buf && (*e == 0 || *e == '\n')) {
+ atomic_set(&mddev->max_corr_read_errors, n);
+ return len;
+ }
+ return -EINVAL;
+}
+
+static struct md_sysfs_entry max_corr_read_errors =
+__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
+ max_corrected_read_errors_store);
+
+static ssize_t
null_show(mddev_t *mddev, char *page)
{
return -EINVAL;
@@ -3246,8 +3432,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len)
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
- buf = end;
- while (isspace(*buf)) buf++;
+ buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
@@ -3790,6 +3975,7 @@ static struct attribute *md_default_attrs[] = {
&md_array_state.attr,
&md_reshape_position.attr,
&md_array_size.attr,
+ &max_corr_read_errors.attr,
NULL,
};
@@ -3894,6 +4080,7 @@ static void mddev_delayed_delete(struct work_struct *ws)
mddev->sysfs_action = NULL;
mddev->private = NULL;
}
+ sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -3985,6 +4172,8 @@ static int md_alloc(dev_t dev, char *name)
disk->disk_name);
error = 0;
}
+ if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+ printk(KERN_DEBUG "pointless warning\n");
abort:
mutex_unlock(&disks_mutex);
if (!error) {
@@ -4206,6 +4395,8 @@ static int do_md_run(mddev_t * mddev)
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
+ atomic_set(&mddev->max_corr_read_errors,
+ MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
@@ -4310,7 +4501,7 @@ static int deny_bitmap_write_access(struct file * file)
return 0;
}
-static void restore_bitmap_write_access(struct file *file)
+void restore_bitmap_write_access(struct file *file)
{
struct inode *inode = file->f_mapping->host;
@@ -4405,12 +4596,12 @@ out:
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
- if (mddev->bitmap_file) {
- restore_bitmap_write_access(mddev->bitmap_file);
- fput(mddev->bitmap_file);
- mddev->bitmap_file = NULL;
+ if (mddev->bitmap_info.file) {
+ restore_bitmap_write_access(mddev->bitmap_info.file);
+ fput(mddev->bitmap_info.file);
+ mddev->bitmap_info.file = NULL;
}
- mddev->bitmap_offset = 0;
+ mddev->bitmap_info.offset = 0;
/* make sure all md_delayed_delete calls have finished */
flush_scheduled_work();
@@ -4451,6 +4642,11 @@ out:
mddev->degraded = 0;
mddev->barriers_work = 0;
mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.chunksize = 0;
+ mddev->bitmap_info.daemon_sleep = 0;
+ mddev->bitmap_info.max_write_behind = 0;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
@@ -4636,7 +4832,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
- if (mddev->bitmap && mddev->bitmap_offset)
+ if (mddev->bitmap && mddev->bitmap_info.offset)
info.state = (1<<MD_SB_BITMAP_PRESENT);
info.active_disks = insync;
info.working_disks = working;
@@ -4994,23 +5190,23 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
if (fd >= 0) {
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
- mddev->bitmap_file = fget(fd);
+ mddev->bitmap_info.file = fget(fd);
- if (mddev->bitmap_file == NULL) {
+ if (mddev->bitmap_info.file == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
- err = deny_bitmap_write_access(mddev->bitmap_file);
+ err = deny_bitmap_write_access(mddev->bitmap_info.file);
if (err) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
- fput(mddev->bitmap_file);
- mddev->bitmap_file = NULL;
+ fput(mddev->bitmap_info.file);
+ mddev->bitmap_info.file = NULL;
return err;
}
- mddev->bitmap_offset = 0; /* file overrides offset */
+ mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
@@ -5025,11 +5221,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
- if (mddev->bitmap_file) {
- restore_bitmap_write_access(mddev->bitmap_file);
- fput(mddev->bitmap_file);
+ if (mddev->bitmap_info.file) {
+ restore_bitmap_write_access(mddev->bitmap_info.file);
+ fput(mddev->bitmap_info.file);
}
- mddev->bitmap_file = NULL;
+ mddev->bitmap_info.file = NULL;
}
return err;
@@ -5096,8 +5292,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
- mddev->bitmap_offset = 0;
+ mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
@@ -5197,7 +5393,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
int state = 0;
/* calculate expected state, ignoring low bits */
- if (mddev->bitmap && mddev->bitmap_offset)
+ if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
@@ -5256,9 +5452,10 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
/* add the bitmap */
if (mddev->bitmap)
return -EEXIST;
- if (mddev->default_bitmap_offset == 0)
+ if (mddev->bitmap_info.default_offset == 0)
return -EINVAL;
- mddev->bitmap_offset = mddev->default_bitmap_offset;
+ mddev->bitmap_info.offset =
+ mddev->bitmap_info.default_offset;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
if (rv)
@@ -5273,7 +5470,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
- mddev->bitmap_offset = 0;
+ mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
@@ -5524,6 +5721,25 @@ done:
abort:
return err;
}
+#ifdef CONFIG_COMPAT
+static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case HOT_REMOVE_DISK:
+ case HOT_ADD_DISK:
+ case SET_DISK_FAULTY:
+ case SET_BITMAP_FILE:
+ /* These take an integer arg; do not convert it */
+ break;
+ default:
+ arg = (unsigned long)compat_ptr(arg);
+ break;
+ }
+
+ return md_ioctl(bdev, mode, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
@@ -5589,6 +5805,9 @@ static const struct block_device_operations md_fops =
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = md_compat_ioctl,
+#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
@@ -5982,14 +6201,14 @@ static int md_seq_show(struct seq_file *seq, void *v)
unsigned long chunk_kb;
unsigned long flags;
spin_lock_irqsave(&bitmap->lock, flags);
- chunk_kb = bitmap->chunksize >> 10;
+ chunk_kb = mddev->bitmap_info.chunksize >> 10;
seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
"%lu%s chunk",
bitmap->pages - bitmap->missing_pages,
bitmap->pages,
(bitmap->pages - bitmap->missing_pages)
<< (PAGE_SHIFT - 10),
- chunk_kb ? chunk_kb : bitmap->chunksize,
+ chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
chunk_kb ? "KB" : "B");
if (bitmap->file) {
seq_printf(seq, ", file: ");
@@ -6338,12 +6557,14 @@ void md_do_sync(mddev_t *mddev)
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
- list_for_each_entry(rdev, &mddev->disks, same_set)
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
+ rcu_read_unlock();
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -6380,6 +6601,7 @@ void md_do_sync(mddev_t *mddev)
desc, mdname(mddev));
mddev->curr_resync = j;
}
+ mddev->curr_resync_completed = mddev->curr_resync;
while (j < max_sectors) {
sector_t sectors;
@@ -6512,22 +6734,29 @@ void md_do_sync(mddev_t *mddev)
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
- list_for_each_entry(rdev, &mddev->disks, same_set)
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
+ rcu_read_unlock();
}
}
set_bit(MD_CHANGE_DEVS, &mddev->flags);
skip:
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ /* We completed so min/max setting can be forgotten if used. */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
- mddev->curr_resync_completed = 0;
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
- /* We completed so max setting can be forgotten. */
- mddev->resync_max = MaxSector;
+ mddev->curr_resync_completed = 0;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
@@ -6590,6 +6819,7 @@ static int remove_and_add_spares(mddev_t *mddev)
nm, mdname(mddev));
spares++;
md_new_event(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
} else
break;
}
@@ -6625,7 +6855,7 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->bitmap)
- bitmap_daemon_work(mddev->bitmap);
+ bitmap_daemon_work(mddev);
if (mddev->ro)
return;
@@ -6995,5 +7225,6 @@ EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index f184b69ef337..8e4c75c00d46 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -97,6 +97,9 @@ struct mdk_rdev_s
atomic_t read_errors; /* number of consecutive read errors that
* we have tried to ignore.
*/
+ struct timespec last_read_error; /* monotonic time since our
+ * last read error
+ */
atomic_t corrected_errors; /* number of corrected read errors,
* for reporting to userspace and storing
* in superblock.
@@ -280,17 +283,38 @@ struct mddev_s
unsigned int max_write_behind; /* 0 = sync */
struct bitmap *bitmap; /* the bitmap for the device */
- struct file *bitmap_file; /* the bitmap file */
- long bitmap_offset; /* offset from superblock of
- * start of bitmap. May be
- * negative, but not '0'
- */
- long default_bitmap_offset; /* this is the offset to use when
- * hot-adding a bitmap. It should
- * eventually be settable by sysfs.
- */
-
+ struct {
+ struct file *file; /* the bitmap file */
+ loff_t offset; /* offset from superblock of
+ * start of bitmap. May be
+ * negative, but not '0'
+ * For external metadata, offset
+ * from start of device.
+ */
+ loff_t default_offset; /* this is the offset to use when
+ * hot-adding a bitmap. It should
+ * eventually be settable by sysfs.
+ */
+ struct mutex mutex;
+ unsigned long chunksize;
+ unsigned long daemon_sleep; /* how many seconds between updates? */
+ unsigned long max_write_behind; /* write-behind mode */
+ int external;
+ } bitmap_info;
+
+ atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
+
+ /* Generic barrier handling.
+ * If there is a pending barrier request, all other
+ * writes are blocked while the devices are flushed.
+ * The last to finish a flush schedules a worker to
+ * submit the barrier request (without the barrier flag),
+ * then submit more flush requests.
+ */
+ struct bio *barrier;
+ atomic_t flush_pending;
+ struct work_struct barrier_work;
};
@@ -353,7 +377,7 @@ struct md_sysfs_entry {
ssize_t (*show)(mddev_t *, char *);
ssize_t (*store)(mddev_t *, const char *, size_t);
};
-
+extern struct attribute_group md_bitmap_group;
static inline char * mdname (mddev_t * mddev)
{
@@ -431,6 +455,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
extern int mddev_congested(mddev_t *mddev, int bits);
+extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
@@ -443,6 +468,8 @@ extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
-void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
+extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
+extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
+extern void restore_bitmap_write_access(struct file *file);
#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index ee7646f974a0..32a662fc55c9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -145,7 +145,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -581,6 +581,7 @@ static void __exit multipath_exit (void)
module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d3a4ce06015a..77605cdceaf1 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -453,7 +453,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -567,6 +567,7 @@ static void raid0_exit (void)
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e07ce2e033a9..859bd3ffe435 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -677,6 +677,7 @@ static void raise_barrier(conf_t *conf)
static void lower_barrier(conf_t *conf)
{
unsigned long flags;
+ BUG_ON(conf->barrier <= 0);
spin_lock_irqsave(&conf->resync_lock, flags);
conf->barrier--;
spin_unlock_irqrestore(&conf->resync_lock, flags);
@@ -801,6 +802,25 @@ static int make_request(struct request_queue *q, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
+ if (bio_data_dir(bio) == WRITE &&
+ bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+ bio->bi_sector < mddev->suspend_hi) {
+ /* As the suspend_* range is controlled by
+ * userspace, we want an interruptible
+ * wait.
+ */
+ DEFINE_WAIT(w);
+ for (;;) {
+ flush_signals(current);
+ prepare_to_wait(&conf->wait_barrier,
+ &w, TASK_INTERRUPTIBLE);
+ if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+ bio->bi_sector >= mddev->suspend_hi)
+ break;
+ schedule();
+ }
+ finish_wait(&conf->wait_barrier, &w);
+ }
if (unlikely(!mddev->barriers_work &&
bio_rw_flagged(bio, BIO_RW_BARRIER))) {
if (rw == WRITE)
@@ -923,7 +943,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
/* do behind I/O ? */
if (bitmap &&
- atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
+ (atomic_read(&bitmap->behind_writes)
+ < mddev->bitmap_info.max_write_behind) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1941,74 +1962,48 @@ static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return mddev->dev_sectors;
}
-static int run(mddev_t *mddev)
+static conf_t *setup_conf(mddev_t *mddev)
{
conf_t *conf;
- int i, j, disk_idx;
+ int i;
mirror_info_t *disk;
mdk_rdev_t *rdev;
+ int err = -ENOMEM;
- if (mddev->level != 1) {
- printk("raid1: %s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
- goto out;
- }
- if (mddev->reshape_position != MaxSector) {
- printk("raid1: %s: reshape_position set but not supported\n",
- mdname(mddev));
- goto out;
- }
- /*
- * copy the already verified devices into our private RAID1
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
- mddev->private = conf;
if (!conf)
- goto out_no_mem;
+ goto abort;
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->mirrors)
- goto out_no_mem;
+ goto abort;
conf->tmppage = alloc_page(GFP_KERNEL);
if (!conf->tmppage)
- goto out_no_mem;
+ goto abort;
- conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
+ conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
if (!conf->poolinfo)
- goto out_no_mem;
- conf->poolinfo->mddev = NULL;
+ goto abort;
conf->poolinfo->raid_disks = mddev->raid_disks;
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free,
conf->poolinfo);
if (!conf->r1bio_pool)
- goto out_no_mem;
+ goto abort;
+
conf->poolinfo->mddev = mddev;
spin_lock_init(&conf->device_lock);
- mddev->queue->queue_lock = &conf->device_lock;
-
list_for_each_entry(rdev, &mddev->disks, same_set) {
- disk_idx = rdev->raid_disk;
+ int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
continue;
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
- */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}
@@ -2022,8 +2017,7 @@ static int run(mddev_t *mddev)
bio_list_init(&conf->pending_bio_list);
bio_list_init(&conf->flushing_bio_list);
-
- mddev->degraded = 0;
+ conf->last_used = -1;
for (i = 0; i < conf->raid_disks; i++) {
disk = conf->mirrors + i;
@@ -2031,38 +2025,97 @@ static int run(mddev_t *mddev)
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
- mddev->degraded++;
if (disk->rdev)
conf->fullsync = 1;
- }
+ } else if (conf->last_used < 0)
+ /*
+ * The first working device is used as a
+ * starting point to read balancing.
+ */
+ conf->last_used = i;
}
- if (mddev->degraded == conf->raid_disks) {
+
+ err = -EIO;
+ if (conf->last_used < 0) {
printk(KERN_ERR "raid1: no operational mirrors for %s\n",
- mdname(mddev));
- goto out_free_conf;
+ mdname(mddev));
+ goto abort;
}
- if (conf->raid_disks - mddev->degraded == 1)
- mddev->recovery_cp = MaxSector;
+ err = -ENOMEM;
+ conf->thread = md_register_thread(raid1d, mddev, NULL);
+ if (!conf->thread) {
+ printk(KERN_ERR
+ "raid1: couldn't allocate thread for %s\n",
+ mdname(mddev));
+ goto abort;
+ }
+
+ return conf;
+
+ abort:
+ if (conf) {
+ if (conf->r1bio_pool)
+ mempool_destroy(conf->r1bio_pool);
+ kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
+ kfree(conf->poolinfo);
+ kfree(conf);
+ }
+ return ERR_PTR(err);
+}
+static int run(mddev_t *mddev)
+{
+ conf_t *conf;
+ int i;
+ mdk_rdev_t *rdev;
+
+ if (mddev->level != 1) {
+ printk("raid1: %s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
+ return -EIO;
+ }
+ if (mddev->reshape_position != MaxSector) {
+ printk("raid1: %s: reshape_position set but not supported\n",
+ mdname(mddev));
+ return -EIO;
+ }
/*
- * find the first working one and use it as a starting point
- * to read balancing.
+ * copy the already verified devices into our private RAID1
+ * bookkeeping area. [whatever we allocate in run(),
+ * should be freed in stop()]
*/
- for (j = 0; j < conf->raid_disks &&
- (!conf->mirrors[j].rdev ||
- !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
- /* nothing */;
- conf->last_used = j;
+ if (mddev->private == NULL)
+ conf = setup_conf(mddev);
+ else
+ conf = mddev->private;
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
- mddev->thread = md_register_thread(raid1d, mddev, NULL);
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid1: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto out_free_conf;
+ mddev->queue->queue_lock = &conf->device_lock;
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+ * violating it, so limit ->max_sector to one PAGE, as
+ * a one page request is never in violation.
+ */
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
}
+ mddev->degraded = 0;
+ for (i=0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].rdev == NULL ||
+ !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
+ test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+ mddev->degraded++;
+
+ if (conf->raid_disks - mddev->degraded == 1)
+ mddev->recovery_cp = MaxSector;
+
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "raid1: %s is not clean"
" -- starting background reconstruction\n",
@@ -2071,9 +2124,14 @@ static int run(mddev_t *mddev)
"raid1: raid set %s active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
+
/*
* Ok, everything is just fine now
*/
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+ mddev->private = conf;
+
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
mddev->queue->unplug_fn = raid1_unplug;
@@ -2081,23 +2139,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
return 0;
-
-out_no_mem:
- printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
- mdname(mddev));
-
-out_free_conf:
- if (conf) {
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
- kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
- kfree(conf);
- mddev->private = NULL;
- }
-out:
- return -EIO;
}
static int stop(mddev_t *mddev)
@@ -2271,6 +2312,9 @@ static void raid1_quiesce(mddev_t *mddev, int state)
conf_t *conf = mddev->private;
switch(state) {
+ case 2: /* wake for suspend */
+ wake_up(&conf->wait_barrier);
+ break;
case 1:
raise_barrier(conf);
break;
@@ -2280,6 +2324,23 @@ static void raid1_quiesce(mddev_t *mddev, int state)
}
}
+static void *raid1_takeover(mddev_t *mddev)
+{
+ /* raid1 can take over:
+ * raid5 with 2 devices, any layout or chunk size
+ */
+ if (mddev->level == 5 && mddev->raid_disks == 2) {
+ conf_t *conf;
+ mddev->new_level = 1;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 0;
+ conf = setup_conf(mddev);
+ if (!IS_ERR(conf))
+ conf->barrier = 1;
+ return conf;
+ }
+ return ERR_PTR(-EINVAL);
+}
static struct mdk_personality raid1_personality =
{
@@ -2299,6 +2360,7 @@ static struct mdk_personality raid1_personality =
.size = raid1_size,
.check_reshape = raid1_reshape,
.quiesce = raid1_quiesce,
+ .takeover = raid1_takeover,
};
static int __init raid_init(void)
@@ -2314,6 +2376,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index e87b84deff68..5f2d443ae28a 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -59,6 +59,11 @@ struct r1_private_data_s {
mempool_t *r1bio_pool;
mempool_t *r1buf_pool;
+
+ /* When taking over an array from a different personality, we store
+ * the new thread here until we fully activate the array.
+ */
+ struct mdk_thread_s *thread;
};
typedef struct r1_private_data_s conf_t;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c2cb7b87b440..d119b7b75e71 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -804,7 +804,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mdk_rdev_t *blocked_rdev;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -1432,6 +1432,43 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
/*
+ * Used by fix_read_error() to decay the per rdev read_errors.
+ * We halve the read error count for every hour that has elapsed
+ * since the last recorded read error.
+ *
+ */
+static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ struct timespec cur_time_mon;
+ unsigned long hours_since_last;
+ unsigned int read_errors = atomic_read(&rdev->read_errors);
+
+ ktime_get_ts(&cur_time_mon);
+
+ if (rdev->last_read_error.tv_sec == 0 &&
+ rdev->last_read_error.tv_nsec == 0) {
+ /* first time we've seen a read error */
+ rdev->last_read_error = cur_time_mon;
+ return;
+ }
+
+ hours_since_last = (cur_time_mon.tv_sec -
+ rdev->last_read_error.tv_sec) / 3600;
+
+ rdev->last_read_error = cur_time_mon;
+
+ /*
+ * if hours_since_last is at least the number of bits in read_errors,
+ * just set read_errors to 0. We do this to avoid
+ * an undefined shift of read_errors by hours_since_last.
+ */
+ if (hours_since_last >= 8 * sizeof(read_errors))
+ atomic_set(&rdev->read_errors, 0);
+ else
+ atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+}
+
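/*
 * Illustrative sketch, not part of the patch: the decay arithmetic described
 * above -- the stored error count is halved once per elapsed hour and cleared
 * outright once the shift would reach the width of the counter.
 */
static unsigned int decay_read_errors(unsigned int errors,
				      unsigned long hours_since_last)
{
	if (hours_since_last >= 8 * sizeof(errors))
		return 0;	/* avoid shifting by the full word width or more */
	return errors >> hours_since_last;
}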
+/*
* This is a kernel thread which:
*
* 1. Retries failed read operations on working mirrors.
@@ -1444,6 +1481,43 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
int sect = 0; /* Offset from r10_bio->sector */
int sectors = r10_bio->sectors;
mdk_rdev_t*rdev;
+ int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
+
+ rcu_read_lock();
+ {
+ int d = r10_bio->devs[r10_bio->read_slot].devnum;
+ char b[BDEVNAME_SIZE];
+ int cur_read_error_count = 0;
+
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
+ bdevname(rdev->bdev, b);
+
+ if (test_bit(Faulty, &rdev->flags)) {
+ rcu_read_unlock();
+ /* drive has already been failed, just ignore any
+ more fix_read_error() attempts */
+ return;
+ }
+
+ check_decay_read_errors(mddev, rdev);
+ atomic_inc(&rdev->read_errors);
+ cur_read_error_count = atomic_read(&rdev->read_errors);
+ if (cur_read_error_count > max_read_errors) {
+ rcu_read_unlock();
+ printk(KERN_NOTICE
+ "raid10: %s: Raid device exceeded "
+ "read_error threshold "
+ "[cur %d:max %d]\n",
+ b, cur_read_error_count, max_read_errors);
+ printk(KERN_NOTICE
+ "raid10: %s: Failing raid "
+ "device\n", b);
+ md_error(mddev, conf->mirrors[d].rdev);
+ return;
+ }
+ }
+ rcu_read_unlock();
+
while(sectors) {
int s = sectors;
int sl = r10_bio->read_slot;
@@ -1488,6 +1562,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
/* write it back and re-read */
rcu_read_lock();
while (sl != r10_bio->read_slot) {
+ char b[BDEVNAME_SIZE];
int d;
if (sl==0)
sl = conf->copies;
@@ -1503,9 +1578,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE)
- == 0)
+ == 0) {
/* Well, this device is dead */
+ printk(KERN_NOTICE
+ "raid10:%s: read correction "
+ "write failed"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect+
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "raid10:%s: failing "
+ "drive\n",
+ bdevname(rdev->bdev, b));
md_error(mddev, rdev);
+ }
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
@@ -1526,10 +1613,22 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
if (sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
- s<<9, conf->tmppage, READ) == 0)
+ s<<9, conf->tmppage,
+ READ) == 0) {
/* Well, this device is dead */
+ printk(KERN_NOTICE
+ "raid10:%s: unable to read back "
+ "corrected sectors"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect+
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "raid10:%s: failing drive\n",
+ bdevname(rdev->bdev, b));
+
md_error(mddev, rdev);
- else
+ } else {
printk(KERN_INFO
"raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
@@ -1537,6 +1636,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
+ }
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
@@ -2275,13 +2375,6 @@ static void raid10_quiesce(mddev_t *mddev, int state)
lower_barrier(conf);
break;
}
- if (mddev->thread) {
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
- else
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- md_wakeup_thread(mddev->thread);
- }
}
static struct mdk_personality raid10_personality =
@@ -2315,6 +2408,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d29215d966da..e84204eb12df 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2947,6 +2947,7 @@ static void handle_stripe5(struct stripe_head *sh)
struct r5dev *dev;
mdk_rdev_t *blocked_rdev = NULL;
int prexor;
+ int dec_preread_active = 0;
memset(&s, 0, sizeof(s));
pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -3096,12 +3097,8 @@ static void handle_stripe5(struct stripe_head *sh)
set_bit(STRIPE_INSYNC, &sh->state);
}
}
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
@@ -3208,6 +3205,16 @@ static void handle_stripe5(struct stripe_head *sh)
ops_run_io(sh, &s);
+ if (dec_preread_active) {
+ /* We delay this until after ops_run_io so that if make_request
+ * is waiting on a barrier, it won't continue until the writes
+ * have actually been submitted.
+ */
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
return_io(return_bi);
}
@@ -3221,6 +3228,7 @@ static void handle_stripe6(struct stripe_head *sh)
struct r6_state r6s;
struct r5dev *dev, *pdev, *qdev;
mdk_rdev_t *blocked_rdev = NULL;
+ int dec_preread_active = 0;
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3358,7 +3366,6 @@ static void handle_stripe6(struct stripe_head *sh)
* completed
*/
if (sh->reconstruct_state == reconstruct_state_drain_result) {
- int qd_idx = sh->qd_idx;
sh->reconstruct_state = reconstruct_state_idle;
/* All the 'written' buffers and the parity blocks are ready to
@@ -3380,12 +3387,8 @@ static void handle_stripe6(struct stripe_head *sh)
set_bit(STRIPE_INSYNC, &sh->state);
}
}
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
@@ -3494,6 +3497,18 @@ static void handle_stripe6(struct stripe_head *sh)
ops_run_io(sh, &s);
+
+ if (dec_preread_active) {
+ /* We delay this until after ops_run_io so that if make_request
+ * is waiting on a barrier, it won't continue until the writes
+ * have actually been submitted.
+ */
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+
return_io(return_bi);
}
@@ -3741,7 +3756,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
- unsigned int dd_idx;
+ int dd_idx;
struct bio* align_bi;
mdk_rdev_t *rdev;
@@ -3866,7 +3881,13 @@ static int make_request(struct request_queue *q, struct bio * bi)
int cpu, remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
- bio_endio(bi, -EOPNOTSUPP);
+ /* Drain all pending writes. We only really need
+ * to ensure they have been submitted, but this is
+ * easier.
+ */
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ md_barrier_request(mddev, bi);
return 0;
}
@@ -3990,6 +4011,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
+ if (mddev->barrier &&
+ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4009,6 +4033,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
bio_endio(bi, 0);
}
+
+ if (mddev->barrier) {
+ /* We need to wait for the stripes to all be handled.
+ * So: wait for preread_active_stripes to drop to 0.
+ */
+ wait_event(mddev->thread->wqueue,
+ atomic_read(&conf->preread_active_stripes) == 0);
+ }
return 0;
}
@@ -5860,6 +5892,7 @@ static void raid5_exit(void)
module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 866215ac7f25..bffc61bff5ab 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -31,25 +31,6 @@ EXPORT_SYMBOL(raid6_empty_zero_page);
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
-/* Various routine sets */
-extern const struct raid6_calls raid6_intx1;
-extern const struct raid6_calls raid6_intx2;
-extern const struct raid6_calls raid6_intx4;
-extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
-extern const struct raid6_calls raid6_mmxx1;
-extern const struct raid6_calls raid6_mmxx2;
-extern const struct raid6_calls raid6_sse1x1;
-extern const struct raid6_calls raid6_sse1x2;
-extern const struct raid6_calls raid6_sse2x1;
-extern const struct raid6_calls raid6_sse2x2;
-extern const struct raid6_calls raid6_sse2x4;
-extern const struct raid6_calls raid6_altivec1;
-extern const struct raid6_calls raid6_altivec2;
-extern const struct raid6_calls raid6_altivec4;
-extern const struct raid6_calls raid6_altivec8;
-
const struct raid6_calls * const raid6_algos[] = {
&raid6_intx1,
&raid6_intx2,
@@ -169,3 +150,4 @@ static void raid6_exit(void)
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 12a1b3d7132d..c3916a42668e 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -2127,7 +2127,7 @@ vpfe_resume(struct device *dev)
return -1;
}
-static struct dev_pm_ops vpfe_dev_pm_ops = {
+static const struct dev_pm_ops vpfe_dev_pm_ops = {
.suspend = vpfe_suspend,
.resume = vpfe_resume,
};
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index d947ee5e4eb4..78130721f578 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2107,7 +2107,7 @@ vpif_resume(struct device *dev)
return -1;
}
-static struct dev_pm_ops vpif_dev_pm_ops = {
+static const struct dev_pm_ops vpif_dev_pm_ops = {
.suspend = vpif_suspend,
.resume = vpif_resume,
};
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index a4f3472d4db8..961e4484d721 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -1825,7 +1825,7 @@ static int sh_mobile_ceu_runtime_nop(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
+static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
.runtime_suspend = sh_mobile_ceu_runtime_nop,
.runtime_resume = sh_mobile_ceu_runtime_nop,
};
diff --git a/drivers/mfd/88pm8607.c b/drivers/mfd/88pm8607.c
new file mode 100644
index 000000000000..7e3f65907993
--- /dev/null
+++ b/drivers/mfd/88pm8607.c
@@ -0,0 +1,302 @@
+/*
+ * Base driver for Marvell 88PM8607
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm8607.h>
+
+
+#define PM8607_REG_RESOURCE(_start, _end) \
+{ \
+ .start = PM8607_##_start, \
+ .end = PM8607_##_end, \
+ .flags = IORESOURCE_IO, \
+}
+
+static struct resource pm8607_regulator_resources[] = {
+ PM8607_REG_RESOURCE(BUCK1, BUCK1),
+ PM8607_REG_RESOURCE(BUCK2, BUCK2),
+ PM8607_REG_RESOURCE(BUCK3, BUCK3),
+ PM8607_REG_RESOURCE(LDO1, LDO1),
+ PM8607_REG_RESOURCE(LDO2, LDO2),
+ PM8607_REG_RESOURCE(LDO3, LDO3),
+ PM8607_REG_RESOURCE(LDO4, LDO4),
+ PM8607_REG_RESOURCE(LDO5, LDO5),
+ PM8607_REG_RESOURCE(LDO6, LDO6),
+ PM8607_REG_RESOURCE(LDO7, LDO7),
+ PM8607_REG_RESOURCE(LDO8, LDO8),
+ PM8607_REG_RESOURCE(LDO9, LDO9),
+ PM8607_REG_RESOURCE(LDO10, LDO10),
+ PM8607_REG_RESOURCE(LDO12, LDO12),
+ PM8607_REG_RESOURCE(LDO14, LDO14),
+};
+
+#define PM8607_REG_DEVS(_name, _id) \
+{ \
+ .name = "88pm8607-" #_name, \
+ .num_resources = 1, \
+ .resources = &pm8607_regulator_resources[PM8607_ID_##_id], \
+}
+
+static struct mfd_cell pm8607_devs[] = {
+ PM8607_REG_DEVS(buck1, BUCK1),
+ PM8607_REG_DEVS(buck2, BUCK2),
+ PM8607_REG_DEVS(buck3, BUCK3),
+ PM8607_REG_DEVS(ldo1, LDO1),
+ PM8607_REG_DEVS(ldo2, LDO2),
+ PM8607_REG_DEVS(ldo3, LDO3),
+ PM8607_REG_DEVS(ldo4, LDO4),
+ PM8607_REG_DEVS(ldo5, LDO5),
+ PM8607_REG_DEVS(ldo6, LDO6),
+ PM8607_REG_DEVS(ldo7, LDO7),
+ PM8607_REG_DEVS(ldo8, LDO8),
+ PM8607_REG_DEVS(ldo9, LDO9),
+ PM8607_REG_DEVS(ldo10, LDO10),
+ PM8607_REG_DEVS(ldo12, LDO12),
+ PM8607_REG_DEVS(ldo14, LDO14),
+};
+
+static inline int pm8607_read_device(struct pm8607_chip *chip,
+ int reg, int bytes, void *dest)
+{
+ struct i2c_client *i2c = chip->client;
+ unsigned char data;
+ int ret;
+
+ data = (unsigned char)reg;
+ ret = i2c_master_send(i2c, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, dest, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int pm8607_write_device(struct pm8607_chip *chip,
+ int reg, int bytes, void *src)
+{
+ struct i2c_client *i2c = chip->client;
+ unsigned char buf[bytes + 1];
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+
+ ret = i2c_master_send(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int pm8607_reg_read(struct pm8607_chip *chip, int reg)
+{
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->read(chip, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ if (ret < 0)
+ return ret;
+ else
+ return (int)data;
+}
+EXPORT_SYMBOL(pm8607_reg_read);
+
+int pm8607_reg_write(struct pm8607_chip *chip, int reg,
+ unsigned char data)
+{
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->write(chip, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_reg_write);
+
+int pm8607_bulk_read(struct pm8607_chip *chip, int reg,
+ int count, unsigned char *buf)
+{
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->read(chip, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_bulk_read);
+
+int pm8607_bulk_write(struct pm8607_chip *chip, int reg,
+ int count, unsigned char *buf)
+{
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->write(chip, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_bulk_write);
+
+int pm8607_set_bits(struct pm8607_chip *chip, int reg,
+ unsigned char mask, unsigned char data)
+{
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->read(chip, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = chip->write(chip, reg, 1, &value);
+out:
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_set_bits);
+
+
+static const struct i2c_device_id pm8607_id_table[] = {
+ { "88PM8607", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pm8607_id_table);
+
+
+static int __devinit pm8607_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pm8607_platform_data *pdata = client->dev.platform_data;
+ struct pm8607_chip *chip;
+ int i, count;
+ int ret;
+
+ chip = kzalloc(sizeof(struct pm8607_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->read = pm8607_read_device;
+ chip->write = pm8607_write_device;
+ i2c_set_clientdata(client, chip);
+
+ mutex_init(&chip->io_lock);
+ dev_set_drvdata(chip->dev, chip);
+
+ ret = pm8607_reg_read(chip, PM8607_CHIP_ID);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
+ goto out;
+ }
+ if ((ret & CHIP_ID_MASK) == CHIP_ID)
+ dev_info(chip->dev, "Marvell 88PM8607 (ID: %02x) detected\n",
+ ret);
+ else {
+ dev_err(chip->dev, "Failed to detect Marvell 88PM8607. "
+ "Chip ID: %02x\n", ret);
+ goto out;
+ }
+ chip->chip_id = ret;
+
+ ret = pm8607_reg_read(chip, PM8607_BUCK3);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read BUCK3 register: %d\n", ret);
+ goto out;
+ }
+ if (ret & PM8607_BUCK3_DOUBLE)
+ chip->buck3_double = 1;
+
+ ret = pm8607_reg_read(chip, PM8607_MISC1);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read MISC1 register: %d\n", ret);
+ goto out;
+ }
+ if (pdata->i2c_port == PI2C_PORT)
+ ret |= PM8607_MISC1_PI2C;
+ else
+ ret &= ~PM8607_MISC1_PI2C;
+ ret = pm8607_reg_write(chip, PM8607_MISC1, ret);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write MISC1 register: %d\n", ret);
+ goto out;
+ }
+
+
+ count = ARRAY_SIZE(pm8607_devs);
+ for (i = 0; i < count; i++) {
+ ret = mfd_add_devices(chip->dev, i, &pm8607_devs[i],
+ 1, NULL, 0);
+ if (ret != 0) {
+ dev_err(chip->dev, "Failed to add subdevs\n");
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return ret;
+}
+
+static int __devexit pm8607_remove(struct i2c_client *client)
+{
+ struct pm8607_chip *chip = i2c_get_clientdata(client);
+
+ mfd_remove_devices(chip->dev);
+ kfree(chip);
+ return 0;
+}
+
+static struct i2c_driver pm8607_driver = {
+ .driver = {
+ .name = "88PM8607",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm8607_probe,
+ .remove = __devexit_p(pm8607_remove),
+ .id_table = pm8607_id_table,
+};
+
+static int __init pm8607_init(void)
+{
+ int ret;
+ ret = i2c_add_driver(&pm8607_driver);
+ if (ret != 0)
+ pr_err("Failed to register 88PM8607 I2C driver: %d\n", ret);
+ return ret;
+}
+subsys_initcall(pm8607_init);
+
+static void __exit pm8607_exit(void)
+{
+ i2c_del_driver(&pm8607_driver);
+}
+module_exit(pm8607_exit);
+
+MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM8607");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a296e717e86e..87829789243e 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -103,10 +103,10 @@ config MENELAUS
cell phones and PDAs.
config TWL4030_CORE
- bool "Texas Instruments TWL4030/TPS659x0 Support"
+ bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
help
- Say yes here if you have TWL4030 family chip on your board.
+ Say yes here if you have a TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
facilities, and registers devices for the various functions
so that function-specific drivers can bind to them.
@@ -174,6 +174,16 @@ config PMIC_DA903X
individual components like LCD backlight, voltage regulators,
LEDs and battery-charger under the corresponding menus.
+config PMIC_ADP5520
+ bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
+ depends on I2C=y
+ help
+ Say yes here to add support for the Analog Devices ADP5520 and
+ ADP5501 Multifunction Power Management ICs. This includes
+ the I2C driver and the core APIs _only_; you have to select
+ individual components like LCD backlight, LEDs, GPIOs and Keypad
+ under the corresponding menus.
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -185,12 +195,12 @@ config MFD_WM8400
the functionality of the device.
config MFD_WM831X
- tristate "Support Wolfson Microelectronics WM831x PMICs"
+ bool "Support Wolfson Microelectronics WM831x/2x PMICs"
select MFD_CORE
- depends on I2C
+ depends on I2C=y
help
- Support for the Wolfson Microelecronics WM831x PMICs. This
- driver provides common support for accessing the device,
+ Support for the Wolfson Microelectronics WM831x and WM832x PMICs.
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -319,6 +329,25 @@ config EZX_PCAP
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
+config MFD_88PM8607
+ bool "Support Marvell 88PM8607"
+ depends on I2C=y
+ select MFD_CORE
+ help
+ This adds support for the Marvell 88PM8607 Power Management IC. This includes
+ the I2C driver and the core APIs _only_, you have to select
+ individual components like voltage regulators, RTC and
+ battery-charger under the corresponding menus.
+
+config AB4500_CORE
+ tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip"
+ depends on SPI
+ help
+ Select this option to enable access to the AB4500 power management
+ chip. This connects to the U8500 on the SSP/SPI bus and exports
+ read/write functions for the devices to get access to this chip.
+ This chip embeds various other multimedia functionalities as well.
+
endmenu
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 11350c1d9301..ca2f2c4ff05e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -19,13 +19,14 @@ obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
obj-$(CONFIG_MFD_WM831X) += wm831x.o
wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
+wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_MENELAUS) += menelaus.o
-obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
+obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
@@ -52,3 +53,6 @@ obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
+obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o
+obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
\ No newline at end of file
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 613481028272..fd42a80e7bf9 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -900,9 +900,6 @@ static int __init ab3100_probe(struct i2c_client *client,
goto exit_no_testreg_client;
}
- strlcpy(ab3100->testreg_client->name, id->name,
- sizeof(ab3100->testreg_client->name));
-
err = ab3100_setup(ab3100);
if (err)
goto exit_no_setup;
diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c
new file mode 100644
index 000000000000..1c44c19e073a
--- /dev/null
+++ b/drivers/mfd/ab4500-core.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation.
+ *
+ * AB4500 is a companion power management chip used with U8500.
+ * On this platform, it is interfaced with the SSP0 controller,
+ * which is an ARM PrimeCell PL022.
+ *
+ * At the moment the module just exports read/write features.
+ * Interrupt management to be added - TODO.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/ab4500.h>
+
+/* only required so that, if probe fails, we can
+ * unregister the driver
+ */
+static struct spi_driver ab4500_driver;
+
+/*
+ * This function writes to any AB4500 register using the
+ * SPI protocol; before writing, it packs the data
+ * into the 24-bit frame format below
+ *
+ * *|------------------------------------|
+ * *| 23|22...18|17.......10|9|8|7......0|
+ * *| r/w bank adr data |
+ * * ------------------------------------
+ *
+ * This function shouldn't be called from interrupt
+ * context
+ */
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data)
+{
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int err;
+ unsigned long spi_data =
+ block << 18 | addr << 10 | data;
+
+ mutex_lock(&ab4500->lock);
+ ab4500->tx_buf[0] = spi_data;
+ ab4500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab4500->tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ err = spi_sync(ab4500->spi, &msg);
+ mutex_unlock(&ab4500->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(ab4500_write);
+
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr)
+{
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ unsigned long spi_data =
+ 1 << 23 | block << 18 | addr << 10;
+
+ mutex_lock(&ab4500->lock);
+ ab4500->tx_buf[0] = spi_data;
+ ab4500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab4500->tx_buf;
+ xfer.rx_buf = ab4500->rx_buf;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ spi_sync(ab4500->spi, &msg);
+ mutex_unlock(&ab4500->lock);
+
+ return ab4500->rx_buf[0];
+}
+EXPORT_SYMBOL(ab4500_read);
+
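/*
 * Illustrative sketch, not part of the patch: packing the 24-bit SPI frame
 * described above.  Bit 23 is the read/write flag, bits 22..18 the register
 * bank, bits 17..10 the address, bits 9..0 the data.  Function name is
 * hypothetical.
 */
static unsigned long ab4500_pack_frame(int read, unsigned char bank,
				       unsigned long addr, unsigned char data)
{
	return (read ? 1UL << 23 : 0) |
	       (unsigned long)bank << 18 | addr << 10 | data;
}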
+/* ref: ab3100 core */
+#define AB4500_DEVICE(devname, devid) \
+static struct platform_device ab4500_##devname##_device = { \
+ .name = devid, \
+ .id = -1, \
+}
+
+/* list of child devices of ab4500 - not all are
+ * populated here - TODO
+ */
+AB4500_DEVICE(charger, "ab4500-charger");
+AB4500_DEVICE(audio, "ab4500-audio");
+AB4500_DEVICE(usb, "ab4500-usb");
+AB4500_DEVICE(tvout, "ab4500-tvout");
+AB4500_DEVICE(sim, "ab4500-sim");
+AB4500_DEVICE(gpadc, "ab4500-gpadc");
+AB4500_DEVICE(clkmgt, "ab4500-clkmgt");
+AB4500_DEVICE(misc, "ab4500-misc");
+
+static struct platform_device *ab4500_platform_devs[] = {
+ &ab4500_charger_device,
+ &ab4500_audio_device,
+ &ab4500_usb_device,
+ &ab4500_tvout_device,
+ &ab4500_sim_device,
+ &ab4500_gpadc_device,
+ &ab4500_clkmgt_device,
+ &ab4500_misc_device,
+};
+
+static int __init ab4500_probe(struct spi_device *spi)
+{
+ struct ab4500 *ab4500;
+ unsigned char revision;
+ int err = 0;
+ int i;
+
+ ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL);
+ if (!ab4500) {
+ dev_err(&spi->dev, "could not allocate AB4500\n");
+ err = -ENOMEM;
+ goto not_detect;
+ }
+
+ ab4500->spi = spi;
+ spi_set_drvdata(spi, ab4500);
+
+ mutex_init(&ab4500->lock);
+
+ /* read the revision register */
+ revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG);
+
+ /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */
+ if (revision == 0x0 || revision == 0x10)
+ dev_info(&spi->dev, "Detected chip: %s, revision = %x\n",
+ ab4500_driver.driver.name, revision);
+ else {
+ dev_err(&spi->dev, "unknown chip: 0x%x\n", revision);
+ goto not_detect;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) {
+ ab4500_platform_devs[i]->dev.parent =
+ &spi->dev;
+ platform_set_drvdata(ab4500_platform_devs[i], ab4500);
+ }
+
+ /* register the ab4500 platform devices */
+ platform_add_devices(ab4500_platform_devs,
+ ARRAY_SIZE(ab4500_platform_devs));
+
+ return err;
+
+ not_detect:
+ spi_unregister_driver(&ab4500_driver);
+ kfree(ab4500);
+ return err;
+}
+
+static int __devexit ab4500_remove(struct spi_device *spi)
+{
+ struct ab4500 *ab4500 =
+ spi_get_drvdata(spi);
+
+ kfree(ab4500);
+
+ return 0;
+}
+
+static struct spi_driver ab4500_driver = {
+ .driver = {
+ .name = "ab4500",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab4500_probe,
+ .remove = __devexit_p(ab4500_remove)
+};
+
+static int __devinit ab4500_init(void)
+{
+ return spi_register_driver(&ab4500_driver);
+}
+
+static void __exit ab4500_exit(void)
+{
+ spi_unregister_driver(&ab4500_driver);
+}
+
+subsys_initcall(ab4500_init);
+module_exit(ab4500_exit);
+
+MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
+MODULE_DESCRIPTION("AB4500 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
new file mode 100644
index 000000000000..b26644772d02
--- /dev/null
+++ b/drivers/mfd/adp5520.c
@@ -0,0 +1,379 @@
+/*
+ * Base driver for Analog Devices ADP5520/ADP5501 MFD PMICs
+ * LCD Backlight: drivers/video/backlight/adp5520_bl
+ * LEDs : drivers/led/leds-adp5520
+ * GPIO : drivers/gpio/adp5520-gpio (ADP5520 only)
+ * Keys : drivers/input/keyboard/adp5520-keys (ADP5520 only)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Derived from da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_chip {
+ struct i2c_client *client;
+ struct device *dev;
+ struct mutex lock;
+ struct blocking_notifier_head notifier_list;
+ int irq;
+ unsigned long id;
+};
+
+static int __adp5520_read(struct i2c_client *client,
+ int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+ return 0;
+}
+
+static int __adp5520_write(struct i2c_client *client,
+ int reg, uint8_t val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
+ val, reg);
+ return ret;
+ }
+ return 0;
+}
+
+static int __adp5520_ack_bits(struct i2c_client *client, int reg,
+ uint8_t bit_mask)
+{
+ struct adp5520_chip *chip = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = __adp5520_read(client, reg, &reg_val);
+
+ if (!ret) {
+ reg_val |= bit_mask;
+ ret = __adp5520_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+int adp5520_write(struct device *dev, int reg, uint8_t val)
+{
+ return __adp5520_write(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(adp5520_write);
+
+int adp5520_read(struct device *dev, int reg, uint8_t *val)
+{
+ return __adp5520_read(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(adp5520_read);
+
+int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = __adp5520_read(chip->client, reg, &reg_val);
+
+ if (!ret && ((reg_val & bit_mask) == 0)) {
+ reg_val |= bit_mask;
+ ret = __adp5520_write(chip->client, reg, reg_val);
+ }
+
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adp5520_set_bits);
+
+int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = __adp5520_read(chip->client, reg, &reg_val);
+
+ if (!ret && (reg_val & bit_mask)) {
+ reg_val &= ~bit_mask;
+ ret = __adp5520_write(chip->client, reg, reg_val);
+ }
+
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adp5520_clr_bits);
+
+int adp5520_register_notifier(struct device *dev, struct notifier_block *nb,
+ unsigned int events)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->irq) {
+ adp5520_set_bits(chip->dev, ADP5520_INTERRUPT_ENABLE,
+ events & (ADP5520_KP_IEN | ADP5520_KR_IEN |
+ ADP5520_OVP_IEN | ADP5520_CMPR_IEN));
+
+ return blocking_notifier_chain_register(&chip->notifier_list,
+ nb);
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(adp5520_register_notifier);
+
+int adp5520_unregister_notifier(struct device *dev, struct notifier_block *nb,
+ unsigned int events)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+
+ adp5520_clr_bits(chip->dev, ADP5520_INTERRUPT_ENABLE,
+ events & (ADP5520_KP_IEN | ADP5520_KR_IEN |
+ ADP5520_OVP_IEN | ADP5520_CMPR_IEN));
+
+ return blocking_notifier_chain_unregister(&chip->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(adp5520_unregister_notifier);
+
+static irqreturn_t adp5520_irq_thread(int irq, void *data)
+{
+ struct adp5520_chip *chip = data;
+ unsigned int events;
+ uint8_t reg_val;
+ int ret;
+
+ ret = __adp5520_read(chip->client, ADP5520_MODE_STATUS, &reg_val);
+ if (ret)
+ goto out;
+
+ events = reg_val & (ADP5520_OVP_INT | ADP5520_CMPR_INT |
+ ADP5520_GPI_INT | ADP5520_KR_INT | ADP5520_KP_INT);
+
+ blocking_notifier_call_chain(&chip->notifier_list, events, NULL);
+ /* ACK, Sticky bits are W1C */
+ __adp5520_ack_bits(chip->client, ADP5520_MODE_STATUS, events);
+
+out:
+ return IRQ_HANDLED;
+}
+
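/*
 * Illustrative sketch, not part of the patch: acknowledging "W1C"
 * (write-1-to-clear) status bits as __adp5520_ack_bits() does above --
 * writing back the bits that are set clears them in hardware.
 */
static unsigned char w1c_ack_value(unsigned char status, unsigned char handled)
{
	/* the value to write back; the set 'handled' bits are cleared */
	return status | handled;
}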
+static int __remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int adp5520_remove_subdevs(struct adp5520_chip *chip)
+{
+ return device_for_each_child(chip->dev, NULL, __remove_subdev);
+}
+
+static int __devinit adp5520_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5520_platform_data *pdata = client->dev.platform_data;
+ struct platform_device *pdev;
+ struct adp5520_chip *chip;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+ return -EIO;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ chip->dev = &client->dev;
+ chip->irq = client->irq;
+ chip->id = id->driver_data;
+ mutex_init(&chip->lock);
+
+ if (chip->irq) {
+ BLOCKING_INIT_NOTIFIER_HEAD(&chip->notifier_list);
+
+ ret = request_threaded_irq(chip->irq, NULL, adp5520_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "adp5520", chip);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ chip->irq);
+ goto out_free_chip;
+ }
+ }
+
+ ret = adp5520_write(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ if (ret) {
+ dev_err(&client->dev, "failed to write\n");
+ goto out_free_irq;
+ }
+
+ if (pdata->keys) {
+ pdev = platform_device_register_data(chip->dev, "adp5520-keys",
+ chip->id, pdata->keys, sizeof(*pdata->keys));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ if (pdata->gpio) {
+ pdev = platform_device_register_data(chip->dev, "adp5520-gpio",
+ chip->id, pdata->gpio, sizeof(*pdata->gpio));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ if (pdata->leds) {
+ pdev = platform_device_register_data(chip->dev, "adp5520-led",
+ chip->id, pdata->leds, sizeof(*pdata->leds));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ if (pdata->backlight) {
+ pdev = platform_device_register_data(chip->dev,
+ "adp5520-backlight",
+ chip->id,
+ pdata->backlight,
+ sizeof(*pdata->backlight));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ return 0;
+
+out_remove_subdevs:
+ adp5520_remove_subdevs(chip);
+
+out_free_irq:
+ if (chip->irq)
+ free_irq(chip->irq, chip);
+
+out_free_chip:
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit adp5520_remove(struct i2c_client *client)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+ if (chip->irq)
+ free_irq(chip->irq, chip);
+
+ adp5520_remove_subdevs(chip);
+ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adp5520_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+ adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ return 0;
+}
+
+static int adp5520_resume(struct i2c_client *client)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+ adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ return 0;
+}
+#else
+#define adp5520_suspend NULL
+#define adp5520_resume NULL
+#endif
+
+static const struct i2c_device_id adp5520_id[] = {
+ { "pmic-adp5520", ID_ADP5520 },
+ { "pmic-adp5501", ID_ADP5501 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp5520_id);
+
+static struct i2c_driver adp5520_driver = {
+ .driver = {
+ .name = "adp5520",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_probe,
+ .remove = __devexit_p(adp5520_remove),
+ .suspend = adp5520_suspend,
+ .resume = adp5520_resume,
+ .id_table = adp5520_id,
+};
+
+static int __init adp5520_init(void)
+{
+ return i2c_add_driver(&adp5520_driver);
+}
+module_init(adp5520_init);
+
+static void __exit adp5520_exit(void)
+{
+ i2c_del_driver(&adp5520_driver);
+}
+module_exit(adp5520_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 63a2a6632106..e22128c3e9a8 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -908,7 +908,7 @@ static int __init asic3_probe(struct platform_device *pdev)
return ret;
}
-static int asic3_remove(struct platform_device *pdev)
+static int __devexit asic3_remove(struct platform_device *pdev)
{
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 876288917976..df405af968fa 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -387,7 +387,6 @@ static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
pdev = platform_device_alloc(subdev->name, subdev->id);
pdev->dev.parent = &pcap->spi->dev;
pdev->dev.platform_data = subdev->platform_data;
- platform_set_drvdata(pdev, pcap);
return platform_device_add(pdev);
}
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index e354d2912ef1..a1ade2324ea9 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -1,286 +1,549 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This code is in parts based on wm8350-core.c and pcf50633-core.c
- *
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
-
-#include <linux/mfd/mc13783-private.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/mc13783.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/spi/spi.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13783-private.h>
+
+#define MC13783_IRQSTAT0 0
+#define MC13783_IRQSTAT0_ADCDONEI (1 << 0)
+#define MC13783_IRQSTAT0_ADCBISDONEI (1 << 1)
+#define MC13783_IRQSTAT0_TSI (1 << 2)
+#define MC13783_IRQSTAT0_WHIGHI (1 << 3)
+#define MC13783_IRQSTAT0_WLOWI (1 << 4)
+#define MC13783_IRQSTAT0_CHGDETI (1 << 6)
+#define MC13783_IRQSTAT0_CHGOVI (1 << 7)
+#define MC13783_IRQSTAT0_CHGREVI (1 << 8)
+#define MC13783_IRQSTAT0_CHGSHORTI (1 << 9)
+#define MC13783_IRQSTAT0_CCCVI (1 << 10)
+#define MC13783_IRQSTAT0_CHGCURRI (1 << 11)
+#define MC13783_IRQSTAT0_BPONI (1 << 12)
+#define MC13783_IRQSTAT0_LOBATLI (1 << 13)
+#define MC13783_IRQSTAT0_LOBATHI (1 << 14)
+#define MC13783_IRQSTAT0_UDPI (1 << 15)
+#define MC13783_IRQSTAT0_USBI (1 << 16)
+#define MC13783_IRQSTAT0_IDI (1 << 19)
+#define MC13783_IRQSTAT0_SE1I (1 << 21)
+#define MC13783_IRQSTAT0_CKDETI (1 << 22)
+#define MC13783_IRQSTAT0_UDMI (1 << 23)
+
+#define MC13783_IRQMASK0 1
+#define MC13783_IRQMASK0_ADCDONEM MC13783_IRQSTAT0_ADCDONEI
+#define MC13783_IRQMASK0_ADCBISDONEM MC13783_IRQSTAT0_ADCBISDONEI
+#define MC13783_IRQMASK0_TSM MC13783_IRQSTAT0_TSI
+#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI
+#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI
+#define MC13783_IRQMASK0_CHGDETM MC13783_IRQSTAT0_CHGDETI
+#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI
+#define MC13783_IRQMASK0_CHGREVM MC13783_IRQSTAT0_CHGREVI
+#define MC13783_IRQMASK0_CHGSHORTM MC13783_IRQSTAT0_CHGSHORTI
+#define MC13783_IRQMASK0_CCCVM MC13783_IRQSTAT0_CCCVI
+#define MC13783_IRQMASK0_CHGCURRM MC13783_IRQSTAT0_CHGCURRI
+#define MC13783_IRQMASK0_BPONM MC13783_IRQSTAT0_BPONI
+#define MC13783_IRQMASK0_LOBATLM MC13783_IRQSTAT0_LOBATLI
+#define MC13783_IRQMASK0_LOBATHM MC13783_IRQSTAT0_LOBATHI
+#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI
+#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI
+#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI
+#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I
+#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI
+#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI
+
+#define MC13783_IRQSTAT1 3
+#define MC13783_IRQSTAT1_1HZI (1 << 0)
+#define MC13783_IRQSTAT1_TODAI (1 << 1)
+#define MC13783_IRQSTAT1_ONOFD1I (1 << 3)
+#define MC13783_IRQSTAT1_ONOFD2I (1 << 4)
+#define MC13783_IRQSTAT1_ONOFD3I (1 << 5)
+#define MC13783_IRQSTAT1_SYSRSTI (1 << 6)
+#define MC13783_IRQSTAT1_RTCRSTI (1 << 7)
+#define MC13783_IRQSTAT1_PCI (1 << 8)
+#define MC13783_IRQSTAT1_WARMI (1 << 9)
+#define MC13783_IRQSTAT1_MEMHLDI (1 << 10)
+#define MC13783_IRQSTAT1_PWRRDYI (1 << 11)
+#define MC13783_IRQSTAT1_THWARNLI (1 << 12)
+#define MC13783_IRQSTAT1_THWARNHI (1 << 13)
+#define MC13783_IRQSTAT1_CLKI (1 << 14)
+#define MC13783_IRQSTAT1_SEMAFI (1 << 15)
+#define MC13783_IRQSTAT1_MC2BI (1 << 17)
+#define MC13783_IRQSTAT1_HSDETI (1 << 18)
+#define MC13783_IRQSTAT1_HSLI (1 << 19)
+#define MC13783_IRQSTAT1_ALSPTHI (1 << 20)
+#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21)
+
+#define MC13783_IRQMASK1 4
+#define MC13783_IRQMASK1_1HZM MC13783_IRQSTAT1_1HZI
+#define MC13783_IRQMASK1_TODAM MC13783_IRQSTAT1_TODAI
+#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I
+#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I
+#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I
+#define MC13783_IRQMASK1_SYSRSTM MC13783_IRQSTAT1_SYSRSTI
+#define MC13783_IRQMASK1_RTCRSTM MC13783_IRQSTAT1_RTCRSTI
+#define MC13783_IRQMASK1_PCM MC13783_IRQSTAT1_PCI
+#define MC13783_IRQMASK1_WARMM MC13783_IRQSTAT1_WARMI
+#define MC13783_IRQMASK1_MEMHLDM MC13783_IRQSTAT1_MEMHLDI
+#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI
+#define MC13783_IRQMASK1_THWARNLM MC13783_IRQSTAT1_THWARNLI
+#define MC13783_IRQMASK1_THWARNHM MC13783_IRQSTAT1_THWARNHI
+#define MC13783_IRQMASK1_CLKM MC13783_IRQSTAT1_CLKI
+#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI
+#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI
+#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI
+#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI
+#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI
+#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI
+
+#define MC13783_ADC1 44
+#define MC13783_ADC1_ADEN (1 << 0)
+#define MC13783_ADC1_RAND (1 << 1)
+#define MC13783_ADC1_ADSEL (1 << 3)
+#define MC13783_ADC1_ASC (1 << 20)
+#define MC13783_ADC1_ADTRIGIGN (1 << 21)
+
+#define MC13783_NUMREGS 0x3f
+
+void mc13783_lock(struct mc13783 *mc13783)
+{
+ if (!mutex_trylock(&mc13783->lock)) {
+ dev_dbg(&mc13783->spidev->dev, "wait for %s from %pf\n",
+ __func__, __builtin_return_address(0));
+
+ mutex_lock(&mc13783->lock);
+ }
+ dev_dbg(&mc13783->spidev->dev, "%s from %pf\n",
+ __func__, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(mc13783_lock);
-#define MC13783_MAX_REG_NUM 0x3f
-#define MC13783_FRAME_MASK 0x00ffffff
-#define MC13783_MAX_REG_NUM 0x3f
-#define MC13783_REG_NUM_SHIFT 0x19
-#define MC13783_WRITE_BIT_SHIFT 31
+void mc13783_unlock(struct mc13783 *mc13783)
+{
+ dev_dbg(&mc13783->spidev->dev, "%s from %pf\n",
+ __func__, __builtin_return_address(0));
+ mutex_unlock(&mc13783->lock);
+}
+EXPORT_SYMBOL(mc13783_unlock);
-static inline int spi_rw(struct spi_device *spi, u8 * buf, size_t len)
+#define MC13783_REGOFFSET_SHIFT 25
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val)
{
- struct spi_transfer t = {
- .tx_buf = (const void *)buf,
- .rx_buf = buf,
- .len = len,
- .cs_change = 0,
- .delay_usecs = 0,
- };
+ struct spi_transfer t;
struct spi_message m;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
+
+ if (offset > MC13783_NUMREGS)
+ return -EINVAL;
+
+ *val = offset << MC13783_REGOFFSET_SHIFT;
+
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = val;
+ t.rx_buf = val;
+ t.len = sizeof(u32);
spi_message_init(&m);
spi_message_add_tail(&t, &m);
- if (spi_sync(spi, &m) != 0 || m.status != 0)
- return -EINVAL;
- return len - m.actual_length;
-}
-static int mc13783_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val)
-{
- unsigned int frame = 0;
- int ret = 0;
+ ret = spi_sync(mc13783->spidev, &m);
- if (reg_num > MC13783_MAX_REG_NUM)
- return -EINVAL;
+ /* error in message.status implies error return from spi_sync */
+ BUG_ON(!ret && m.status);
- frame |= reg_num << MC13783_REG_NUM_SHIFT;
+ if (ret)
+ return ret;
- ret = spi_rw(mc13783->spi_device, (u8 *)&frame, 4);
+ *val &= 0xffffff;
- *reg_val = frame & MC13783_FRAME_MASK;
+ dev_vdbg(&mc13783->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
- return ret;
+ return 0;
}
+EXPORT_SYMBOL(mc13783_reg_read);
-static int mc13783_write(struct mc13783 *mc13783, int reg_num, u32 reg_val)
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val)
{
- unsigned int frame = 0;
+ u32 buf;
+ struct spi_transfer t;
+ struct spi_message m;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
- if (reg_num > MC13783_MAX_REG_NUM)
+ dev_vdbg(&mc13783->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+
+ if (offset > MC13783_NUMREGS || val > 0xffffff)
return -EINVAL;
- frame |= (1 << MC13783_WRITE_BIT_SHIFT);
- frame |= reg_num << MC13783_REG_NUM_SHIFT;
- frame |= reg_val & MC13783_FRAME_MASK;
+ buf = 1 << 31 | offset << MC13783_REGOFFSET_SHIFT | val;
+
+ memset(&t, 0, sizeof(t));
- return spi_rw(mc13783->spi_device, (u8 *)&frame, 4);
+ t.tx_buf = &buf;
+ t.rx_buf = &buf;
+ t.len = sizeof(u32);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ ret = spi_sync(mc13783->spidev, &m);
+
+ BUG_ON(!ret && m.status);
+
+ if (ret)
+ return ret;
+
+ return 0;
}
+EXPORT_SYMBOL(mc13783_reg_write);
-int mc13783_reg_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val)
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val)
{
int ret;
+ u32 valread;
- mutex_lock(&mc13783->io_lock);
- ret = mc13783_read(mc13783, reg_num, reg_val);
- mutex_unlock(&mc13783->io_lock);
+ BUG_ON(val & ~mask);
- return ret;
+ ret = mc13783_reg_read(mc13783, offset, &valread);
+ if (ret)
+ return ret;
+
+ valread = (valread & ~mask) | val;
+
+ return mc13783_reg_write(mc13783, offset, valread);
}
-EXPORT_SYMBOL_GPL(mc13783_reg_read);
+EXPORT_SYMBOL(mc13783_reg_rmw);
-int mc13783_reg_write(struct mc13783 *mc13783, int reg_num, u32 reg_val)
+int mc13783_mask(struct mc13783 *mc13783, int irq)
{
int ret;
+ unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+ u32 mask;
- mutex_lock(&mc13783->io_lock);
- ret = mc13783_write(mc13783, reg_num, reg_val);
- mutex_unlock(&mc13783->io_lock);
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
+ return -EINVAL;
- return ret;
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
+
+ if (mask & irqbit)
+ /* already masked */
+ return 0;
+
+ return mc13783_reg_write(mc13783, offmask, mask | irqbit);
}
-EXPORT_SYMBOL_GPL(mc13783_reg_write);
+EXPORT_SYMBOL(mc13783_mask);
-/**
- * mc13783_set_bits - Bitmask write
- *
- * @mc13783: Pointer to mc13783 control structure
- * @reg: Register to access
- * @mask: Mask of bits to change
- * @val: Value to set for masked bits
- */
-int mc13783_set_bits(struct mc13783 *mc13783, int reg, u32 mask, u32 val)
+int mc13783_unmask(struct mc13783 *mc13783, int irq)
{
- u32 tmp;
int ret;
+ unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+ u32 mask;
- mutex_lock(&mc13783->io_lock);
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
+ return -EINVAL;
- ret = mc13783_read(mc13783, reg, &tmp);
- tmp = (tmp & ~mask) | val;
- if (ret == 0)
- ret = mc13783_write(mc13783, reg, tmp);
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
- mutex_unlock(&mc13783->io_lock);
+ if (!(mask & irqbit))
+ /* already unmasked */
+ return 0;
- return ret;
+ return mc13783_reg_write(mc13783, offmask, mask & ~irqbit);
}
-EXPORT_SYMBOL_GPL(mc13783_set_bits);
+EXPORT_SYMBOL(mc13783_unmask);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data)
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev)
{
- if (irq < 0 || irq > MC13783_NUM_IRQ || !handler)
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
+ BUG_ON(!handler);
+
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
return -EINVAL;
- if (WARN_ON(mc13783->irq_handler[irq].handler))
+ if (mc13783->irqhandler[irq])
return -EBUSY;
- mutex_lock(&mc13783->io_lock);
- mc13783->irq_handler[irq].handler = handler;
- mc13783->irq_handler[irq].data = data;
- mutex_unlock(&mc13783->io_lock);
+ mc13783->irqhandler[irq] = handler;
+ mc13783->irqdata[irq] = dev;
return 0;
}
-EXPORT_SYMBOL_GPL(mc13783_register_irq);
+EXPORT_SYMBOL(mc13783_irq_request_nounmask);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq)
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev)
{
- if (irq < 0 || irq > MC13783_NUM_IRQ)
+ int ret;
+
+ ret = mc13783_irq_request_nounmask(mc13783, irq, handler, name, dev);
+ if (ret)
+ return ret;
+
+ ret = mc13783_unmask(mc13783, irq);
+ if (ret) {
+ mc13783->irqhandler[irq] = NULL;
+ mc13783->irqdata[irq] = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13783_irq_request);
+
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev)
+{
+ int ret;
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
+
+ if (irq < 0 || irq >= MC13783_NUM_IRQ || !mc13783->irqhandler[irq] ||
+ mc13783->irqdata[irq] != dev)
return -EINVAL;
- mutex_lock(&mc13783->io_lock);
- mc13783->irq_handler[irq].handler = NULL;
- mutex_unlock(&mc13783->io_lock);
+ ret = mc13783_mask(mc13783, irq);
+ if (ret)
+ return ret;
+
+ mc13783->irqhandler[irq] = NULL;
+ mc13783->irqdata[irq] = NULL;
return 0;
}
-EXPORT_SYMBOL_GPL(mc13783_free_irq);
+EXPORT_SYMBOL(mc13783_irq_free);
-static void mc13783_irq_work(struct work_struct *work)
+static inline irqreturn_t mc13783_irqhandler(struct mc13783 *mc13783, int irq)
{
- struct mc13783 *mc13783 = container_of(work, struct mc13783, work);
- int i;
- unsigned int adc_sts;
-
- /* check if the adc has finished any completion */
- mc13783_reg_read(mc13783, MC13783_REG_INTERRUPT_STATUS_0, &adc_sts);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0,
- adc_sts & MC13783_INT_STAT_ADCDONEI);
-
- if (adc_sts & MC13783_INT_STAT_ADCDONEI)
- complete_all(&mc13783->adc_done);
-
- for (i = 0; i < MC13783_NUM_IRQ; i++)
- if (mc13783->irq_handler[i].handler)
- mc13783->irq_handler[i].handler(i,
- mc13783->irq_handler[i].data);
- enable_irq(mc13783->irq);
+ return mc13783->irqhandler[irq](irq, mc13783->irqdata[irq]);
}
-static irqreturn_t mc13783_interrupt(int irq, void *dev_id)
+int mc13783_ackirq(struct mc13783 *mc13783, int irq)
{
- struct mc13783 *mc13783 = dev_id;
+ unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
+ unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
- disable_irq_nosync(irq);
+ BUG_ON(irq < 0 || irq >= MC13783_NUM_IRQ);
- schedule_work(&mc13783->work);
- return IRQ_HANDLED;
+ return mc13783_reg_write(mc13783, offstat, val);
}
+EXPORT_SYMBOL(mc13783_ackirq);
-/* set adc to ts interrupt mode, which generates touchscreen wakeup interrupt */
-static inline void mc13783_adc_set_ts_irq_mode(struct mc13783 *mc13783)
+/*
+ * returns: number of handled irqs or negative error
+ * locking: holds mc13783->lock
+ */
+static int mc13783_irq_handle(struct mc13783 *mc13783,
+ unsigned int offstat, unsigned int offmask, int baseirq)
{
- unsigned int reg_adc0, reg_adc1;
+ u32 stat, mask;
+ int ret = mc13783_reg_read(mc13783, offstat, &stat);
+ int num_handled = 0;
+
+ if (ret)
+ return ret;
+
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
+
+ while (stat & ~mask) {
+ int irq = __ffs(stat & ~mask);
+
+ stat &= ~(1 << irq);
+
+ if (likely(mc13783->irqhandler[baseirq + irq])) {
+ irqreturn_t handled;
- reg_adc0 = MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE
- | MC13783_ADC0_TSMOD0;
- reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN;
+ handled = mc13783_irqhandler(mc13783, baseirq + irq);
+ if (handled == IRQ_HANDLED)
+ num_handled++;
+ } else {
+ dev_err(&mc13783->spidev->dev,
+ "BUG: irq %u but no handler\n",
+ baseirq + irq);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1);
+ mask |= 1 << irq;
+
+ ret = mc13783_reg_write(mc13783, offmask, mask);
+ }
+ }
+
+ return num_handled;
}
+static irqreturn_t mc13783_irq_thread(int irq, void *data)
+{
+ struct mc13783 *mc13783 = data;
+ irqreturn_t ret;
+ int handled = 0;
+
+ mc13783_lock(mc13783);
+
+ ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT0,
+ MC13783_IRQMASK0, MC13783_IRQ_ADCDONE);
+ if (ret > 0)
+ handled = 1;
+
+ ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT1,
+ MC13783_IRQMASK1, MC13783_IRQ_1HZ);
+ if (ret > 0)
+ handled = 1;
+
+ mc13783_unlock(mc13783);
+
+ return IRQ_RETVAL(handled);
+}
+
+#define MC13783_ADC1_CHAN0_SHIFT 5
+#define MC13783_ADC1_CHAN1_SHIFT 8
+
+struct mc13783_adcdone_data {
+ struct mc13783 *mc13783;
+ struct completion done;
+};
+
+static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
+{
+ struct mc13783_adcdone_data *adcdone_data = data;
+
+ mc13783_ackirq(adcdone_data->mc13783, irq);
+
+ complete_all(&adcdone_data->done);
+
+ return IRQ_HANDLED;
+}
+
+#define MC13783_ADC_WORKING (1 << 16)
+
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample)
{
- unsigned int reg_adc0, reg_adc1;
- int i;
+ u32 adc0, adc1, old_adc0;
+ int i, ret;
+ struct mc13783_adcdone_data adcdone_data = {
+ .mc13783 = mc13783,
+ };
+ init_completion(&adcdone_data.done);
+
+ dev_dbg(&mc13783->spidev->dev, "%s\n", __func__);
+
+ mc13783_lock(mc13783);
+
+ if (mc13783->flags & MC13783_ADC_WORKING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mc13783->flags |= MC13783_ADC_WORKING;
- mutex_lock(&mc13783->adc_conv_lock);
+ mc13783_reg_read(mc13783, MC13783_ADC0, &old_adc0);
- /* set up auto incrementing anyway to make quick read */
- reg_adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
- /* enable the adc, ignore external triggering and set ASC to trigger
- * conversion */
- reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN
- | MC13783_ADC1_ASC;
+ adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
+ adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN | MC13783_ADC1_ASC;
- /* setup channel number */
if (channel > 7)
- reg_adc1 |= MC13783_ADC1_ADSEL;
+ adc1 |= MC13783_ADC1_ADSEL;
switch (mode) {
case MC13783_ADC_MODE_TS:
- /* enables touch screen reference mode and set touchscreen mode
- * to position mode */
- reg_adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE
- | MC13783_ADC0_TSMOD0 | MC13783_ADC0_TSMOD1;
- reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_TSMOD0 |
+ MC13783_ADC0_TSMOD1;
+ adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
break;
+
case MC13783_ADC_MODE_SINGLE_CHAN:
- reg_adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
- reg_adc1 |= MC13783_ADC1_RAND;
+ adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
+ adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
+ adc1 |= MC13783_ADC1_RAND;
break;
+
case MC13783_ADC_MODE_MULT_CHAN:
- reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
+ adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
break;
+
default:
+ mc13783_unlock(mc13783);
return -EINVAL;
}
- mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1);
+ dev_dbg(&mc13783->spidev->dev, "%s: request irq\n", __func__);
+ mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
+ mc13783_handler_adcdone, __func__, &adcdone_data);
+ mc13783_ackirq(mc13783, MC13783_IRQ_ADCDONE);
- wait_for_completion_interruptible(&mc13783->adc_done);
+ mc13783_reg_write(mc13783, MC13783_REG_ADC_0, adc0);
+ mc13783_reg_write(mc13783, MC13783_REG_ADC_1, adc1);
- for (i = 0; i < 4; i++)
- mc13783_reg_read(mc13783, MC13783_REG_ADC_2, &sample[i]);
+ mc13783_unlock(mc13783);
- if (mc13783->ts_active)
- mc13783_adc_set_ts_irq_mode(mc13783);
+ ret = wait_for_completion_interruptible_timeout(&adcdone_data.done, HZ);
- mutex_unlock(&mc13783->adc_conv_lock);
+ if (!ret)
+ ret = -ETIMEDOUT;
- return 0;
+ mc13783_lock(mc13783);
+
+ mc13783_irq_free(mc13783, MC13783_IRQ_ADCDONE, &adcdone_data);
+
+ if (ret > 0)
+ for (i = 0; i < 4; ++i) {
+ ret = mc13783_reg_read(mc13783,
+ MC13783_REG_ADC_2, &sample[i]);
+ if (ret)
+ break;
+ }
+
+ if (mode == MC13783_ADC_MODE_TS)
+ /* restore TSMOD */
+ mc13783_reg_write(mc13783, MC13783_REG_ADC_0, old_adc0);
+
+ mc13783->flags &= ~MC13783_ADC_WORKING;
+out:
+ mc13783_unlock(mc13783);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status)
+static int mc13783_add_subdevice_pdata(struct mc13783 *mc13783,
+ const char *name, void *pdata, size_t pdata_size)
{
- mc13783->ts_active = status;
+ struct mfd_cell cell = {
+ .name = name,
+ .platform_data = pdata,
+ .data_size = pdata_size,
+ };
+
+ return mfd_add_devices(&mc13783->spidev->dev, -1, &cell, 1, NULL, 0);
+}
+
+static int mc13783_add_subdevice(struct mc13783 *mc13783, const char *name)
+{
+ return mc13783_add_subdevice_pdata(mc13783, name, NULL, 0);
}
-EXPORT_SYMBOL_GPL(mc13783_adc_set_ts_status);
static int mc13783_check_revision(struct mc13783 *mc13783)
{
u32 rev_id, rev1, rev2, finid, icid;
- mc13783_read(mc13783, MC13783_REG_REVISION, &rev_id);
+ mc13783_reg_read(mc13783, MC13783_REG_REVISION, &rev_id);
rev1 = (rev_id & 0x018) >> 3;
rev2 = (rev_id & 0x007);
@@ -292,38 +555,24 @@ static int mc13783_check_revision(struct mc13783 *mc13783)
rev1 = 3;
if (rev1 == 0 || icid != 2) {
- dev_err(mc13783->dev, "No MC13783 detected.\n");
+ dev_err(&mc13783->spidev->dev, "No MC13783 detected.\n");
return -ENODEV;
}
- mc13783->revision = ((rev1 * 10) + rev2);
- dev_info(mc13783->dev, "MC13783 Rev %d.%d FinVer %x detected\n", rev1,
- rev2, finid);
+ dev_info(&mc13783->spidev->dev,
+ "MC13783 Rev %d.%d FinVer %x detected\n",
+ rev1, rev2, finid);
return 0;
}
-/*
- * Register a client device. This is non-fatal since there is no need to
- * fail the entire device init due to a single platform device failing.
- */
-static void mc13783_client_dev_register(struct mc13783 *mc13783,
- const char *name)
-{
- struct mfd_cell cell = {};
-
- cell.name = name;
-
- mfd_add_devices(mc13783->dev, -1, &cell, 1, NULL, 0);
-}
-
-static int __devinit mc13783_probe(struct spi_device *spi)
+static int mc13783_probe(struct spi_device *spi)
{
struct mc13783 *mc13783;
- struct mc13783_platform_data *pdata = spi->dev.platform_data;
+ struct mc13783_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret;
- mc13783 = kzalloc(sizeof(struct mc13783), GFP_KERNEL);
+ mc13783 = kzalloc(sizeof(*mc13783), GFP_KERNEL);
if (!mc13783)
return -ENOMEM;
@@ -332,96 +581,104 @@ static int __devinit mc13783_probe(struct spi_device *spi)
spi->bits_per_word = 32;
spi_setup(spi);
- mc13783->spi_device = spi;
- mc13783->dev = &spi->dev;
- mc13783->irq = spi->irq;
+ mc13783->spidev = spi;
+
+ mutex_init(&mc13783->lock);
+ mc13783_lock(mc13783);
+
+ ret = mc13783_check_revision(mc13783);
+ if (ret)
+ goto err_revision;
+
+ /* mask all irqs */
+ ret = mc13783_reg_write(mc13783, MC13783_IRQMASK0, 0x00ffffff);
+ if (ret)
+ goto err_mask;
- INIT_WORK(&mc13783->work, mc13783_irq_work);
- mutex_init(&mc13783->io_lock);
- mutex_init(&mc13783->adc_conv_lock);
- init_completion(&mc13783->adc_done);
+ ret = mc13783_reg_write(mc13783, MC13783_IRQMASK1, 0x00ffffff);
+ if (ret)
+ goto err_mask;
+
+ ret = request_threaded_irq(spi->irq, NULL, mc13783_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13783", mc13783);
+
+ if (ret) {
+err_mask:
+err_revision:
+ mutex_unlock(&mc13783->lock);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(mc13783);
+ return ret;
+ }
+ /* This should go away (BEGIN) */
if (pdata) {
mc13783->flags = pdata->flags;
mc13783->regulators = pdata->regulators;
mc13783->num_regulators = pdata->num_regulators;
}
+ /* This should go away (END) */
- if (mc13783_check_revision(mc13783)) {
- ret = -ENODEV;
- goto err_out;
+ if (pdata->flags & MC13783_USE_ADC)
+ mc13783_add_subdevice(mc13783, "mc13783-adc");
+
+ if (pdata->flags & MC13783_USE_CODEC)
+ mc13783_add_subdevice(mc13783, "mc13783-codec");
+
+ if (pdata->flags & MC13783_USE_REGULATOR) {
+ struct mc13783_regulator_platform_data regulator_pdata = {
+ .num_regulators = pdata->num_regulators,
+ .regulators = pdata->regulators,
+ };
+
+ mc13783_add_subdevice_pdata(mc13783, "mc13783-regulator",
+ &regulator_pdata, sizeof(regulator_pdata));
}
- /* clear and mask all interrupts */
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0, 0x00ffffff);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_0, 0x00ffffff);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_1, 0x00ffffff);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_1, 0x00ffffff);
+ if (pdata->flags & MC13783_USE_RTC)
+ mc13783_add_subdevice(mc13783, "mc13783-rtc");
- /* unmask adcdone interrupts */
- mc13783_set_bits(mc13783, MC13783_REG_INTERRUPT_MASK_0,
- MC13783_INT_MASK_ADCDONEM, 0);
+ if (pdata->flags & MC13783_USE_TOUCHSCREEN)
+ mc13783_add_subdevice(mc13783, "mc13783-ts");
- ret = request_irq(mc13783->irq, mc13783_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_HIGH, "mc13783",
- mc13783);
- if (ret)
- goto err_out;
-
- if (mc13783->flags & MC13783_USE_CODEC)
- mc13783_client_dev_register(mc13783, "mc13783-codec");
- if (mc13783->flags & MC13783_USE_ADC)
- mc13783_client_dev_register(mc13783, "mc13783-adc");
- if (mc13783->flags & MC13783_USE_RTC)
- mc13783_client_dev_register(mc13783, "mc13783-rtc");
- if (mc13783->flags & MC13783_USE_REGULATOR)
- mc13783_client_dev_register(mc13783, "mc13783-regulator");
- if (mc13783->flags & MC13783_USE_TOUCHSCREEN)
- mc13783_client_dev_register(mc13783, "mc13783-ts");
+ mc13783_unlock(mc13783);
return 0;
-
-err_out:
- kfree(mc13783);
- return ret;
}
static int __devexit mc13783_remove(struct spi_device *spi)
{
- struct mc13783 *mc13783;
+ struct mc13783 *mc13783 = dev_get_drvdata(&spi->dev);
- mc13783 = dev_get_drvdata(&spi->dev);
-
- free_irq(mc13783->irq, mc13783);
+ free_irq(mc13783->spidev->irq, mc13783);
mfd_remove_devices(&spi->dev);
return 0;
}
-static struct spi_driver pmic_driver = {
+static struct spi_driver mc13783_driver = {
.driver = {
- .name = "mc13783",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
+ .name = "mc13783",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
},
.probe = mc13783_probe,
.remove = __devexit_p(mc13783_remove),
};
-static int __init pmic_init(void)
+static int __init mc13783_init(void)
{
- return spi_register_driver(&pmic_driver);
+ return spi_register_driver(&mc13783_driver);
}
-subsys_initcall(pmic_init);
+subsys_initcall(mc13783_init);
-static void __exit pmic_exit(void)
+static void __exit mc13783_exit(void)
{
- spi_unregister_driver(&pmic_driver);
+ spi_unregister_driver(&mc13783_driver);
}
-module_exit(pmic_exit);
-
-MODULE_DESCRIPTION("Core/Protocol driver for Freescale MC13783 PMIC");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_LICENSE("GPL");
+module_exit(mc13783_exit);
+MODULE_DESCRIPTION("Core driver for Freescale MC13783 PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 3d31e97d6a45..6d2e8466df1d 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -209,17 +209,16 @@ static void pcf50633_adc_irq(int irq, void *data)
static int __devinit pcf50633_adc_probe(struct platform_device *pdev)
{
- struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
struct pcf50633_adc *adc;
adc = kzalloc(sizeof(*adc), GFP_KERNEL);
if (!adc)
return -ENOMEM;
- adc->pcf = pdata->pcf;
+ adc->pcf = dev_to_pcf50633(pdev->dev.parent);
platform_set_drvdata(pdev, adc);
- pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ADCRDY,
+ pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
pcf50633_adc_irq, adc);
mutex_init(&adc->queue_mutex);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index d26d7747175e..03dcc9200707 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -290,7 +290,7 @@ out:
int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
{
- dev_info(pcf->dev, "Masking IRQ %d\n", irq);
+ dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
return __pcf50633_irq_mask_set(pcf, irq, 1);
}
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
{
- dev_info(pcf->dev, "Unmasking IRQ %d\n", irq);
+ dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
return __pcf50633_irq_mask_set(pcf, irq, 0);
}
@@ -345,6 +345,9 @@ static void pcf50633_irq_worker(struct work_struct *work)
goto out;
}
+ /* defeat 8s death from lowsys on A5 */
+ pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
+
/* We immediately read the usb and adapter status. We thus make sure
* only of USBINS/USBREM IRQ handlers are called */
if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
@@ -453,7 +456,6 @@ static void
pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
struct platform_device **pdev)
{
- struct pcf50633_subdev_pdata *subdev_pdata;
int ret;
*pdev = platform_device_alloc(name, -1);
@@ -462,15 +464,6 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
return;
}
- subdev_pdata = kmalloc(sizeof(*subdev_pdata), GFP_KERNEL);
- if (!subdev_pdata) {
- dev_err(pcf->dev, "Error allocating subdev pdata\n");
- platform_device_put(*pdev);
- }
-
- subdev_pdata->pcf = pcf;
- platform_device_add_data(*pdev, subdev_pdata, sizeof(*subdev_pdata));
-
(*pdev)->dev.parent = pcf->dev;
ret = platform_device_add(*pdev);
@@ -482,13 +475,13 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
}
#ifdef CONFIG_PM
-static int pcf50633_suspend(struct device *dev, pm_message_t state)
+static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
{
struct pcf50633 *pcf;
int ret = 0, i;
u8 res[5];
- pcf = dev_get_drvdata(dev);
+ pcf = i2c_get_clientdata(client);
/* Make sure our interrupt handlers are not called
* henceforth */
@@ -523,12 +516,12 @@ out:
return ret;
}
-static int pcf50633_resume(struct device *dev)
+static int pcf50633_resume(struct i2c_client *client)
{
struct pcf50633 *pcf;
int ret;
- pcf = dev_get_drvdata(dev);
+ pcf = i2c_get_clientdata(client);
/* Write the saved mask registers */
ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
@@ -560,9 +553,14 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
{
struct pcf50633 *pcf;
struct pcf50633_platform_data *pdata = client->dev.platform_data;
- int i, ret = 0;
+ int i, ret;
int version, variant;
+ if (!client->irq) {
+ dev_err(&client->dev, "Missing IRQ\n");
+ return -ENOENT;
+ }
+
pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
if (!pcf)
return -ENOMEM;
@@ -577,6 +575,12 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf->irq = client->irq;
pcf->work_queue = create_singlethread_workqueue("pcf50633");
+ if (!pcf->work_queue) {
+ dev_err(&client->dev, "Failed to alloc workqueue\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
INIT_WORK(&pcf->irq_work, pcf50633_irq_worker);
version = pcf50633_reg_read(pcf, 0);
@@ -584,7 +588,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err;
+ goto err_destroy_workqueue;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -598,6 +602,14 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
+ ret = request_irq(client->irq, pcf50633_irq,
+ IRQF_TRIGGER_LOW, "pcf50633", pcf);
+
+ if (ret) {
+ dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
+ goto err_destroy_workqueue;
+ }
+
/* Create sub devices */
pcf50633_client_dev_register(pcf, "pcf50633-input",
&pcf->input_pdev);
@@ -613,31 +625,18 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pdev = platform_device_alloc("pcf50633-regltr", i);
if (!pdev) {
- dev_err(pcf->dev, "Cannot create regulator\n");
+ dev_err(pcf->dev, "Cannot create regulator %d\n", i);
continue;
}
pdev->dev.parent = pcf->dev;
- pdev->dev.platform_data = &pdata->reg_init_data[i];
- dev_set_drvdata(&pdev->dev, pcf);
+ platform_device_add_data(pdev, &pdata->reg_init_data[i],
+ sizeof(pdata->reg_init_data[i]));
pcf->regulator_pdev[i] = pdev;
platform_device_add(pdev);
}
- if (client->irq) {
- ret = request_irq(client->irq, pcf50633_irq,
- IRQF_TRIGGER_LOW, "pcf50633", pcf);
-
- if (ret) {
- dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
- goto err;
- }
- } else {
- dev_err(pcf->dev, "No IRQ configured\n");
- goto err;
- }
-
if (enable_irq_wake(client->irq) < 0)
dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
"in this hardware revision", client->irq);
@@ -651,9 +650,12 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return 0;
-err:
+err_destroy_workqueue:
destroy_workqueue(pcf->work_queue);
+err_free:
+ i2c_set_clientdata(client, NULL);
kfree(pcf);
+
return ret;
}
@@ -686,12 +688,12 @@ static struct i2c_device_id pcf50633_id_table[] = {
static struct i2c_driver pcf50633_driver = {
.driver = {
.name = "pcf50633",
- .suspend = pcf50633_suspend,
- .resume = pcf50633_resume,
},
.id_table = pcf50633_id_table,
.probe = pcf50633_probe,
.remove = __devexit_p(pcf50633_remove),
+ .suspend = pcf50633_suspend,
+ .resume = pcf50633_resume,
};
static int __init pcf50633_init(void)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index acf8b9d5f575..e5955306c2fa 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -637,7 +637,7 @@ static int tps65010_probe(struct i2c_client *client,
tps, DEBUG_FOPS);
/* optionally register GPIOs */
- if (board && board->base > 0) {
+ if (board && board->base != 0) {
tps->outmask = board->outmask;
tps->chip.label = client->name;
@@ -964,6 +964,34 @@ int tps65010_config_vregs1(unsigned value)
}
EXPORT_SYMBOL(tps65010_config_vregs1);
+int tps65010_config_vdcdc2(unsigned value)
+{
+ struct i2c_client *c;
+ int status;
+
+ if (!the_tps)
+ return -ENODEV;
+
+ c = the_tps->client;
+ mutex_lock(&the_tps->lock);
+
+ pr_debug("%s: vdcdc2 0x%02x\n", DRIVER_NAME,
+ i2c_smbus_read_byte_data(c, TPS_VDCDC2));
+
+ status = i2c_smbus_write_byte_data(c, TPS_VDCDC2, value);
+
+ if (status != 0)
+ printk(KERN_ERR "%s: Failed to write vdcdc2 register\n",
+ DRIVER_NAME);
+ else
+ pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME,
+ i2c_smbus_read_byte_data(c, TPS_VDCDC2));
+
+ mutex_unlock(&the_tps->lock);
+ return status;
+}
+EXPORT_SYMBOL(tps65010_config_vdcdc2);
+
/*-------------------------------------------------------------------------*/
/* tps65013_set_low_pwr parameter:
* mode: ON or OFF
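
A minimal usage sketch, not part of this patch: a hypothetical board init hook programming VDCDC2 through the new tps65010_config_vdcdc2() helper, which mirrors the existing tps65010_config_vregs1() path. The header path and the 0x30 register value are placeholders for illustration, not a recommended setting.

#include <linux/kernel.h>
#include <linux/i2c/tps65010.h>	/* assumed location of the tps65010 API */

static int __init example_board_pmic_init(void)
{
	int status;

	/* 0x30 is an illustrative value only; returns 0 on success,
	 * a negative errno or I2C status if the write failed */
	status = tps65010_config_vdcdc2(0x30);
	if (status)
		pr_err("example board: VDCDC2 setup failed (%d)\n", status);

	return status;
}
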
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl-core.c
index 40449cdf09db..2a7606534196 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1,5 +1,6 @@
/*
- * twl4030_core.c - driver for TWL4030/TPS659x0 PM and audio CODEC devices
+ * twl_core.c - driver for TWL4030/TWL5030/TWL60X0/TPS659x0 PM
+ * and audio CODEC devices
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
@@ -36,7 +37,7 @@
#include <linux/regulator/machine.h>
#include <linux/i2c.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
#include <plat/cpu.h>
@@ -55,7 +56,7 @@
* (and associated registers).
*/
-#define DRIVER_NAME "twl4030"
+#define DRIVER_NAME "twl"
#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
@@ -125,7 +126,7 @@
/* Last - for index max*/
#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
-#define TWL4030_NUM_SLAVES 4
+#define TWL_NUM_SLAVES 4
#if defined(CONFIG_INPUT_TWL4030_PWRBUTTON) \
|| defined(CONFIG_INPUT_TWL4030_PWBUTTON_MODULE)
@@ -134,6 +135,13 @@
#define twl_has_pwrbutton() false
#endif
+#define SUB_CHIP_ID0 0
+#define SUB_CHIP_ID1 1
+#define SUB_CHIP_ID2 2
+#define SUB_CHIP_ID3 3
+
+#define TWL_MODULE_LAST TWL4030_MODULE_LAST
+
/* Base Address defns for twl4030_map[] */
/* subchip/slave 0 - USB ID */
@@ -158,6 +166,10 @@
#define TWL4030_BASEADD_PWMB 0x00F1
#define TWL4030_BASEADD_KEYPAD 0x00D2
+#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */
+#define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's
+ one */
+
/* subchip/slave 3 - POWER ID */
#define TWL4030_BASEADD_BACKUP 0x0014
#define TWL4030_BASEADD_INT 0x002E
@@ -169,6 +181,30 @@
/* Triton Core internal information (END) */
+/* subchip/slave 0 0x48 - POWER */
+#define TWL6030_BASEADD_RTC 0x0000
+#define TWL6030_BASEADD_MEM 0x0017
+#define TWL6030_BASEADD_PM_MASTER 0x001F
+#define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */
+#define TWL6030_BASEADD_PM_MISC 0x00E2
+#define TWL6030_BASEADD_PM_PUPD 0x00F0
+
+/* subchip/slave 1 0x49 - FEATURE */
+#define TWL6030_BASEADD_USB 0x0000
+#define TWL6030_BASEADD_GPADC_CTRL 0x002E
+#define TWL6030_BASEADD_AUX 0x0090
+#define TWL6030_BASEADD_PWM 0x00BA
+#define TWL6030_BASEADD_GASGAUGE 0x00C0
+#define TWL6030_BASEADD_PIH 0x00D0
+#define TWL6030_BASEADD_CHARGER 0x00E0
+
+/* subchip/slave 2 0x4A - DFT */
+#define TWL6030_BASEADD_DIEID 0x00C0
+
+/* subchip/slave 3 0x4B - AUDIO */
+#define TWL6030_BASEADD_AUDIO 0x0000
+#define TWL6030_BASEADD_RSV 0x0000
+
/* Few power values */
#define R_CFG_BOOT 0x05
#define R_PROTECT_KEY 0x0E
@@ -183,19 +219,29 @@
#define HFCLK_FREQ_26_MHZ (2 << 0)
#define HFCLK_FREQ_38p4_MHZ (3 << 0)
#define HIGH_PERF_SQ (1 << 3)
+#define CK32K_LOWPWR_EN (1 << 7)
/* chip-specific feature flags, for i2c_device_id.driver_data */
#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */
#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */
+#define TWL5031 BIT(2) /* twl5031 has different registers */
+#define TWL6030_CLASS BIT(3) /* TWL6030 class */
/*----------------------------------------------------------------------*/
/* is driver active, bound to a chip? */
static bool inuse;
-/* Structure for each TWL4030 Slave */
-struct twl4030_client {
+static unsigned int twl_id;
+unsigned int twl_rev(void)
+{
+ return twl_id;
+}
+EXPORT_SYMBOL(twl_rev);
+
+/* Structure for each TWL4030/TWL6030 Slave */
+struct twl_client {
struct i2c_client *client;
u8 address;
@@ -206,19 +252,20 @@ struct twl4030_client {
struct mutex xfer_lock;
};
-static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES];
+static struct twl_client twl_modules[TWL_NUM_SLAVES];
/* mapping the module id to slave id and base address */
-struct twl4030mapping {
+struct twl_mapping {
unsigned char sid; /* Slave ID */
unsigned char base; /* base address */
};
+struct twl_mapping *twl_map;
-static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
/*
* NOTE: don't change this table without updating the
- * <linux/i2c/twl4030.h> defines for TWL4030_MODULE_*
+ * <linux/i2c/twl.h> defines for TWL4030_MODULE_*
* so they continue to match the order in this table.
*/
@@ -240,6 +287,8 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
{ 2, TWL4030_BASEADD_PWM1 },
{ 2, TWL4030_BASEADD_PWMA },
{ 2, TWL4030_BASEADD_PWMB },
+ { 2, TWL5031_BASEADD_ACCESSORY },
+ { 2, TWL5031_BASEADD_INTERRUPTS },
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
@@ -249,12 +298,46 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
{ 3, TWL4030_BASEADD_SECURED_REG },
};
+static struct twl_mapping twl6030_map[] = {
+ /*
+ * NOTE: don't change this table without updating the
+ * <linux/i2c/twl.h> defines for TWL4030_MODULE_*
+ * so they continue to match the order in this table.
+ */
+ { SUB_CHIP_ID1, TWL6030_BASEADD_USB },
+ { SUB_CHIP_ID3, TWL6030_BASEADD_AUDIO },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_DIEID },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_PIH },
+
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_GPADC_CTRL },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
+ { SUB_CHIP_ID1, TWL6030_BASEADD_CHARGER },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_GASGAUGE },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_PWM },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
+
+ { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
+};
+
/*----------------------------------------------------------------------*/
/* Exported Functions */
/**
- * twl4030_i2c_write - Writes a n bit register in TWL4030
+ * twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: an array of num_bytes+1 containing data to write
* @reg: register address (just offset will do)
@@ -265,19 +348,19 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
*
* Returns the result of operation - 0 is success
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
int sid;
- struct twl4030_client *twl;
+ struct twl_client *twl;
struct i2c_msg *msg;
- if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+ if (unlikely(mod_no > TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl4030_map[mod_no].sid;
- twl = &twl4030_modules[sid];
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
if (unlikely(!inuse)) {
pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
@@ -294,19 +377,26 @@ int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
msg->flags = 0;
msg->buf = value;
/* over write the first byte of buffer with the register address */
- *value = twl4030_map[mod_no].base + reg;
+ *value = twl_map[mod_no].base + reg;
ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
mutex_unlock(&twl->xfer_lock);
- /* i2cTransfer returns num messages.translate it pls.. */
- if (ret >= 0)
- ret = 0;
- return ret;
+ /* i2c_transfer returns number of messages transferred */
+ if (ret != 1) {
+ pr_err("%s: i2c_write failed to transfer all messages\n",
+ DRIVER_NAME);
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+ } else {
+ return 0;
+ }
}
-EXPORT_SYMBOL(twl4030_i2c_write);
+EXPORT_SYMBOL(twl_i2c_write);
/**
- * twl4030_i2c_read - Reads a n bit register in TWL4030
+ * twl_i2c_read - Reads a n bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: an array of num_bytes containing data to be read
* @reg: register address (just offset will do)
@@ -314,20 +404,20 @@ EXPORT_SYMBOL(twl4030_i2c_write);
*
* Returns result of operation - num_bytes is success else failure.
*/
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
u8 val;
int sid;
- struct twl4030_client *twl;
+ struct twl_client *twl;
struct i2c_msg *msg;
- if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+ if (unlikely(mod_no > TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl4030_map[mod_no].sid;
- twl = &twl4030_modules[sid];
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
if (unlikely(!inuse)) {
pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
@@ -339,7 +429,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
msg->addr = twl->address;
msg->len = 1;
msg->flags = 0; /* Read the register value */
- val = twl4030_map[mod_no].base + reg;
+ val = twl_map[mod_no].base + reg;
msg->buf = &val;
/* [MSG2] fill the data rx buffer */
msg = &twl->xfer_msg[1];
@@ -350,45 +440,52 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
mutex_unlock(&twl->xfer_lock);
- /* i2cTransfer returns num messages.translate it pls.. */
- if (ret >= 0)
- ret = 0;
- return ret;
+ /* i2c_transfer returns number of messages transferred */
+ if (ret != 2) {
+ pr_err("%s: i2c_read failed to transfer all messages\n",
+ DRIVER_NAME);
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+ } else {
+ return 0;
+ }
}
-EXPORT_SYMBOL(twl4030_i2c_read);
+EXPORT_SYMBOL(twl_i2c_read);
/**
- * twl4030_i2c_write_u8 - Writes a 8 bit register in TWL4030
+ * twl_i2c_write_u8 - Writes a 8 bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: the value to be written 8 bit
* @reg: register address (just offset will do)
*
* Returns result of operation - 0 is success
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
+int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
{
/* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
u8 temp_buffer[2] = { 0 };
/* offset 1 contains the data */
temp_buffer[1] = value;
- return twl4030_i2c_write(mod_no, temp_buffer, reg, 1);
+ return twl_i2c_write(mod_no, temp_buffer, reg, 1);
}
-EXPORT_SYMBOL(twl4030_i2c_write_u8);
+EXPORT_SYMBOL(twl_i2c_write_u8);
/**
- * twl4030_i2c_read_u8 - Reads a 8 bit register from TWL4030
+ * twl_i2c_read_u8 - Reads a 8 bit register from TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: the value read 8 bit
* @reg: register address (just offset will do)
*
* Returns result of operation - 0 is success
*/
-int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
+int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
{
- return twl4030_i2c_read(mod_no, value, reg, 1);
+ return twl_i2c_read(mod_no, value, reg, 1);
}
-EXPORT_SYMBOL(twl4030_i2c_read_u8);
+EXPORT_SYMBOL(twl_i2c_read_u8);
/*----------------------------------------------------------------------*/
@@ -398,7 +495,7 @@ add_numbered_child(unsigned chip, const char *name, int num,
bool can_wakeup, int irq0, int irq1)
{
struct platform_device *pdev;
- struct twl4030_client *twl = &twl4030_modules[chip];
+ struct twl_client *twl = &twl_modules[chip];
int status;
pdev = platform_device_alloc(name, num);
@@ -456,6 +553,7 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
struct regulator_consumer_supply *consumers,
unsigned num_consumers)
{
+ unsigned sub_chip_id;
/* regulator framework demands init_data ... */
if (!pdata)
return NULL;
@@ -466,7 +564,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
}
/* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- return add_numbered_child(3, "twl4030_reg", num,
+ sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
+ return add_numbered_child(sub_chip_id, "twl_reg", num,
pdata, sizeof(*pdata), false, 0, 0);
}
@@ -486,29 +585,32 @@ static int
add_children(struct twl4030_platform_data *pdata, unsigned long features)
{
struct device *child;
+ unsigned sub_chip_id;
- if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
+ if (twl_has_bci() && pdata->bci &&
+ !(features & (TPS_SUBSET | TWL5031))) {
child = add_child(3, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci),
false,
/* irq0 = CHG_PRES, irq1 = BCI */
- pdata->irq_base + 8 + 1, pdata->irq_base + 2);
+ pdata->irq_base + BCI_PRES_INTR_OFFSET,
+ pdata->irq_base + BCI_INTR_OFFSET);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (twl_has_gpio() && pdata->gpio) {
- child = add_child(1, "twl4030_gpio",
+ child = add_child(SUB_CHIP_ID1, "twl4030_gpio",
pdata->gpio, sizeof(*pdata->gpio),
- false, pdata->irq_base + 0, 0);
+ false, pdata->irq_base + GPIO_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (twl_has_keypad() && pdata->keypad) {
- child = add_child(2, "twl4030_keypad",
+ child = add_child(SUB_CHIP_ID2, "twl4030_keypad",
pdata->keypad, sizeof(*pdata->keypad),
- true, pdata->irq_base + 1, 0);
+ true, pdata->irq_base + KEYPAD_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
@@ -516,7 +618,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
if (twl_has_madc() && pdata->madc) {
child = add_child(2, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
- true, pdata->irq_base + 3, 0);
+ true, pdata->irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
@@ -529,14 +631,15 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
* Eventually, Linux might become more aware of such
* HW security concerns, and "least privilege".
*/
- child = add_child(3, "twl4030_rtc",
+ sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
+ child = add_child(sub_chip_id, "twl_rtc",
NULL, 0,
- true, pdata->irq_base + 8 + 3, 0);
+ true, pdata->irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (twl_has_usb() && pdata->usb) {
+ if (twl_has_usb() && pdata->usb && twl_class_is_4030()) {
static struct regulator_consumer_supply usb1v5 = {
.supply = "usb1v5",
@@ -581,7 +684,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
pdata->usb, sizeof(*pdata->usb),
true,
/* irq0 = USB_PRES, irq1 = USB */
- pdata->irq_base + 8 + 2, pdata->irq_base + 4);
+ pdata->irq_base + USB_PRES_INTR_OFFSET,
+ pdata->irq_base + USB_INTR_OFFSET);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -615,12 +719,23 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
- if (twl_has_regulator()) {
- /*
+ /* twl4030 regulators */
+ if (twl_has_regulator() && twl_class_is_4030()) {
child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1);
if (IS_ERR(child))
return PTR_ERR(child);
- */
+
+ child = add_regulator(TWL4030_REG_VIO, pdata->vio);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1);
if (IS_ERR(child))
@@ -636,10 +751,23 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
pdata->vaux2);
if (IS_ERR(child))
return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
}
/* maybe add LDOs that are omitted on cost-reduced parts */
- if (twl_has_regulator() && !(features & TPS_SUBSET)) {
+ if (twl_has_regulator() && !(features & TPS_SUBSET)
+ && twl_class_is_4030()) {
child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -665,6 +793,49 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
+ /* twl6030 regulators */
+ if (twl_has_regulator() && twl_class_is_6030()) {
+ child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VPP, pdata->vpp);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VANA, pdata->vana);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VDAC, pdata->vdac);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VUSB, pdata->vusb);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
return 0;
}
@@ -679,7 +850,7 @@ static inline int __init protect_pm_master(void)
{
int e = 0;
- e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK,
+ e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_LOCK,
R_PROTECT_KEY);
return e;
}
@@ -688,14 +859,15 @@ static inline int __init unprotect_pm_master(void)
{
int e = 0;
- e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1,
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK1,
R_PROTECT_KEY);
- e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2,
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK2,
R_PROTECT_KEY);
return e;
}
-static void clocks_init(struct device *dev)
+static void clocks_init(struct device *dev,
+ struct twl4030_clock_init_data *clock)
{
int e = 0;
struct clk *osc;
@@ -709,7 +881,7 @@ static void clocks_init(struct device *dev)
osc = clk_get(dev, "osc_sys_ck");
if (IS_ERR(osc)) {
- printk(KERN_WARNING "Skipping twl4030 internal clock init and "
+ printk(KERN_WARNING "Skipping twl internal clock init and "
"using bootloader value (unknown osc rate)\n");
return;
}
@@ -723,7 +895,7 @@ static void clocks_init(struct device *dev)
*/
osc = ERR_PTR(-EIO);
- printk(KERN_WARNING "Skipping twl4030 internal clock init and "
+ printk(KERN_WARNING "Skipping twl internal clock init and "
"using bootloader value (unknown osc rate)\n");
return;
@@ -742,9 +914,12 @@ static void clocks_init(struct device *dev)
}
ctrl |= HIGH_PERF_SQ;
+ if (clock && clock->ck32k_lowpwr_enable)
+ ctrl |= CK32K_LOWPWR_EN;
+
e |= unprotect_pm_master();
/* effect->MADC+USB ck en */
- e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
e |= protect_pm_master();
if (e < 0)
@@ -753,24 +928,31 @@ static void clocks_init(struct device *dev)
/*----------------------------------------------------------------------*/
-int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
-int twl_exit_irq(void);
+int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+int twl4030_exit_irq(void);
+int twl4030_init_chip_irq(const char *chip);
+int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+int twl6030_exit_irq(void);
-static int twl4030_remove(struct i2c_client *client)
+static int twl_remove(struct i2c_client *client)
{
unsigned i;
int status;
- status = twl_exit_irq();
+ if (twl_class_is_4030())
+ status = twl4030_exit_irq();
+ else
+ status = twl6030_exit_irq();
+
if (status < 0)
return status;
- for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
- struct twl4030_client *twl = &twl4030_modules[i];
+ for (i = 0; i < TWL_NUM_SLAVES; i++) {
+ struct twl_client *twl = &twl_modules[i];
if (twl->client && twl->client != client)
i2c_unregister_device(twl->client);
- twl4030_modules[i].client = NULL;
+ twl_modules[i].client = NULL;
}
inuse = false;
return 0;
@@ -778,7 +960,7 @@ static int twl4030_remove(struct i2c_client *client)
/* NOTE: this driver only handles a single twl4030/tps659x0 chip */
static int __init
-twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
+twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int status;
unsigned i;
@@ -799,8 +981,8 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -EBUSY;
}
- for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
- struct twl4030_client *twl = &twl4030_modules[i];
+ for (i = 0; i < TWL_NUM_SLAVES; i++) {
+ struct twl_client *twl = &twl_modules[i];
twl->address = client->addr + i;
if (i == 0)
@@ -814,15 +996,20 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
status = -ENOMEM;
goto fail;
}
- strlcpy(twl->client->name, id->name,
- sizeof(twl->client->name));
}
mutex_init(&twl->xfer_lock);
}
inuse = true;
+ if ((id->driver_data) & TWL6030_CLASS) {
+ twl_id = TWL6030_CLASS_ID;
+ twl_map = &twl6030_map[0];
+ } else {
+ twl_id = TWL4030_CLASS_ID;
+ twl_map = &twl4030_map[0];
+ }
/* setup clock framework */
- clocks_init(&client->dev);
+ clocks_init(&client->dev, pdata->clock);
/* load power event scripts */
if (twl_has_power() && pdata->power)
@@ -832,7 +1019,15 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->irq
&& pdata->irq_base
&& pdata->irq_end > pdata->irq_base) {
- status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
+ if (twl_class_is_4030()) {
+ twl4030_init_chip_irq(id->name);
+ status = twl4030_init_irq(client->irq, pdata->irq_base,
+ pdata->irq_end);
+ } else {
+ status = twl6030_init_irq(client->irq, pdata->irq_base,
+ pdata->irq_end);
+ }
+
if (status < 0)
goto fail;
}
@@ -840,40 +1035,42 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
status = add_children(pdata, id->driver_data);
fail:
if (status < 0)
- twl4030_remove(client);
+ twl_remove(client);
return status;
}
-static const struct i2c_device_id twl4030_ids[] = {
+static const struct i2c_device_id twl_ids[] = {
{ "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
{ "twl5030", 0 }, /* T2 updated */
+ { "twl5031", TWL5031 }, /* TWL5030 updated */
{ "tps65950", 0 }, /* catalog version of twl5030 */
{ "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
{ "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
+ { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(i2c, twl4030_ids);
+MODULE_DEVICE_TABLE(i2c, twl_ids);
/* One Client Driver, 4 Clients */
-static struct i2c_driver twl4030_driver = {
+static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
- .id_table = twl4030_ids,
- .probe = twl4030_probe,
- .remove = twl4030_remove,
+ .id_table = twl_ids,
+ .probe = twl_probe,
+ .remove = twl_remove,
};
-static int __init twl4030_init(void)
+static int __init twl_init(void)
{
- return i2c_add_driver(&twl4030_driver);
+ return i2c_add_driver(&twl_driver);
}
-subsys_initcall(twl4030_init);
+subsys_initcall(twl_init);
-static void __exit twl4030_exit(void)
+static void __exit twl_exit(void)
{
- i2c_del_driver(&twl4030_driver);
+ i2c_del_driver(&twl_driver);
}
-module_exit(twl4030_exit);
+module_exit(twl_exit);
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("I2C Core interface for TWL4030");
+MODULE_DESCRIPTION("I2C Core interface for TWL");
MODULE_LICENSE("GPL");
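
Editor's note: the twl4030_i2c_* to twl_i2c_* rename above defines the accessor names that every sub-driver in the rest of this patch switches to. A hedged sketch of the read-modify-write pattern they support; example_set_highperf() is a hypothetical helper, while the module and register symbols are the ones used by clocks_init() above.

	static int example_set_highperf(void)
	{
		u8 ctrl;
		int err;

		err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &ctrl, R_CFG_BOOT);
		if (err < 0)
			return err;
		ctrl |= HIGH_PERF_SQ;

		/* R_CFG_BOOT is key-protected, so bracket the write like clocks_init() */
		err = unprotect_pm_master();
		err |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
		err |= protect_pm_master();
		return err;
	}
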
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index 77b914907d7c..700b149c1b91 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -26,7 +26,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl4030-codec.h>
@@ -56,7 +56,7 @@ static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
u8 val;
- twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
codec->resource[id].reg);
if (enable)
@@ -64,7 +64,7 @@ static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
else
val &= ~codec->resource[id].mask;
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
val, codec->resource[id].reg);
return val;
@@ -75,7 +75,7 @@ static inline int twl4030_codec_get_resource(enum twl4030_codec_res id)
struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
u8 val;
- twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
codec->resource[id].reg);
return val;
@@ -183,7 +183,7 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid audio_mclk\n");
return -EINVAL;
}
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
val, TWL4030_REG_APLL_CTL);
codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index fb194fe244c1..20d29bafc9f5 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -32,7 +32,7 @@
#include <linux/irq.h>
#include <linux/kthread.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
@@ -74,6 +74,8 @@ struct sih {
u8 edr_offset;
u8 bytes_edr; /* bytelen of EDR */
+ u8 irq_lines; /* number of supported irq lines */
+
/* SIR ignored -- set interrupt, for testing only */
struct irq_data {
u8 isr_offset;
@@ -82,6 +84,9 @@ struct sih {
/* + 2 bytes padding */
};
+static const struct sih *sih_modules;
+static int nr_sih_modules;
+
#define SIH_INITIALIZER(modname, nbits) \
.module = TWL4030_MODULE_ ## modname, \
.control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
@@ -89,6 +94,7 @@ struct sih {
.bytes_ixr = DIV_ROUND_UP(nbits, 8), \
.edr_offset = TWL4030_ ## modname ## _EDR, \
.bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \
+ .irq_lines = 2, \
.mask = { { \
.isr_offset = TWL4030_ ## modname ## _ISR1, \
.imr_offset = TWL4030_ ## modname ## _IMR1, \
@@ -107,7 +113,8 @@ struct sih {
/* Order in this table matches order in PIH_ISR. That is,
* BIT(n) in PIH_ISR is sih_modules[n].
*/
-static const struct sih sih_modules[6] = {
+/* sih_modules_twl4030 is used by both the twl4030 and the twl5030 */
+static const struct sih sih_modules_twl4030[6] = {
[0] = {
.name = "gpio",
.module = TWL4030_MODULE_GPIO,
@@ -118,6 +125,7 @@ static const struct sih sih_modules[6] = {
/* Note: *all* of these IRQs default to no-trigger */
.edr_offset = REG_GPIO_EDR1,
.bytes_edr = 5,
+ .irq_lines = 2,
.mask = { {
.isr_offset = REG_GPIO_ISR1A,
.imr_offset = REG_GPIO_IMR1A,
@@ -140,6 +148,7 @@ static const struct sih sih_modules[6] = {
.edr_offset = TWL4030_INTERRUPTS_BCIEDR1,
/* Note: most of these IRQs default to no-trigger */
.bytes_edr = 3,
+ .irq_lines = 2,
.mask = { {
.isr_offset = TWL4030_INTERRUPTS_BCIISR1A,
.imr_offset = TWL4030_INTERRUPTS_BCIIMR1A,
@@ -164,6 +173,99 @@ static const struct sih sih_modules[6] = {
/* there are no SIH modules #6 or #7 ... */
};
+static const struct sih sih_modules_twl5031[8] = {
+ [0] = {
+ .name = "gpio",
+ .module = TWL4030_MODULE_GPIO,
+ .control_offset = REG_GPIO_SIH_CTRL,
+ .set_cor = true,
+ .bits = TWL4030_GPIO_MAX,
+ .bytes_ixr = 3,
+ /* Note: *all* of these IRQs default to no-trigger */
+ .edr_offset = REG_GPIO_EDR1,
+ .bytes_edr = 5,
+ .irq_lines = 2,
+ .mask = { {
+ .isr_offset = REG_GPIO_ISR1A,
+ .imr_offset = REG_GPIO_IMR1A,
+ }, {
+ .isr_offset = REG_GPIO_ISR1B,
+ .imr_offset = REG_GPIO_IMR1B,
+ }, },
+ },
+ [1] = {
+ .name = "keypad",
+ .set_cor = true,
+ SIH_INITIALIZER(KEYPAD_KEYP, 4)
+ },
+ [2] = {
+ .name = "bci",
+ .module = TWL5031_MODULE_INTERRUPTS,
+ .control_offset = TWL5031_INTERRUPTS_BCISIHCTRL,
+ .bits = 7,
+ .bytes_ixr = 1,
+ .edr_offset = TWL5031_INTERRUPTS_BCIEDR1,
+ /* Note: most of these IRQs default to no-trigger */
+ .bytes_edr = 2,
+ .irq_lines = 2,
+ .mask = { {
+ .isr_offset = TWL5031_INTERRUPTS_BCIISR1,
+ .imr_offset = TWL5031_INTERRUPTS_BCIIMR1,
+ }, {
+ .isr_offset = TWL5031_INTERRUPTS_BCIISR2,
+ .imr_offset = TWL5031_INTERRUPTS_BCIIMR2,
+ }, },
+ },
+ [3] = {
+ .name = "madc",
+ SIH_INITIALIZER(MADC, 4)
+ },
+ [4] = {
+ /* USB doesn't use the same SIH organization */
+ .name = "usb",
+ },
+ [5] = {
+ .name = "power",
+ .set_cor = true,
+ SIH_INITIALIZER(INT_PWR, 8)
+ },
+ [6] = {
+ /*
+ * ACI doesn't use the same SIH organization.
+ * For example, it supports only one interrupt line.
+ */
+ .name = "aci",
+ .module = TWL5031_MODULE_ACCESSORY,
+ .bits = 9,
+ .bytes_ixr = 2,
+ .irq_lines = 1,
+ .mask = { {
+ .isr_offset = TWL5031_ACIIDR_LSB,
+ .imr_offset = TWL5031_ACIIMR_LSB,
+ }, },
+
+ },
+ [7] = {
+ /* Accessory */
+ .name = "acc",
+ .module = TWL5031_MODULE_ACCESSORY,
+ .control_offset = TWL5031_ACCSIHCTRL,
+ .bits = 2,
+ .bytes_ixr = 1,
+ .edr_offset = TWL5031_ACCEDR1,
+ /* Note: most of these IRQs default to no-trigger */
+ .bytes_edr = 1,
+ .irq_lines = 2,
+ .mask = { {
+ .isr_offset = TWL5031_ACCISR1,
+ .imr_offset = TWL5031_ACCIMR1,
+ }, {
+ .isr_offset = TWL5031_ACCISR2,
+ .imr_offset = TWL5031_ACCIMR2,
+ }, },
+ },
+};
+
#undef TWL4030_MODULE_KEYPAD_KEYP
#undef TWL4030_MODULE_INT_PWR
#undef TWL4030_INT_PWR_EDR
@@ -194,7 +296,7 @@ static int twl4030_irq_thread(void *data)
/* Wait for IRQ, then read PIH irq status (also blocking) */
wait_for_completion_interruptible(&irq_event);
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
+ ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
REG_PIH_ISR_P1);
if (ret) {
pr_warning("twl4030: I2C error %d reading PIH ISR\n",
@@ -284,13 +386,17 @@ static int twl4030_init_sih_modules(unsigned line)
/* disable all interrupts on our line */
memset(buf, 0xff, sizeof buf);
sih = sih_modules;
- for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+ for (i = 0; i < nr_sih_modules; i++, sih++) {
/* skip USB -- it's funky */
if (!sih->bytes_ixr)
continue;
- status = twl4030_i2c_write(sih->module, buf,
+ /* Not all the SIH modules support multiple interrupt lines */
+ if (sih->irq_lines <= line)
+ continue;
+
+ status = twl_i2c_write(sih->module, buf,
sih->mask[line].imr_offset, sih->bytes_ixr);
if (status < 0)
pr_err("twl4030: err %d initializing %s %s\n",
@@ -304,7 +410,7 @@ static int twl4030_init_sih_modules(unsigned line)
* And for PWR_INT it's not documented...
*/
if (sih->set_cor) {
- status = twl4030_i2c_write_u8(sih->module,
+ status = twl_i2c_write_u8(sih->module,
TWL4030_SIH_CTRL_COR_MASK,
sih->control_offset);
if (status < 0)
@@ -314,7 +420,7 @@ static int twl4030_init_sih_modules(unsigned line)
}
sih = sih_modules;
- for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+ for (i = 0; i < nr_sih_modules; i++, sih++) {
u8 rxbuf[4];
int j;
@@ -322,20 +428,24 @@ static int twl4030_init_sih_modules(unsigned line)
if (!sih->bytes_ixr)
continue;
+ /* Not all the SIH modules support multiple interrupt lines */
+ if (sih->irq_lines <= line)
+ continue;
+
/* Clear pending interrupt status. Either the read was
* enough, or we need to write those bits. Repeat, in
* case an IRQ is pending (PENDDIS=0) ... that's not
* uncommon with PWR_INT.PWRON.
*/
for (j = 0; j < 2; j++) {
- status = twl4030_i2c_read(sih->module, rxbuf,
+ status = twl_i2c_read(sih->module, rxbuf,
sih->mask[line].isr_offset, sih->bytes_ixr);
if (status < 0)
pr_err("twl4030: err %d initializing %s %s\n",
status, sih->name, "ISR");
if (!sih->set_cor)
- status = twl4030_i2c_write(sih->module, buf,
+ status = twl_i2c_write(sih->module, buf,
sih->mask[line].isr_offset,
sih->bytes_ixr);
/* else COR=1 means read sufficed.
@@ -404,7 +514,7 @@ static void twl4030_sih_do_mask(struct work_struct *work)
return;
/* write the whole mask ... simpler than subsetting it */
- status = twl4030_i2c_write(sih->module, imr.bytes,
+ status = twl_i2c_write(sih->module, imr.bytes,
sih->mask[irq_line].imr_offset, sih->bytes_ixr);
if (status)
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -435,7 +545,7 @@ static void twl4030_sih_do_edge(struct work_struct *work)
* any processor on the other IRQ line, EDR registers are
* shared.
*/
- status = twl4030_i2c_read(sih->module, bytes + 1,
+ status = twl_i2c_read(sih->module, bytes + 1,
sih->edr_offset, sih->bytes_edr);
if (status) {
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -469,7 +579,7 @@ static void twl4030_sih_do_edge(struct work_struct *work)
}
/* Write */
- status = twl4030_i2c_write(sih->module, bytes,
+ status = twl_i2c_write(sih->module, bytes,
sih->edr_offset, sih->bytes_edr);
if (status)
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -554,7 +664,7 @@ static inline int sih_read_isr(const struct sih *sih)
/* FIXME need retry-on-error ... */
isr.word = 0;
- status = twl4030_i2c_read(sih->module, isr.bytes,
+ status = twl_i2c_read(sih->module, isr.bytes,
sih->mask[irq_line].isr_offset, sih->bytes_ixr);
return (status < 0) ? status : le32_to_cpu(isr.word);
@@ -611,7 +721,7 @@ int twl4030_sih_setup(int module)
/* only support modules with standard clear-on-read for now */
for (sih_mod = 0, sih = sih_modules;
- sih_mod < ARRAY_SIZE(sih_modules);
+ sih_mod < nr_sih_modules;
sih_mod++, sih++) {
if (sih->module == module && sih->set_cor) {
if (!WARN((irq_base + sih->bits) > NR_IRQS,
@@ -668,7 +778,7 @@ int twl4030_sih_setup(int module)
/* FIXME pass in which interrupt line we'll use ... */
#define twl_irq_line 0
-int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{
static struct irq_chip twl4030_irq_chip;
@@ -728,7 +838,8 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
goto fail_rqirq;
}
- task = kthread_run(twl4030_irq_thread, (void *)irq_num, "twl4030-irq");
+ task = kthread_run(twl4030_irq_thread, (void *)(long)irq_num,
+ "twl4030-irq");
if (IS_ERR(task)) {
pr_err("twl4030: could not create irq %d thread!\n", irq_num);
status = PTR_ERR(task);
@@ -747,7 +858,7 @@ fail:
return status;
}
-int twl_exit_irq(void)
+int twl4030_exit_irq(void)
{
/* FIXME undo twl_init_irq() */
if (twl4030_irq_base) {
@@ -756,3 +867,16 @@ int twl_exit_irq(void)
}
return 0;
}
+
+int twl4030_init_chip_irq(const char *chip)
+{
+ if (!strcmp(chip, "twl5031")) {
+ sih_modules = sih_modules_twl5031;
+ nr_sih_modules = ARRAY_SIZE(sih_modules_twl5031);
+ } else {
+ sih_modules = sih_modules_twl4030;
+ nr_sih_modules = ARRAY_SIZE(sih_modules_twl4030);
+ }
+
+ return 0;
+}
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index d423e0c4176b..0815292fdafc 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -26,7 +26,7 @@
#include <linux/module.h>
#include <linux/pm.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
@@ -67,19 +67,35 @@ static u8 twl4030_start_script_address = 0x2b;
#define R_KEY_1 0xC0
#define R_KEY_2 0x0C
-/* resource configuration registers */
-
-#define DEVGROUP_OFFSET 0
+/* Resource configuration registers:
+ *   <RESOURCE>_DEV_GRP    at address 'n+0'
+ *   <RESOURCE>_TYPE       at address 'n+1'
+ *   <RESOURCE>_REMAP      at address 'n+2'
+ *   <RESOURCE>_DEDICATED  at address 'n+3'
+ */
+#define DEV_GRP_OFFSET 0
#define TYPE_OFFSET 1
+#define REMAP_OFFSET 2
+#define DEDICATED_OFFSET 3
+
+/* Bit positions in the registers */
+
+/* <RESOURCE>_DEV_GRP */
+#define DEV_GRP_SHIFT 5
+#define DEV_GRP_MASK (7 << DEV_GRP_SHIFT)
-/* Bit positions */
-#define DEVGROUP_SHIFT 5
-#define DEVGROUP_MASK (7 << DEVGROUP_SHIFT)
+/* <RESOURCE>_TYPE */
#define TYPE_SHIFT 0
#define TYPE_MASK (7 << TYPE_SHIFT)
#define TYPE2_SHIFT 3
#define TYPE2_MASK (3 << TYPE2_SHIFT)
+/* <RESOURCE>_REMAP */
+#define SLEEP_STATE_SHIFT 0
+#define SLEEP_STATE_MASK (0xf << SLEEP_STATE_SHIFT)
+#define OFF_STATE_SHIFT 4
+#define OFF_STATE_MASK (0xf << OFF_STATE_SHIFT)
+
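
Editor's note: with the base addresses in res_config_addrs[] below, the four per-resource registers are reached by simple offset arithmetic. For RES_VAUX1 (base 0x17) that works out as in this sketch; the register names in the comments just follow the <RESOURCE>_* pattern above.

	u8 base = res_config_addrs[RES_VAUX1];		/* 0x17 */
	u8 dev_grp_reg   = base + DEV_GRP_OFFSET;	/* 0x17: VAUX1_DEV_GRP */
	u8 type_reg      = base + TYPE_OFFSET;		/* 0x18: VAUX1_TYPE */
	u8 remap_reg     = base + REMAP_OFFSET;		/* 0x19: VAUX1_REMAP */
	u8 dedicated_reg = base + DEDICATED_OFFSET;	/* 0x1a: VAUX1_DEDICATED */
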
static u8 res_config_addrs[] = {
[RES_VAUX1] = 0x17,
[RES_VAUX2] = 0x1b,
@@ -115,11 +131,11 @@ static int __init twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_MEMORY_ADDRESS);
if (err)
goto out;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
R_MEMORY_DATA);
out:
return err;
@@ -176,18 +192,18 @@ static int __init twl4030_config_wakeup3_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P3 */
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A3);
if (err)
goto out;
/* P3 LVL_WAKEUP should be on LEVEL */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P3_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P3_SW_EVENTS);
out:
if (err)
@@ -201,42 +217,42 @@ static int __init twl4030_config_wakeup12_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A12);
if (err)
goto out;
/* P1/P2 LVL_WAKEUP should be on LEVEL */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P1_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P2_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P2_SW_EVENTS);
if (err)
goto out;
if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
/* Disabling AC charger effect on sleep-active transitions */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_CFG_P1_TRANSITION);
if (err)
goto out;
data &= ~(1<<1);
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_CFG_P1_TRANSITION);
if (err)
goto out;
@@ -254,7 +270,7 @@ static int __init twl4030_config_sleep_sequence(u8 address)
int err;
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_A2S);
if (err)
@@ -269,41 +285,41 @@ static int __init twl4030_config_warmreset_sequence(u8 address)
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_WARM);
if (err)
goto out;
/* P1/P2/P3 enable WARMRESET */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P1_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P2_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P2_SW_EVENTS);
if (err)
goto out;
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P3_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P3_SW_EVENTS);
out:
if (err)
@@ -317,6 +333,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
int err;
u8 type;
u8 grp;
+ u8 remap;
if (rconfig->resource > TOTAL_RESOURCES) {
pr_err("TWL4030 Resource %d does not exist\n",
@@ -327,19 +344,19 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
- rconfig_addr + DEVGROUP_OFFSET);
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
+ rconfig_addr + DEV_GRP_OFFSET);
if (err) {
pr_err("TWL4030 Resource %d group could not be read\n",
rconfig->resource);
return err;
}
- if (rconfig->devgroup >= 0) {
- grp &= ~DEVGROUP_MASK;
- grp |= rconfig->devgroup << DEVGROUP_SHIFT;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
- grp, rconfig_addr + DEVGROUP_OFFSET);
+ if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
+ grp &= ~DEV_GRP_MASK;
+ grp |= rconfig->devgroup << DEV_GRP_SHIFT;
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ grp, rconfig_addr + DEV_GRP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program devgroup\n");
return err;
@@ -347,7 +364,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
}
/* Set resource types */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d type could not be read\n",
@@ -355,23 +372,50 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
return err;
}
- if (rconfig->type >= 0) {
+ if (rconfig->type != TWL4030_RESCONFIG_UNDEF) {
type &= ~TYPE_MASK;
type |= rconfig->type << TYPE_SHIFT;
}
- if (rconfig->type2 >= 0) {
+ if (rconfig->type2 != TWL4030_RESCONFIG_UNDEF) {
type &= ~TYPE2_MASK;
type |= rconfig->type2 << TYPE2_SHIFT;
}
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program resource type\n");
return err;
}
+ /* Set remap states */
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
+ rconfig_addr + REMAP_OFFSET);
+ if (err < 0) {
+ pr_err("TWL4030 Resource %d remap could not be read\n",
+ rconfig->resource);
+ return err;
+ }
+
+ if (rconfig->remap_off != TWL4030_RESCONFIG_UNDEF) {
+ remap &= ~OFF_STATE_MASK;
+ remap |= rconfig->remap_off << OFF_STATE_SHIFT;
+ }
+
+ if (rconfig->remap_sleep != TWL4030_RESCONFIG_UNDEF) {
+ remap &= ~SLEEP_STATE_MASK;
+ remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
+ }
+
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ remap,
+ rconfig_addr + REMAP_OFFSET);
+ if (err < 0) {
+ pr_err("TWL4030 failed to program remap\n");
+ return err;
+ }
+
return 0;
}
@@ -424,12 +468,12 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
struct twl4030_resconfig *resconfig;
u8 address = twl4030_start_script_address;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1,
R_PROTECT_KEY);
if (err)
goto unlock;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2,
R_PROTECT_KEY);
if (err)
goto unlock;
@@ -452,7 +496,7 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
}
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
new file mode 100644
index 000000000000..10bf228ad626
--- /dev/null
+++ b/drivers/mfd/twl6030-irq.c
@@ -0,0 +1,299 @@
+/*
+ * twl6030-irq.c - TWL6030 irq support
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ *
+ * Modifications to defer interrupt handling to a kernel thread:
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Code cleanup and modifications to IRQ handler.
+ * by syed khasim <x0khasim@ti.com>
+ *
+ * TWL6030 specific code and IRQ handling changes by
+ * Jagadeesh Bhaskar Pakaravoor <j-pakaravoor@ti.com>
+ * Balaji T K <balajitk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <linux/i2c/twl.h>
+
+/*
+ * TWL6030 (unlike its predecessors, which used two-level interrupt handling)
+ * has three interrupt status registers: INT_STS_A, INT_STS_B and INT_STS_C.
+ * They expose status bits saying which source has raised an interrupt. Three
+ * corresponding mask registers enable/disable these interrupts.
+ *
+ * We set up IRQs starting at a platform-specified base. An interrupt map
+ * table specifies the mapping between an interrupt number and the module
+ * that raised it.
+ */
+
+static int twl6030_interrupt_mapping[24] = {
+ PWR_INTR_OFFSET, /* Bit 0 PWRON */
+ PWR_INTR_OFFSET, /* Bit 1 RPWRON */
+ PWR_INTR_OFFSET, /* Bit 2 BAT_VLOW */
+ RTC_INTR_OFFSET, /* Bit 3 RTC_ALARM */
+ RTC_INTR_OFFSET, /* Bit 4 RTC_PERIOD */
+ HOTDIE_INTR_OFFSET, /* Bit 5 HOT_DIE */
+ SMPSLDO_INTR_OFFSET, /* Bit 6 VXXX_SHORT */
+ SMPSLDO_INTR_OFFSET, /* Bit 7 VMMC_SHORT */
+
+ SMPSLDO_INTR_OFFSET, /* Bit 8 VUSIM_SHORT */
+ BATDETECT_INTR_OFFSET, /* Bit 9 BAT */
+ SIMDETECT_INTR_OFFSET, /* Bit 10 SIM */
+ MMCDETECT_INTR_OFFSET, /* Bit 11 MMC */
+ RSV_INTR_OFFSET, /* Bit 12 Reserved */
+ MADC_INTR_OFFSET, /* Bit 13 GPADC_RT_EOC */
+ MADC_INTR_OFFSET, /* Bit 14 GPADC_SW_EOC */
+ GASGAUGE_INTR_OFFSET, /* Bit 15 CC_AUTOCAL */
+
+ USBOTG_INTR_OFFSET, /* Bit 16 ID_WKUP */
+ USBOTG_INTR_OFFSET, /* Bit 17 VBUS_WKUP */
+ USBOTG_INTR_OFFSET, /* Bit 18 ID */
+ USBOTG_INTR_OFFSET, /* Bit 19 VBUS */
+ CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */
+ CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */
+ CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */
+ RSV_INTR_OFFSET, /* Bit 23 Reserved */
+};
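
Editor's note: each set bit in the packed INT_STS_{A,B,C} word indexes this table to find the Linux IRQ to dispatch. For example, bit 11 (MMC detect) resolves as in this sketch:

	int bit = 11;	/* MMC detect, per the table above */
	int module_irq = twl6030_irq_base + twl6030_interrupt_mapping[bit];
	/* i.e. twl6030_irq_base + MMCDETECT_INTR_OFFSET, dispatched through
	 * d->handle_irq() in twl6030_irq_thread() below */
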
+/*----------------------------------------------------------------------*/
+
+static unsigned twl6030_irq_base;
+
+static struct completion irq_event;
+
+/*
+ * This thread processes interrupts reported by the Primary Interrupt Handler.
+ */
+static int twl6030_irq_thread(void *data)
+{
+ long irq = (long)data;
+ static unsigned i2c_errors;
+ static const unsigned max_i2c_errors = 100;
+ int ret;
+
+ current->flags |= PF_NOFREEZE;
+
+ while (!kthread_should_stop()) {
+ int i;
+ union {
+ u8 bytes[4];
+ u32 int_sts;
+ } sts;
+
+ /* Wait for IRQ, then read PIH irq status (also blocking) */
+ wait_for_completion_interruptible(&irq_event);
+
+ /* read INT_STS_A, B and C in one shot using a burst read */
+ ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
+ REG_INT_STS_A, 3);
+ if (ret) {
+ pr_warning("twl6030: I2C error %d reading PIH ISR\n",
+ ret);
+ if (++i2c_errors >= max_i2c_errors) {
+ printk(KERN_ERR "Maximum I2C error count"
+ " exceeded. Terminating %s.\n",
+ __func__);
+ break;
+ }
+ complete(&irq_event);
+ continue;
+ }
+
+ sts.bytes[3] = 0; /* Only 24 bits are valid */
+
+ for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
+ local_irq_disable();
+ if (sts.int_sts & 0x1) {
+ int module_irq = twl6030_irq_base +
+ twl6030_interrupt_mapping[i];
+ struct irq_desc *d = irq_to_desc(module_irq);
+
+ if (!d) {
+ pr_err("twl6030: Invalid SIH IRQ: %d\n",
+ module_irq);
+ return -EINVAL;
+ }
+
+ /* These can't be masked ... always warn
+ * if we get any surprises.
+ */
+ if (d->status & IRQ_DISABLED)
+ note_interrupt(module_irq, d,
+ IRQ_NONE);
+ else
+ d->handle_irq(module_irq, d);
+
+ }
+ local_irq_enable();
+ }
+ ret = twl_i2c_write(TWL_MODULE_PIH, sts.bytes,
+ REG_INT_STS_A, 3); /* clear INT_STS_A */
+ if (ret)
+ pr_warning("twl6030: I2C error in clearing PIH ISR\n");
+
+ enable_irq(irq);
+ }
+
+ return 0;
+}
+
+/*
+ * handle_twl6030_pih() is the desc->handle method for the twl6030 interrupt.
+ * This is a chained interrupt, so there is no desc->action method for it.
+ * Now we need to query the interrupt controller in the twl6030 to determine
+ * which module is generating the interrupt request. However, we can't do i2c
+ * transactions in interrupt context, so we must defer that work to a kernel
+ * thread. All we do here is acknowledge and mask the interrupt and wake up
+ * the kernel thread.
+ */
+static irqreturn_t handle_twl6030_pih(int irq, void *devid)
+{
+ disable_irq_nosync(irq);
+ complete(devid);
+ return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+static inline void activate_irq(int irq)
+{
+#ifdef CONFIG_ARM
+ /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
+ * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
+ */
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ /* same effect on other architectures */
+ set_irq_noprobe(irq);
+#endif
+}
+
+/*----------------------------------------------------------------------*/
+
+static unsigned twl6030_irq_next;
+
+/*----------------------------------------------------------------------*/
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset)
+{
+ int ret;
+ u8 unmask_value;
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &unmask_value,
+ REG_INT_STS_A + offset);
+ unmask_value &= (~(bit_mask));
+ ret |= twl_i2c_write_u8(TWL_MODULE_PIH, unmask_value,
+ REG_INT_STS_A + offset); /* unmask INT_MSK_A/B/C */
+ return ret;
+}
+EXPORT_SYMBOL(twl6030_interrupt_unmask);
+
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset)
+{
+ int ret;
+ u8 mask_value;
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &mask_value,
+ REG_INT_STS_A + offset);
+ mask_value |= (bit_mask);
+ ret |= twl_i2c_write_u8(TWL_MODULE_PIH, mask_value,
+ REG_INT_STS_A + offset); /* mask INT_MSK_A/B/C */
+ return ret;
+}
+EXPORT_SYMBOL(twl6030_interrupt_mask);
+
+int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+{
+
+ int status = 0;
+ int i;
+ struct task_struct *task;
+ int ret;
+ u8 mask[4];
+
+ static struct irq_chip twl6030_irq_chip;
+ mask[1] = 0xFF;
+ mask[2] = 0xFF;
+ mask[3] = 0xFF;
+ ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
+ REG_INT_MSK_LINE_A, 3); /* MASK ALL INT LINES */
+ ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
+ REG_INT_MSK_STS_A, 3); /* MASK ALL INT STS */
+ ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
+ REG_INT_STS_A, 3); /* clear INT_STS_A,B,C */
+
+ twl6030_irq_base = irq_base;
+
+ /* install an irq handler for each of the modules;
+ * clone dummy irq_chip since PIH can't *do* anything
+ */
+ twl6030_irq_chip = dummy_irq_chip;
+ twl6030_irq_chip.name = "twl6030";
+ twl6030_irq_chip.set_type = NULL;
+
+ for (i = irq_base; i < irq_end; i++) {
+ set_irq_chip_and_handler(i, &twl6030_irq_chip,
+ handle_simple_irq);
+ activate_irq(i);
+ }
+
+ twl6030_irq_next = i;
+ pr_info("twl6030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
+ irq_num, irq_base, twl6030_irq_next - 1);
+
+ /* install an irq handler to demultiplex the TWL6030 interrupt */
+ init_completion(&irq_event);
+ task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
+ if (IS_ERR(task)) {
+ pr_err("twl6030: could not create irq %d thread!\n", irq_num);
+ status = PTR_ERR(task);
+ goto fail_kthread;
+ }
+
+ status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED,
+ "TWL6030-PIH", &irq_event);
+ if (status < 0) {
+ pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
+ goto fail_irq;
+ }
+ return status;
+fail_irq:
+ free_irq(irq_num, &irq_event);
+
+fail_kthread:
+ for (i = irq_base; i < irq_end; i++)
+ set_irq_chip_and_handler(i, NULL, NULL);
+ return status;
+}
+
+int twl6030_exit_irq(void)
+{
+
+ if (twl6030_irq_base) {
+ pr_err("twl6030: can't yet clean up IRQs?\n");
+ return -ENOSYS;
+ }
+ return 0;
+}
+
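
Editor's note: nothing in this file claims the PIH interrupt until twl_probe() sees both a client IRQ and a valid irq_base/irq_end range in the platform data, so a board wires it up roughly as in this sketch. The bus address, the IRQ line and the TWL6030_IRQ_* symbols are board-specific assumptions, not part of this patch.

	static struct twl4030_platform_data board_twl6030_pdata = {
		.irq_base	= TWL6030_IRQ_BASE,	/* board/mach specific */
		.irq_end	= TWL6030_IRQ_END,
		/* ... clock and regulator init data ... */
	};

	static struct i2c_board_info __initdata board_pmic_i2c[] = {
		{
			I2C_BOARD_INFO("twl6030", 0x48),	/* bus address is board-specific */
			.irq		= BOARD_SYS_NIRQ,	/* hypothetical SoC interrupt */
			.platform_data	= &board_twl6030_pdata,
		},
	};
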
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 7f27576ca046..4b2021af1d96 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -90,9 +90,10 @@ int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
EXPORT_SYMBOL_GPL(wm831x_isinkv_values);
enum wm831x_parent {
- WM8310 = 0,
- WM8311 = 1,
- WM8312 = 2,
+ WM8310 = 0x8310,
+ WM8311 = 0x8311,
+ WM8312 = 0x8312,
+ WM8320 = 0x8320,
};
static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
@@ -478,6 +479,20 @@ static struct resource wm831x_dcdc4_resources[] = {
},
};
+static struct resource wm8320_dcdc4_buck_resources[] = {
+ {
+ .start = WM831X_DC4_CONTROL,
+ .end = WM832X_DC4_SLEEP_CONTROL,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "UV",
+ .start = WM831X_IRQ_UV_DC4,
+ .end = WM831X_IRQ_UV_DC4,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct resource wm831x_gpio_resources[] = {
{
.start = WM831X_IRQ_GPIO_1,
@@ -794,6 +809,9 @@ static struct resource wm831x_wdt_resources[] = {
static struct mfd_cell wm8310_devs[] = {
{
+ .name = "wm831x-backup",
+ },
+ {
.name = "wm831x-buckv",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -947,6 +965,9 @@ static struct mfd_cell wm8310_devs[] = {
static struct mfd_cell wm8311_devs[] = {
{
+ .name = "wm831x-backup",
+ },
+ {
.name = "wm831x-buckv",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -1081,6 +1102,9 @@ static struct mfd_cell wm8311_devs[] = {
static struct mfd_cell wm8312_devs[] = {
{
+ .name = "wm831x-backup",
+ },
+ {
.name = "wm831x-buckv",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -1237,6 +1261,137 @@ static struct mfd_cell wm8312_devs[] = {
},
};
+static struct mfd_cell wm8320_devs[] = {
+ {
+ .name = "wm831x-backup",
+ },
+ {
+ .name = "wm831x-buckv",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
+ .resources = wm831x_dcdc1_resources,
+ },
+ {
+ .name = "wm831x-buckv",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(wm831x_dcdc2_resources),
+ .resources = wm831x_dcdc2_resources,
+ },
+ {
+ .name = "wm831x-buckp",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(wm831x_dcdc3_resources),
+ .resources = wm831x_dcdc3_resources,
+ },
+ {
+ .name = "wm831x-buckp",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(wm8320_dcdc4_buck_resources),
+ .resources = wm8320_dcdc4_buck_resources,
+ },
+ {
+ .name = "wm831x-gpio",
+ .num_resources = ARRAY_SIZE(wm831x_gpio_resources),
+ .resources = wm831x_gpio_resources,
+ },
+ {
+ .name = "wm831x-hwmon",
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wm831x_ldo1_resources),
+ .resources = wm831x_ldo1_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(wm831x_ldo2_resources),
+ .resources = wm831x_ldo2_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(wm831x_ldo3_resources),
+ .resources = wm831x_ldo3_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(wm831x_ldo4_resources),
+ .resources = wm831x_ldo4_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 5,
+ .num_resources = ARRAY_SIZE(wm831x_ldo5_resources),
+ .resources = wm831x_ldo5_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 6,
+ .num_resources = ARRAY_SIZE(wm831x_ldo6_resources),
+ .resources = wm831x_ldo6_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 7,
+ .num_resources = ARRAY_SIZE(wm831x_ldo7_resources),
+ .resources = wm831x_ldo7_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 8,
+ .num_resources = ARRAY_SIZE(wm831x_ldo8_resources),
+ .resources = wm831x_ldo8_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 9,
+ .num_resources = ARRAY_SIZE(wm831x_ldo9_resources),
+ .resources = wm831x_ldo9_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 10,
+ .num_resources = ARRAY_SIZE(wm831x_ldo10_resources),
+ .resources = wm831x_ldo10_resources,
+ },
+ {
+ .name = "wm831x-alive-ldo",
+ .id = 11,
+ .num_resources = ARRAY_SIZE(wm831x_ldo11_resources),
+ .resources = wm831x_ldo11_resources,
+ },
+ {
+ .name = "wm831x-on",
+ .num_resources = ARRAY_SIZE(wm831x_on_resources),
+ .resources = wm831x_on_resources,
+ },
+ {
+ .name = "wm831x-rtc",
+ .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
+ .resources = wm831x_rtc_resources,
+ },
+ {
+ .name = "wm831x-status",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wm831x_status1_resources),
+ .resources = wm831x_status1_resources,
+ },
+ {
+ .name = "wm831x-status",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(wm831x_status2_resources),
+ .resources = wm831x_status2_resources,
+ },
+ {
+ .name = "wm831x-watchdog",
+ .num_resources = ARRAY_SIZE(wm831x_wdt_resources),
+ .resources = wm831x_wdt_resources,
+ },
+};
+
static struct mfd_cell backlight_devs[] = {
{
.name = "wm831x-backlight",
@@ -1282,50 +1437,37 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
goto err;
}
+ /* Some engineering samples do not have the ID set; rely on
+ * the device having been registered with the correct ID.
+ */
+ if (ret == 0) {
+ dev_info(wm831x->dev, "Device is an engineering sample\n");
+ ret = id;
+ }
+
switch (ret) {
- case 0x8310:
+ case WM8310:
parent = WM8310;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM8310 revision %c\n",
- 'A' + rev);
- break;
- }
+ wm831x->num_gpio = 16;
+ dev_info(wm831x->dev, "WM8310 revision %c\n", 'A' + rev);
break;
- case 0x8311:
+ case WM8311:
parent = WM8311;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM8311 revision %c\n",
- 'A' + rev);
- break;
- }
+ wm831x->num_gpio = 16;
+ dev_info(wm831x->dev, "WM8311 revision %c\n", 'A' + rev);
break;
- case 0x8312:
+ case WM8312:
parent = WM8312;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM8312 revision %c\n",
- 'A' + rev);
- break;
- }
+ wm831x->num_gpio = 16;
+ dev_info(wm831x->dev, "WM8312 revision %c\n", 'A' + rev);
break;
- case 0:
- /* Some engineering samples do not have the ID set,
- * rely on the device being registered correctly.
- * This will need revisiting for future devices with
- * multiple dies.
- */
- parent = id;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM831%d ES revision %c\n",
- parent, 'A' + rev);
- break;
- }
+ case WM8320:
+ parent = WM8320;
+ wm831x->num_gpio = 12;
+ dev_info(wm831x->dev, "WM8320 revision %c\n", 'A' + rev);
break;
default:
@@ -1338,7 +1480,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
* current parts.
*/
if (parent != id)
- dev_warn(wm831x->dev, "Device was registered as a WM831%lu\n",
+ dev_warn(wm831x->dev, "Device was registered as a WM%lx\n",
id);
/* Bootstrap the user key */
@@ -1371,18 +1513,24 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
ret = mfd_add_devices(wm831x->dev, -1,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
- NULL, 0);
+ NULL, wm831x->irq_base);
break;
case WM8311:
ret = mfd_add_devices(wm831x->dev, -1,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
- NULL, 0);
+ NULL, wm831x->irq_base);
break;
case WM8312:
ret = mfd_add_devices(wm831x->dev, -1,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
+ NULL, wm831x->irq_base);
+ break;
+
+ case WM8320:
+ ret = mfd_add_devices(wm831x->dev, -1,
+ wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, 0);
break;
@@ -1399,7 +1547,8 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
if (pdata && pdata->backlight) {
/* Treat errors as non-critical */
ret = mfd_add_devices(wm831x->dev, -1, backlight_devs,
- ARRAY_SIZE(backlight_devs), NULL, 0);
+ ARRAY_SIZE(backlight_devs), NULL,
+ wm831x->irq_base);
if (ret < 0)
dev_err(wm831x->dev, "Failed to add backlight: %d\n",
ret);
@@ -1511,6 +1660,7 @@ static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8310", WM8310 },
{ "wm8311", WM8311 },
{ "wm8312", WM8312 },
+ { "wm8320", WM8320 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
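
Editor's note: the new "wm8320" entry is matched like the other parts, but after the genirq conversion in wm831x-irq.c below, wm831x_irq_init() returns early unless the platform data provides irq_base. A hedged board-registration sketch; names marked hypothetical are not from this patch.

	static struct wm831x_pdata board_wm8320_pdata = {
		.irq_base = BOARD_WM831X_IRQ_BASE,	/* hypothetical; needed for genirq */
	};

	static struct i2c_board_info __initdata board_pmic[] = {
		{
			I2C_BOARD_INFO("wm8320", 0x34),		/* bus address is board-specific */
			.irq		= BOARD_PMIC_IRQ,	/* hypothetical */
			.platform_data	= &board_wm8320_pdata,
		},
	};
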
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index ac056ea6b66e..301327697117 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
@@ -339,110 +340,71 @@ static inline int irq_data_to_mask_reg(struct wm831x_irq_data *irq_data)
return WM831X_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
}
-static void __wm831x_enable_irq(struct wm831x *wm831x, int irq)
+static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
+ int irq)
{
- struct wm831x_irq_data *irq_data = &wm831x_irqs[irq];
-
- wm831x->irq_masks[irq_data->reg - 1] &= ~irq_data->mask;
- wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data),
- wm831x->irq_masks[irq_data->reg - 1]);
+ return &wm831x_irqs[irq - wm831x->irq_base];
}
-void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+static void wm831x_irq_lock(unsigned int irq)
{
- mutex_lock(&wm831x->irq_lock);
- __wm831x_enable_irq(wm831x, irq);
- mutex_unlock(&wm831x->irq_lock);
-}
-EXPORT_SYMBOL_GPL(wm831x_enable_irq);
+ struct wm831x *wm831x = get_irq_chip_data(irq);
-static void __wm831x_disable_irq(struct wm831x *wm831x, int irq)
-{
- struct wm831x_irq_data *irq_data = &wm831x_irqs[irq];
-
- wm831x->irq_masks[irq_data->reg - 1] |= irq_data->mask;
- wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data),
- wm831x->irq_masks[irq_data->reg - 1]);
-}
-
-void wm831x_disable_irq(struct wm831x *wm831x, int irq)
-{
mutex_lock(&wm831x->irq_lock);
- __wm831x_disable_irq(wm831x, irq);
- mutex_unlock(&wm831x->irq_lock);
}
-EXPORT_SYMBOL_GPL(wm831x_disable_irq);
-int wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev)
+static void wm831x_irq_sync_unlock(unsigned int irq)
{
- int ret = 0;
-
- if (irq < 0 || irq >= WM831X_NUM_IRQS)
- return -EINVAL;
-
- mutex_lock(&wm831x->irq_lock);
-
- if (wm831x_irqs[irq].handler) {
- dev_err(wm831x->dev, "Already have handler for IRQ %d\n", irq);
- ret = -EINVAL;
- goto out;
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ /* If there's been a change in the mask, write it back
+ * to the hardware. */
+ if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) {
+ wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i];
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1_MASK + i,
+ wm831x->irq_masks_cur[i]);
+ }
}
- wm831x_irqs[irq].handler = handler;
- wm831x_irqs[irq].handler_data = dev;
-
- __wm831x_enable_irq(wm831x, irq);
-
-out:
mutex_unlock(&wm831x->irq_lock);
-
- return ret;
}
-EXPORT_SYMBOL_GPL(wm831x_request_irq);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int irq, void *data)
+static void wm831x_irq_unmask(unsigned int irq)
{
- if (irq < 0 || irq >= WM831X_NUM_IRQS)
- return;
-
- mutex_lock(&wm831x->irq_lock);
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
- wm831x_irqs[irq].handler = NULL;
- wm831x_irqs[irq].handler_data = NULL;
-
- __wm831x_disable_irq(wm831x, irq);
-
- mutex_unlock(&wm831x->irq_lock);
+ wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-EXPORT_SYMBOL_GPL(wm831x_free_irq);
-
-static void wm831x_handle_irq(struct wm831x *wm831x, int irq, int status)
+static void wm831x_irq_mask(unsigned int irq)
{
- struct wm831x_irq_data *irq_data = &wm831x_irqs[irq];
-
- if (irq_data->handler) {
- irq_data->handler(irq, irq_data->handler_data);
- wm831x_reg_write(wm831x, irq_data_to_status_reg(irq_data),
- irq_data->mask);
- } else {
- dev_err(wm831x->dev, "Unhandled IRQ %d, masking\n", irq);
- __wm831x_disable_irq(wm831x, irq);
- }
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+
+ wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
-/* Main interrupt handling occurs in a workqueue since we need
- * interrupts enabled to interact with the chip. */
-static void wm831x_irq_worker(struct work_struct *work)
+static struct irq_chip wm831x_irq_chip = {
+ .name = "wm831x",
+ .bus_lock = wm831x_irq_lock,
+ .bus_sync_unlock = wm831x_irq_sync_unlock,
+ .mask = wm831x_irq_mask,
+ .unmask = wm831x_irq_unmask,
+};
+
+/* The processing of the primary interrupt occurs in a thread so that
+ * we can interact with the device over I2C or SPI. */
+static irqreturn_t wm831x_irq_thread(int irq, void *data)
{
- struct wm831x *wm831x = container_of(work, struct wm831x, irq_work);
+ struct wm831x *wm831x = data;
unsigned int i;
int primary;
- int status_regs[5];
- int read[5] = { 0 };
+ int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
+ int read[WM831X_NUM_IRQ_REGS] = { 0 };
int *status;
primary = wm831x_reg_read(wm831x, WM831X_SYSTEM_INTERRUPTS);
@@ -452,8 +414,6 @@ static void wm831x_irq_worker(struct work_struct *work)
goto out;
}
- mutex_lock(&wm831x->irq_lock);
-
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
@@ -471,41 +431,34 @@ static void wm831x_irq_worker(struct work_struct *work)
dev_err(wm831x->dev,
"Failed to read IRQ status: %d\n",
*status);
- goto out_lock;
+ goto out;
}
- /* Mask out the disabled IRQs */
- *status &= ~wm831x->irq_masks[offset];
read[offset] = 1;
}
- if (*status & wm831x_irqs[i].mask)
- wm831x_handle_irq(wm831x, i, *status);
+ /* Report it if it isn't masked, or forget the status. */
+ if ((*status & ~wm831x->irq_masks_cur[offset])
+ & wm831x_irqs[i].mask)
+ handle_nested_irq(wm831x->irq_base + i);
+ else
+ *status &= ~wm831x_irqs[i].mask;
}
-out_lock:
- mutex_unlock(&wm831x->irq_lock);
out:
- enable_irq(wm831x->irq);
-}
-
-
-static irqreturn_t wm831x_cpu_irq(int irq, void *data)
-{
- struct wm831x *wm831x = data;
-
- /* Shut the interrupt to the CPU up and schedule the actual
- * handler; we can't check that the IRQ is asserted. */
- disable_irq_nosync(irq);
-
- queue_work(wm831x->irq_wq, &wm831x->irq_work);
+ for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
+ if (status_regs[i])
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
+ status_regs[i]);
+ }
return IRQ_HANDLED;
}
int wm831x_irq_init(struct wm831x *wm831x, int irq)
{
- int i, ret;
+ struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ int i, cur_irq, ret;
mutex_init(&wm831x->irq_lock);
@@ -515,41 +468,53 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
return 0;
}
-
- wm831x->irq_wq = create_singlethread_workqueue("wm831x-irq");
- if (!wm831x->irq_wq) {
- dev_err(wm831x->dev, "Failed to allocate IRQ worker\n");
- return -ESRCH;
+ if (!pdata || !pdata->irq_base) {
+ dev_err(wm831x->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
}
wm831x->irq = irq;
- INIT_WORK(&wm831x->irq_work, wm831x_irq_worker);
+ wm831x->irq_base = pdata->irq_base;
/* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks); i++) {
- wm831x->irq_masks[i] = 0xffff;
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ wm831x->irq_masks_cur[i] = 0xffff;
+ wm831x->irq_masks_cache[i] = 0xffff;
wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
0xffff);
}
- /* Enable top level interrupts, we mask at secondary level */
- wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
+ /* Register them with genirq */
+ for (cur_irq = wm831x->irq_base;
+ cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
+ cur_irq++) {
+ set_irq_chip_data(cur_irq, wm831x);
+ set_irq_chip_and_handler(cur_irq, &wm831x_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
- /* We're good to go. We set IRQF_SHARED since there's a
- * chance the driver will interoperate with another driver but
- * the need to disable the IRQ while handing via I2C/SPI means
- * that this may break and performance will be impacted. If
- * this does happen it's a hardware design issue and the only
- * other alternative would be polling.
- */
- ret = request_irq(irq, wm831x_cpu_irq, IRQF_TRIGGER_LOW | IRQF_SHARED,
- "wm831x", wm831x);
+ ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "wm831x", wm831x);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
return ret;
}
+ /* Enable top level interrupts, we mask at secondary level */
+ wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
+
return 0;
}
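
Editor's note: since mfd_add_devices() is now passed wm831x->irq_base, each sub-device's IORESOURCE_IRQ entries come back as ordinary genirq numbers, and clients request them with the standard API instead of the removed wm831x_request_irq(). A hedged sub-driver sketch; the handler and names are hypothetical, while the "UV" resource name is the one used in the tables above.

	static irqreturn_t example_uv_irq(int irq, void *data)
	{
		/* runs as a nested threaded handler, so I2C/SPI access is fine here */
		return IRQ_HANDLED;
	}

	static int example_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq_byname(pdev, "UV");

		if (irq < 0)
			return irq;

		return request_threaded_irq(irq, NULL, example_uv_irq,
					    IRQF_TRIGGER_RISING,
					    "wm831x-example", pdev);
	}
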
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index ba27c9dc1ad3..8485a7018060 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -337,733 +337,6 @@ int wm8350_reg_unlock(struct wm8350 *wm8350)
}
EXPORT_SYMBOL_GPL(wm8350_reg_unlock);
-static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq)
-{
- mutex_lock(&wm8350->irq_mutex);
-
- if (wm8350->irq[irq].handler)
- wm8350->irq[irq].handler(wm8350, irq, wm8350->irq[irq].data);
- else {
- dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n",
- irq);
- wm8350_mask_irq(wm8350, irq);
- }
-
- mutex_unlock(&wm8350->irq_mutex);
-}
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since all
- * interrupts are clear on read the IRQ line will be reasserted and
- * the physical IRQ will be handled again if another interrupt is
- * asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads.
- */
-static irqreturn_t wm8350_irq(int irq, void *data)
-{
- struct wm8350 *wm8350 = data;
- u16 level_one, status1, status2, comp;
-
- /* TODO: Use block reads to improve performance? */
- level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS)
- & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK);
- status1 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_1)
- & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_1_MASK);
- status2 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_2)
- & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_2_MASK);
- comp = wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS)
- & ~wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK);
-
- /* over current */
- if (level_one & WM8350_OC_INT) {
- u16 oc;
-
- oc = wm8350_reg_read(wm8350, WM8350_OVER_CURRENT_INT_STATUS);
- oc &= ~wm8350_reg_read(wm8350,
- WM8350_OVER_CURRENT_INT_STATUS_MASK);
-
- if (oc & WM8350_OC_LS_EINT) /* limit switch */
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_OC_LS);
- }
-
- /* under voltage */
- if (level_one & WM8350_UV_INT) {
- u16 uv;
-
- uv = wm8350_reg_read(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS);
- uv &= ~wm8350_reg_read(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK);
-
- if (uv & WM8350_UV_DC1_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC1);
- if (uv & WM8350_UV_DC2_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC2);
- if (uv & WM8350_UV_DC3_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC3);
- if (uv & WM8350_UV_DC4_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC4);
- if (uv & WM8350_UV_DC5_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC5);
- if (uv & WM8350_UV_DC6_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC6);
- if (uv & WM8350_UV_LDO1_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO1);
- if (uv & WM8350_UV_LDO2_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO2);
- if (uv & WM8350_UV_LDO3_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO3);
- if (uv & WM8350_UV_LDO4_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO4);
- }
-
- /* charger, RTC */
- if (status1) {
- if (status1 & WM8350_CHG_BAT_HOT_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_BAT_HOT);
- if (status1 & WM8350_CHG_BAT_COLD_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_BAT_COLD);
- if (status1 & WM8350_CHG_BAT_FAIL_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_BAT_FAIL);
- if (status1 & WM8350_CHG_TO_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_TO);
- if (status1 & WM8350_CHG_END_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_END);
- if (status1 & WM8350_CHG_START_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_START);
- if (status1 & WM8350_CHG_FAST_RDY_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_FAST_RDY);
- if (status1 & WM8350_CHG_VBATT_LT_3P9_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_VBATT_LT_3P9);
- if (status1 & WM8350_CHG_VBATT_LT_3P1_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_VBATT_LT_3P1);
- if (status1 & WM8350_CHG_VBATT_LT_2P85_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_VBATT_LT_2P85);
- if (status1 & WM8350_RTC_ALM_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_ALM);
- if (status1 & WM8350_RTC_SEC_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_SEC);
- if (status1 & WM8350_RTC_PER_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_PER);
- }
-
- /* current sink, system, aux adc */
- if (status2) {
- if (status2 & WM8350_CS1_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS1);
- if (status2 & WM8350_CS2_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS2);
-
- if (status2 & WM8350_SYS_HYST_COMP_FAIL_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_HYST_COMP_FAIL);
- if (status2 & WM8350_SYS_CHIP_GT115_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_CHIP_GT115);
- if (status2 & WM8350_SYS_CHIP_GT140_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_CHIP_GT140);
- if (status2 & WM8350_SYS_WDOG_TO_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_WDOG_TO);
-
- if (status2 & WM8350_AUXADC_DATARDY_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DATARDY);
- if (status2 & WM8350_AUXADC_DCOMP4_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP4);
- if (status2 & WM8350_AUXADC_DCOMP3_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP3);
- if (status2 & WM8350_AUXADC_DCOMP2_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP2);
- if (status2 & WM8350_AUXADC_DCOMP1_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP1);
-
- if (status2 & WM8350_USB_LIMIT_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_USB_LIMIT);
- }
-
- /* wake, codec, ext */
- if (comp) {
- if (comp & WM8350_WKUP_OFF_STATE_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_OFF_STATE);
- if (comp & WM8350_WKUP_HIB_STATE_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_HIB_STATE);
- if (comp & WM8350_WKUP_CONV_FAULT_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_CONV_FAULT);
- if (comp & WM8350_WKUP_WDOG_RST_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_WDOG_RST);
- if (comp & WM8350_WKUP_GP_PWR_ON_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_GP_PWR_ON);
- if (comp & WM8350_WKUP_ONKEY_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_WKUP_ONKEY);
- if (comp & WM8350_WKUP_GP_WAKEUP_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_GP_WAKEUP);
-
- if (comp & WM8350_CODEC_JCK_DET_L_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CODEC_JCK_DET_L);
- if (comp & WM8350_CODEC_JCK_DET_R_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CODEC_JCK_DET_R);
- if (comp & WM8350_CODEC_MICSCD_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CODEC_MICSCD);
- if (comp & WM8350_CODEC_MICD_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CODEC_MICD);
-
- if (comp & WM8350_EXT_USB_FB_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_USB_FB);
- if (comp & WM8350_EXT_WALL_FB_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_EXT_WALL_FB);
- if (comp & WM8350_EXT_BAT_FB_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_BAT_FB);
- }
-
- if (level_one & WM8350_GP_INT) {
- int i;
- u16 gpio;
-
- gpio = wm8350_reg_read(wm8350, WM8350_GPIO_INT_STATUS);
- gpio &= ~wm8350_reg_read(wm8350,
- WM8350_GPIO_INT_STATUS_MASK);
-
- for (i = 0; i < 12; i++) {
- if (gpio & (1 << i))
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_GPIO(i));
- }
- }
-
- return IRQ_HANDLED;
-}
-
-int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data)
-{
- if (irq < 0 || irq > WM8350_NUM_IRQ || !handler)
- return -EINVAL;
-
- if (wm8350->irq[irq].handler)
- return -EBUSY;
-
- mutex_lock(&wm8350->irq_mutex);
- wm8350->irq[irq].handler = handler;
- wm8350->irq[irq].data = data;
- mutex_unlock(&wm8350->irq_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_register_irq);
-
-int wm8350_free_irq(struct wm8350 *wm8350, int irq)
-{
- if (irq < 0 || irq > WM8350_NUM_IRQ)
- return -EINVAL;
-
- mutex_lock(&wm8350->irq_mutex);
- wm8350->irq[irq].handler = NULL;
- mutex_unlock(&wm8350->irq_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_free_irq);
-
-int wm8350_mask_irq(struct wm8350 *wm8350, int irq)
-{
- switch (irq) {
- case WM8350_IRQ_CHG_BAT_HOT:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_HOT_EINT);
- case WM8350_IRQ_CHG_BAT_COLD:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_COLD_EINT);
- case WM8350_IRQ_CHG_BAT_FAIL:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_FAIL_EINT);
- case WM8350_IRQ_CHG_TO:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_TO_EINT);
- case WM8350_IRQ_CHG_END:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_END_EINT);
- case WM8350_IRQ_CHG_START:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_START_EINT);
- case WM8350_IRQ_CHG_FAST_RDY:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_FAST_RDY_EINT);
- case WM8350_IRQ_RTC_PER:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_PER_EINT);
- case WM8350_IRQ_RTC_SEC:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_SEC_EINT);
- case WM8350_IRQ_RTC_ALM:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_ALM_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P9:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P9_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P1:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P1_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_2P85:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_2P85_EINT);
- case WM8350_IRQ_CS1:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS1_EINT);
- case WM8350_IRQ_CS2:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS2_EINT);
- case WM8350_IRQ_USB_LIMIT:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_USB_LIMIT_EINT);
- case WM8350_IRQ_AUXADC_DATARDY:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DATARDY_EINT);
- case WM8350_IRQ_AUXADC_DCOMP4:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP4_EINT);
- case WM8350_IRQ_AUXADC_DCOMP3:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP3_EINT);
- case WM8350_IRQ_AUXADC_DCOMP2:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP2_EINT);
- case WM8350_IRQ_AUXADC_DCOMP1:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP1_EINT);
- case WM8350_IRQ_SYS_HYST_COMP_FAIL:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_HYST_COMP_FAIL_EINT);
- case WM8350_IRQ_SYS_CHIP_GT115:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT115_EINT);
- case WM8350_IRQ_SYS_CHIP_GT140:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT140_EINT);
- case WM8350_IRQ_SYS_WDOG_TO:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_WDOG_TO_EINT);
- case WM8350_IRQ_UV_LDO4:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO4_EINT);
- case WM8350_IRQ_UV_LDO3:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO3_EINT);
- case WM8350_IRQ_UV_LDO2:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO2_EINT);
- case WM8350_IRQ_UV_LDO1:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO1_EINT);
- case WM8350_IRQ_UV_DC6:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC6_EINT);
- case WM8350_IRQ_UV_DC5:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC5_EINT);
- case WM8350_IRQ_UV_DC4:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC4_EINT);
- case WM8350_IRQ_UV_DC3:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC3_EINT);
- case WM8350_IRQ_UV_DC2:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC2_EINT);
- case WM8350_IRQ_UV_DC1:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC1_EINT);
- case WM8350_IRQ_OC_LS:
- return wm8350_set_bits(wm8350,
- WM8350_OVER_CURRENT_INT_STATUS_MASK,
- WM8350_IM_OC_LS_EINT);
- case WM8350_IRQ_EXT_USB_FB:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_USB_FB_EINT);
- case WM8350_IRQ_EXT_WALL_FB:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_WALL_FB_EINT);
- case WM8350_IRQ_EXT_BAT_FB:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_BAT_FB_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_L:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_L_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_R:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_R_EINT);
- case WM8350_IRQ_CODEC_MICSCD:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICSCD_EINT);
- case WM8350_IRQ_CODEC_MICD:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICD_EINT);
- case WM8350_IRQ_WKUP_OFF_STATE:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_HIB_STATE:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_HIB_STATE_EINT);
- case WM8350_IRQ_WKUP_CONV_FAULT:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_CONV_FAULT_EINT);
- case WM8350_IRQ_WKUP_WDOG_RST:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_GP_PWR_ON:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_PWR_ON_EINT);
- case WM8350_IRQ_WKUP_ONKEY:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_ONKEY_EINT);
- case WM8350_IRQ_WKUP_GP_WAKEUP:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_WAKEUP_EINT);
- case WM8350_IRQ_GPIO(0):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP0_EINT);
- case WM8350_IRQ_GPIO(1):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP1_EINT);
- case WM8350_IRQ_GPIO(2):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP2_EINT);
- case WM8350_IRQ_GPIO(3):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP3_EINT);
- case WM8350_IRQ_GPIO(4):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP4_EINT);
- case WM8350_IRQ_GPIO(5):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP5_EINT);
- case WM8350_IRQ_GPIO(6):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP6_EINT);
- case WM8350_IRQ_GPIO(7):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP7_EINT);
- case WM8350_IRQ_GPIO(8):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP8_EINT);
- case WM8350_IRQ_GPIO(9):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP9_EINT);
- case WM8350_IRQ_GPIO(10):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP10_EINT);
- case WM8350_IRQ_GPIO(11):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP11_EINT);
- case WM8350_IRQ_GPIO(12):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP12_EINT);
- default:
- dev_warn(wm8350->dev, "Attempting to mask unknown IRQ %d\n",
- irq);
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_mask_irq);
-
-int wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
-{
- switch (irq) {
- case WM8350_IRQ_CHG_BAT_HOT:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_HOT_EINT);
- case WM8350_IRQ_CHG_BAT_COLD:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_COLD_EINT);
- case WM8350_IRQ_CHG_BAT_FAIL:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_FAIL_EINT);
- case WM8350_IRQ_CHG_TO:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_TO_EINT);
- case WM8350_IRQ_CHG_END:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_END_EINT);
- case WM8350_IRQ_CHG_START:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_START_EINT);
- case WM8350_IRQ_CHG_FAST_RDY:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_FAST_RDY_EINT);
- case WM8350_IRQ_RTC_PER:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_PER_EINT);
- case WM8350_IRQ_RTC_SEC:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_SEC_EINT);
- case WM8350_IRQ_RTC_ALM:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_ALM_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P9:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P9_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P1:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P1_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_2P85:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_2P85_EINT);
- case WM8350_IRQ_CS1:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS1_EINT);
- case WM8350_IRQ_CS2:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS2_EINT);
- case WM8350_IRQ_USB_LIMIT:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_USB_LIMIT_EINT);
- case WM8350_IRQ_AUXADC_DATARDY:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DATARDY_EINT);
- case WM8350_IRQ_AUXADC_DCOMP4:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP4_EINT);
- case WM8350_IRQ_AUXADC_DCOMP3:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP3_EINT);
- case WM8350_IRQ_AUXADC_DCOMP2:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP2_EINT);
- case WM8350_IRQ_AUXADC_DCOMP1:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP1_EINT);
- case WM8350_IRQ_SYS_HYST_COMP_FAIL:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_HYST_COMP_FAIL_EINT);
- case WM8350_IRQ_SYS_CHIP_GT115:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT115_EINT);
- case WM8350_IRQ_SYS_CHIP_GT140:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT140_EINT);
- case WM8350_IRQ_SYS_WDOG_TO:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_WDOG_TO_EINT);
- case WM8350_IRQ_UV_LDO4:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO4_EINT);
- case WM8350_IRQ_UV_LDO3:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO3_EINT);
- case WM8350_IRQ_UV_LDO2:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO2_EINT);
- case WM8350_IRQ_UV_LDO1:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO1_EINT);
- case WM8350_IRQ_UV_DC6:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC6_EINT);
- case WM8350_IRQ_UV_DC5:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC5_EINT);
- case WM8350_IRQ_UV_DC4:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC4_EINT);
- case WM8350_IRQ_UV_DC3:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC3_EINT);
- case WM8350_IRQ_UV_DC2:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC2_EINT);
- case WM8350_IRQ_UV_DC1:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC1_EINT);
- case WM8350_IRQ_OC_LS:
- return wm8350_clear_bits(wm8350,
- WM8350_OVER_CURRENT_INT_STATUS_MASK,
- WM8350_IM_OC_LS_EINT);
- case WM8350_IRQ_EXT_USB_FB:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_USB_FB_EINT);
- case WM8350_IRQ_EXT_WALL_FB:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_WALL_FB_EINT);
- case WM8350_IRQ_EXT_BAT_FB:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_BAT_FB_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_L:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_L_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_R:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_R_EINT);
- case WM8350_IRQ_CODEC_MICSCD:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICSCD_EINT);
- case WM8350_IRQ_CODEC_MICD:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICD_EINT);
- case WM8350_IRQ_WKUP_OFF_STATE:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_HIB_STATE:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_HIB_STATE_EINT);
- case WM8350_IRQ_WKUP_CONV_FAULT:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_CONV_FAULT_EINT);
- case WM8350_IRQ_WKUP_WDOG_RST:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_GP_PWR_ON:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_PWR_ON_EINT);
- case WM8350_IRQ_WKUP_ONKEY:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_ONKEY_EINT);
- case WM8350_IRQ_WKUP_GP_WAKEUP:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_WAKEUP_EINT);
- case WM8350_IRQ_GPIO(0):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP0_EINT);
- case WM8350_IRQ_GPIO(1):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP1_EINT);
- case WM8350_IRQ_GPIO(2):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP2_EINT);
- case WM8350_IRQ_GPIO(3):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP3_EINT);
- case WM8350_IRQ_GPIO(4):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP4_EINT);
- case WM8350_IRQ_GPIO(5):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP5_EINT);
- case WM8350_IRQ_GPIO(6):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP6_EINT);
- case WM8350_IRQ_GPIO(7):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP7_EINT);
- case WM8350_IRQ_GPIO(8):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP8_EINT);
- case WM8350_IRQ_GPIO(9):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP9_EINT);
- case WM8350_IRQ_GPIO(10):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP10_EINT);
- case WM8350_IRQ_GPIO(11):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP11_EINT);
- case WM8350_IRQ_GPIO(12):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP12_EINT);
- default:
- dev_warn(wm8350->dev, "Attempting to unmask unknown IRQ %d\n",
- irq);
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_unmask_irq);
-
int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
{
u16 reg, result = 0;
@@ -1264,7 +537,7 @@ static void wm8350_client_dev_register(struct wm8350 *wm8350,
int ret;
*pdev = platform_device_alloc(name, -1);
- if (pdev == NULL) {
+ if (*pdev == NULL) {
dev_err(wm8350->dev, "Failed to allocate %s\n", name);
return;
}
@@ -1409,49 +682,18 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
return ret;
}
- wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF);
-
mutex_init(&wm8350->auxadc_mutex);
- mutex_init(&wm8350->irq_mutex);
- if (irq) {
- int flags = IRQF_ONESHOT;
-
- if (pdata && pdata->irq_high) {
- flags |= IRQF_TRIGGER_HIGH;
-
- wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
- WM8350_IRQ_POL);
- } else {
- flags |= IRQF_TRIGGER_LOW;
-
- wm8350_clear_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
- WM8350_IRQ_POL);
- }
- ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
- "wm8350", wm8350);
- if (ret != 0) {
- dev_err(wm8350->dev, "Failed to request IRQ: %d\n",
- ret);
- goto err;
- }
- } else {
- dev_err(wm8350->dev, "No IRQ configured\n");
+ ret = wm8350_irq_init(wm8350, irq, pdata);
+ if (ret < 0)
goto err;
- }
- wm8350->chip_irq = irq;
if (pdata && pdata->init) {
ret = pdata->init(wm8350);
if (ret != 0) {
dev_err(wm8350->dev, "Platform init() failed: %d\n",
ret);
- goto err;
+ goto err_irq;
}
}
@@ -1470,6 +712,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
return 0;
+err_irq:
+ wm8350_irq_exit(wm8350);
err:
kfree(wm8350->reg_cache);
return ret;
@@ -1493,7 +737,8 @@ void wm8350_device_exit(struct wm8350 *wm8350)
platform_device_unregister(wm8350->gpio.pdev);
platform_device_unregister(wm8350->codec.pdev);
- free_irq(wm8350->chip_irq, wm8350);
+ wm8350_irq_exit(wm8350);
+
kfree(wm8350->reg_cache);
}
EXPORT_SYMBOL_GPL(wm8350_device_exit);
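The several hundred per-IRQ switch cases removed above are replaced by a table lookup in the new drivers/mfd/wm8350-irq.c added below: each interrupt is fully described by a (primary bit, status-register offset, mask bit) entry, so masking or unmasking any source becomes a single register write. A minimal sketch of that idea, with names shortened purely for illustration (the real structure and helpers follow in the new file):

/* Sketch only: one table entry per IRQ replaces one switch case per IRQ. */
struct irq_entry {
	int reg;	/* offset from the first *_INT_STATUS_MASK register */
	u16 mask;	/* bit for this source within that register */
};

static int mask_one(struct wm8350 *wm8350, const struct irq_entry *e)
{
	/* Setting the mask bit disables the interrupt source. */
	return wm8350_set_bits(wm8350,
			       WM8350_INT_STATUS_1_MASK + e->reg, e->mask);
}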
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
new file mode 100644
index 000000000000..c8df547c4747
--- /dev/null
+++ b/drivers/mfd/wm8350-irq.c
@@ -0,0 +1,529 @@
+/*
+ * wm8350-irq.c -- IRQ support for Wolfson WM8350
+ *
+ * Copyright 2007, 2008, 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Liam Girdwood, Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/wm8350/core.h>
+#include <linux/mfd/wm8350/audio.h>
+#include <linux/mfd/wm8350/comparator.h>
+#include <linux/mfd/wm8350/gpio.h>
+#include <linux/mfd/wm8350/pmic.h>
+#include <linux/mfd/wm8350/rtc.h>
+#include <linux/mfd/wm8350/supply.h>
+#include <linux/mfd/wm8350/wdt.h>
+
+#define WM8350_NUM_IRQ_REGS 7
+
+#define WM8350_INT_OFFSET_1 0
+#define WM8350_INT_OFFSET_2 1
+#define WM8350_POWER_UP_INT_OFFSET 2
+#define WM8350_UNDER_VOLTAGE_INT_OFFSET 3
+#define WM8350_OVER_CURRENT_INT_OFFSET 4
+#define WM8350_GPIO_INT_OFFSET 5
+#define WM8350_COMPARATOR_INT_OFFSET 6
+
+struct wm8350_irq_data {
+ int primary;
+ int reg;
+ int mask;
+ int primary_only;
+};
+
+static struct wm8350_irq_data wm8350_irqs[] = {
+ [WM8350_IRQ_OC_LS] = {
+ .primary = WM8350_OC_INT,
+ .reg = WM8350_OVER_CURRENT_INT_OFFSET,
+ .mask = WM8350_OC_LS_EINT,
+ .primary_only = 1,
+ },
+ [WM8350_IRQ_UV_DC1] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC1_EINT,
+ },
+ [WM8350_IRQ_UV_DC2] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC2_EINT,
+ },
+ [WM8350_IRQ_UV_DC3] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC3_EINT,
+ },
+ [WM8350_IRQ_UV_DC4] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC4_EINT,
+ },
+ [WM8350_IRQ_UV_DC5] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC5_EINT,
+ },
+ [WM8350_IRQ_UV_DC6] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC6_EINT,
+ },
+ [WM8350_IRQ_UV_LDO1] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO1_EINT,
+ },
+ [WM8350_IRQ_UV_LDO2] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO2_EINT,
+ },
+ [WM8350_IRQ_UV_LDO3] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO3_EINT,
+ },
+ [WM8350_IRQ_UV_LDO4] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO4_EINT,
+ },
+ [WM8350_IRQ_CHG_BAT_HOT] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_BAT_HOT_EINT,
+ },
+ [WM8350_IRQ_CHG_BAT_COLD] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_BAT_COLD_EINT,
+ },
+ [WM8350_IRQ_CHG_BAT_FAIL] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_BAT_FAIL_EINT,
+ },
+ [WM8350_IRQ_CHG_TO] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_TO_EINT,
+ },
+ [WM8350_IRQ_CHG_END] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_END_EINT,
+ },
+ [WM8350_IRQ_CHG_START] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_START_EINT,
+ },
+ [WM8350_IRQ_CHG_FAST_RDY] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_FAST_RDY_EINT,
+ },
+ [WM8350_IRQ_CHG_VBATT_LT_3P9] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_VBATT_LT_3P9_EINT,
+ },
+ [WM8350_IRQ_CHG_VBATT_LT_3P1] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_VBATT_LT_3P1_EINT,
+ },
+ [WM8350_IRQ_CHG_VBATT_LT_2P85] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_VBATT_LT_2P85_EINT,
+ },
+ [WM8350_IRQ_RTC_ALM] = {
+ .primary = WM8350_RTC_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_RTC_ALM_EINT,
+ },
+ [WM8350_IRQ_RTC_SEC] = {
+ .primary = WM8350_RTC_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_RTC_SEC_EINT,
+ },
+ [WM8350_IRQ_RTC_PER] = {
+ .primary = WM8350_RTC_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_RTC_PER_EINT,
+ },
+ [WM8350_IRQ_CS1] = {
+ .primary = WM8350_CS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_CS1_EINT,
+ },
+ [WM8350_IRQ_CS2] = {
+ .primary = WM8350_CS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_CS2_EINT,
+ },
+ [WM8350_IRQ_SYS_HYST_COMP_FAIL] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_HYST_COMP_FAIL_EINT,
+ },
+ [WM8350_IRQ_SYS_CHIP_GT115] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_CHIP_GT115_EINT,
+ },
+ [WM8350_IRQ_SYS_CHIP_GT140] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_CHIP_GT140_EINT,
+ },
+ [WM8350_IRQ_SYS_WDOG_TO] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_WDOG_TO_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DATARDY] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DATARDY_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP4] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP4_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP3] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP3_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP2] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP2_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP1] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP1_EINT,
+ },
+ [WM8350_IRQ_USB_LIMIT] = {
+ .primary = WM8350_USB_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_USB_LIMIT_EINT,
+ .primary_only = 1,
+ },
+ [WM8350_IRQ_WKUP_OFF_STATE] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_OFF_STATE_EINT,
+ },
+ [WM8350_IRQ_WKUP_HIB_STATE] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_HIB_STATE_EINT,
+ },
+ [WM8350_IRQ_WKUP_CONV_FAULT] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_CONV_FAULT_EINT,
+ },
+ [WM8350_IRQ_WKUP_WDOG_RST] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_WDOG_RST_EINT,
+ },
+ [WM8350_IRQ_WKUP_GP_PWR_ON] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_GP_PWR_ON_EINT,
+ },
+ [WM8350_IRQ_WKUP_ONKEY] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_ONKEY_EINT,
+ },
+ [WM8350_IRQ_WKUP_GP_WAKEUP] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_GP_WAKEUP_EINT,
+ },
+ [WM8350_IRQ_CODEC_JCK_DET_L] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_JCK_DET_L_EINT,
+ },
+ [WM8350_IRQ_CODEC_JCK_DET_R] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_JCK_DET_R_EINT,
+ },
+ [WM8350_IRQ_CODEC_MICSCD] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_MICSCD_EINT,
+ },
+ [WM8350_IRQ_CODEC_MICD] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_MICD_EINT,
+ },
+ [WM8350_IRQ_EXT_USB_FB] = {
+ .primary = WM8350_EXT_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_EXT_USB_FB_EINT,
+ },
+ [WM8350_IRQ_EXT_WALL_FB] = {
+ .primary = WM8350_EXT_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_EXT_WALL_FB_EINT,
+ },
+ [WM8350_IRQ_EXT_BAT_FB] = {
+ .primary = WM8350_EXT_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_EXT_BAT_FB_EINT,
+ },
+ [WM8350_IRQ_GPIO(0)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP0_EINT,
+ },
+ [WM8350_IRQ_GPIO(1)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP1_EINT,
+ },
+ [WM8350_IRQ_GPIO(2)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP2_EINT,
+ },
+ [WM8350_IRQ_GPIO(3)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP3_EINT,
+ },
+ [WM8350_IRQ_GPIO(4)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP4_EINT,
+ },
+ [WM8350_IRQ_GPIO(5)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP5_EINT,
+ },
+ [WM8350_IRQ_GPIO(6)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP6_EINT,
+ },
+ [WM8350_IRQ_GPIO(7)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP7_EINT,
+ },
+ [WM8350_IRQ_GPIO(8)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP8_EINT,
+ },
+ [WM8350_IRQ_GPIO(9)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP9_EINT,
+ },
+ [WM8350_IRQ_GPIO(10)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP10_EINT,
+ },
+ [WM8350_IRQ_GPIO(11)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP11_EINT,
+ },
+ [WM8350_IRQ_GPIO(12)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP12_EINT,
+ },
+};
+
+static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq)
+{
+ mutex_lock(&wm8350->irq_mutex);
+
+ if (wm8350->irq[irq].handler)
+ wm8350->irq[irq].handler(irq, wm8350->irq[irq].data);
+ else {
+ dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n",
+ irq);
+ wm8350_mask_irq(wm8350, irq);
+ }
+
+ mutex_unlock(&wm8350->irq_mutex);
+}
+
+/*
+ * This is a threaded IRQ handler so it can access I2C/SPI. Since all
+ * interrupts are clear on read the IRQ line will be reasserted and
+ * the physical IRQ will be handled again if another interrupt is
+ * asserted while we run - in the normal course of events this is a
+ * rare occurrence so we save I2C/SPI reads.
+ */
+static irqreturn_t wm8350_irq(int irq, void *irq_data)
+{
+ struct wm8350 *wm8350 = irq_data;
+ u16 level_one;
+ u16 sub_reg[WM8350_NUM_IRQ_REGS];
+ int read_done[WM8350_NUM_IRQ_REGS];
+ struct wm8350_irq_data *data;
+ int i;
+
+ /* TODO: Use block reads to improve performance? */
+ level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS)
+ & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK);
+
+ if (!level_one)
+ return IRQ_NONE;
+
+ memset(&read_done, 0, sizeof(read_done));
+
+ for (i = 0; i < ARRAY_SIZE(wm8350_irqs); i++) {
+ data = &wm8350_irqs[i];
+
+ if (!(level_one & data->primary))
+ continue;
+
+ if (!read_done[data->reg]) {
+ sub_reg[data->reg] =
+ wm8350_reg_read(wm8350, WM8350_INT_STATUS_1 +
+ data->reg);
+ sub_reg[data->reg] &=
+ ~wm8350_reg_read(wm8350,
+ WM8350_INT_STATUS_1_MASK +
+ data->reg);
+ read_done[data->reg] = 1;
+ }
+
+ if (sub_reg[data->reg] & data->mask)
+ wm8350_irq_call_handler(wm8350, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int wm8350_register_irq(struct wm8350 *wm8350, int irq,
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data)
+{
+ if (irq < 0 || irq > WM8350_NUM_IRQ || !handler)
+ return -EINVAL;
+
+ if (wm8350->irq[irq].handler)
+ return -EBUSY;
+
+ mutex_lock(&wm8350->irq_mutex);
+ wm8350->irq[irq].handler = handler;
+ wm8350->irq[irq].data = data;
+ mutex_unlock(&wm8350->irq_mutex);
+
+ wm8350_unmask_irq(wm8350, irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_register_irq);
+
+int wm8350_free_irq(struct wm8350 *wm8350, int irq)
+{
+ if (irq < 0 || irq > WM8350_NUM_IRQ)
+ return -EINVAL;
+
+ wm8350_mask_irq(wm8350, irq);
+
+ mutex_lock(&wm8350->irq_mutex);
+ wm8350->irq[irq].handler = NULL;
+ mutex_unlock(&wm8350->irq_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_free_irq);
+
+int wm8350_mask_irq(struct wm8350 *wm8350, int irq)
+{
+ return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK +
+ wm8350_irqs[irq].reg,
+ wm8350_irqs[irq].mask);
+}
+EXPORT_SYMBOL_GPL(wm8350_mask_irq);
+
+int wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
+{
+ return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK +
+ wm8350_irqs[irq].reg,
+ wm8350_irqs[irq].mask);
+}
+EXPORT_SYMBOL_GPL(wm8350_unmask_irq);
+
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata)
+{
+ int ret;
+ int flags = IRQF_ONESHOT;
+
+ if (!irq) {
+ dev_err(wm8350->dev, "No IRQ configured\n");
+ return -EINVAL;
+ }
+
+ wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF);
+
+ mutex_init(&wm8350->irq_mutex);
+ wm8350->chip_irq = irq;
+
+ if (pdata && pdata->irq_high) {
+ flags |= IRQF_TRIGGER_HIGH;
+
+ wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
+ WM8350_IRQ_POL);
+ } else {
+ flags |= IRQF_TRIGGER_LOW;
+
+ wm8350_clear_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
+ WM8350_IRQ_POL);
+ }
+
+ ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
+ "wm8350", wm8350);
+ if (ret != 0)
+ dev_err(wm8350->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int wm8350_irq_exit(struct wm8350 *wm8350)
+{
+ free_irq(wm8350->chip_irq, wm8350);
+ return 0;
+}
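For context, a consumer of the new registration API above supplies a handler with the standard irq_handler_t prototype; note that this version stores only the handler and data (the flags and name arguments are accepted but not yet used) and unmasks the source as part of registration. A hypothetical caller, sketched for illustration only - the handler and wrapper names are invented:

/* Sketch: hooking the RTC alarm interrupt through wm8350_register_irq(). */
static irqreturn_t example_rtc_alarm_irq(int irq, void *data)
{
	struct wm8350 *wm8350 = data;

	dev_dbg(wm8350->dev, "RTC alarm fired\n");
	return IRQ_HANDLED;
}

static int example_hook_rtc_alarm(struct wm8350 *wm8350)
{
	/* Registration also unmasks WM8350_IRQ_RTC_ALM. */
	return wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
				   example_rtc_alarm_irq, 0,
				   "RTC alarm", wm8350);
}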
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index 7ccc1eab98ab..e965139e5cd5 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -3170,14 +3170,6 @@ const u16 wm8352_mode3_defaults[] = {
};
#endif
-/* The register defaults for the config mode used must be compiled in but
- * due to the impact on kernel size it is possible to disable
- */
-#ifndef WM8350_HAVE_CONFIG_MODE
-#warning No WM8350 config modes supported - select at least one of the
-#warning MFD_WM8350_CONFIG_MODE_n options from the board driver.
-#endif
-
/*
* Access masks.
*/
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2c16ca6501d5..59f4ba1b7034 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -13,6 +13,20 @@ menuconfig MISC_DEVICES
if MISC_DEVICES
+config AD525X_DPOT
+ tristate "Analog Devices AD525x Digital Potentiometers"
+ depends on I2C && SYSFS
+ help
+ If you say yes here, you get support for the Analog Devices
+ AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
+ digital potentiometer chips.
+
+ See Documentation/misc-devices/ad525x_dpot.txt for the
+ userspace interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called ad525x_dpot.
+
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -173,6 +187,30 @@ config SGI_XP
this feature will allow for direct communication between SSIs
based on a network adapter and DMA messaging.
+config CS5535_MFGPT
+ tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
+ depends on PCI
+ depends on X86
+ default n
+ help
+ This driver provides access to MFGPT functionality for other
+ drivers that need timers. MFGPTs are available in the CS5535 and
+ CS5536 companion chips that are found in AMD Geode and several
+ other platforms. They have a better resolution and max interval
+ than the generic PIT, and are suitable for use as high-res timers.
+ You probably don't want to enable this manually; other drivers that
+ make use of it should enable it.
+
+config CS5535_MFGPT_DEFAULT_IRQ
+ int
+ default 7
+ help
+ MFGPTs on the CS5535 require an interrupt. The selected IRQ
+	  can be overridden as a module option as well as by drivers that
+	  use the cs5535_mfgpt_ API; however, different architectures might
+ want to use a different IRQ by default. This is here for
+ architectures to set as necessary.
+
config HP_ILO
tristate "Channel interface driver for HP iLO/iLO2 processor"
depends on PCI
@@ -256,6 +294,16 @@ config DS1682
This driver can also be built as a module. If so, the module
will be called ds1682.
+config TI_DAC7512
+ tristate "Texas Instruments DAC7512"
+ depends on SPI && SYSFS
+ help
+ If you say yes here you get support for the Texas Instruments
+ DAC7512 16-bit digital-to-analog converter.
+
+ This driver can also be built as a module. If so, the module
+	  will be called ti_dac7512.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 906a0edcea40..049ff2482f30 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
+obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
@@ -17,10 +18,12 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
+obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
+obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-y += eeprom/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
new file mode 100644
index 000000000000..30a59f2bacd2
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.c
@@ -0,0 +1,666 @@
+/*
+ * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
+ * Copyright (c) 2009 Analog Devices, Inc.
+ * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * DEVID #Wipers #Positions Resistor Options (kOhm)
+ * AD5258 1 64 1, 10, 50, 100
+ * AD5259 1 256 5, 10, 50, 100
+ * AD5251 2 64 1, 10, 50, 100
+ * AD5252 2 256 1, 10, 50, 100
+ * AD5255 3 512 25, 250
+ * AD5253 4 64 1, 10, 50, 100
+ * AD5254 4 256 1, 10, 50, 100
+ *
+ * See Documentation/misc-devices/ad525x_dpot.txt for more info.
+ *
+ * derived from ad5258.c
+ * Copyright (c) 2009 Cyber Switching, Inc.
+ * Author: Chris Verges <chrisv@cyberswitching.com>
+ *
+ * derived from ad5252.c
+ * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+#define DRIVER_NAME "ad525x_dpot"
+#define DRIVER_VERSION "0.1"
+
+enum dpot_devid {
+ AD5258_ID,
+ AD5259_ID,
+ AD5251_ID,
+ AD5252_ID,
+ AD5253_ID,
+ AD5254_ID,
+ AD5255_ID,
+};
+
+#define AD5258_MAX_POSITION 64
+#define AD5259_MAX_POSITION 256
+#define AD5251_MAX_POSITION 64
+#define AD5252_MAX_POSITION 256
+#define AD5253_MAX_POSITION 64
+#define AD5254_MAX_POSITION 256
+#define AD5255_MAX_POSITION 512
+
+#define AD525X_RDAC0 0
+#define AD525X_RDAC1 1
+#define AD525X_RDAC2 2
+#define AD525X_RDAC3 3
+
+#define AD525X_REG_TOL 0x18
+#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0)
+#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1)
+#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2)
+#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3)
+
+/* RDAC-to-EEPROM Interface Commands */
+#define AD525X_I2C_RDAC (0x00 << 5)
+#define AD525X_I2C_EEPROM (0x01 << 5)
+#define AD525X_I2C_CMD (0x80)
+
+#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3))
+#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3))
+#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3))
+#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3))
+
+static s32 ad525x_read(struct i2c_client *client, u8 reg);
+static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct dpot_data {
+ struct mutex update_lock;
+ unsigned rdac_mask;
+ unsigned max_pos;
+ unsigned devid;
+};
+
+/* sysfs functions */
+
+static ssize_t sysfs_show_reg(struct device *dev,
+ struct device_attribute *attr, char *buf, u32 reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dpot_data *data = i2c_get_clientdata(client);
+ s32 value;
+
+ mutex_lock(&data->update_lock);
+ value = ad525x_read(client, reg);
+ mutex_unlock(&data->update_lock);
+
+ if (value < 0)
+ return -EINVAL;
+ /*
+ * Let someone else deal with converting this ...
+ * the tolerance is a two-byte value where the MSB
+ * is a sign + integer value, and the LSB is a
+ * decimal value. See page 18 of the AD5258
+ * datasheet (Rev. A) for more details.
+ */
+
+ if (reg & AD525X_REG_TOL)
+ return sprintf(buf, "0x%04x\n", value & 0xFFFF);
+ else
+ return sprintf(buf, "%u\n", value & data->rdac_mask);
+}
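The comment above deliberately leaves the tolerance conversion to userspace. As a rough guide, the high byte carries the sign and integer percent and the low byte the fractional part; the 1/256 scaling below is taken from the AD5258 datasheet rather than from this driver, so treat it as an assumption:

/* Sketch: decode the 16-bit tolerance word read from the tolerance0 file. */
static double example_decode_tolerance(unsigned int raw)
{
	int sign = (raw & 0x8000) ? -1 : 1;
	unsigned int integer = (raw >> 8) & 0x7f;	/* whole percent */
	unsigned int frac = raw & 0xff;			/* assumed 1/256 % steps */

	return sign * (integer + frac / 256.0);
}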
+
+static ssize_t sysfs_set_reg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, u32 reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dpot_data *data = i2c_get_clientdata(client);
+ unsigned long value;
+ int err;
+
+ err = strict_strtoul(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > data->rdac_mask)
+ value = data->rdac_mask;
+
+ mutex_lock(&data->update_lock);
+ ad525x_write(client, reg, value);
+ if (reg & AD525X_I2C_EEPROM)
+ msleep(26); /* Sleep while the EEPROM updates */
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t sysfs_do_cmd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, u32 reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dpot_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+ ad525x_write(client, reg, 0);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
+}
+
+static ssize_t set_rdac0(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC0);
+}
+
+static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
+
+static ssize_t show_eeprom0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
+}
+
+static ssize_t set_eeprom0(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC0);
+}
+
+static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
+
+static ssize_t show_tolerance0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
+}
+
+static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
+}
+
+static ssize_t set_rdac1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC1);
+}
+
+static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
+
+static ssize_t show_eeprom1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
+}
+
+static ssize_t set_eeprom1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC1);
+}
+
+static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
+
+static ssize_t show_tolerance1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
+}
+
+static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
+}
+
+static ssize_t set_rdac2(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC2);
+}
+
+static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
+
+static ssize_t show_eeprom2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
+}
+
+static ssize_t set_eeprom2(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC2);
+}
+
+static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
+
+static ssize_t show_tolerance2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
+}
+
+static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
+}
+
+static ssize_t set_rdac3(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC3);
+}
+
+static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
+
+static ssize_t show_eeprom3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
+}
+
+static ssize_t set_eeprom3(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC3);
+}
+
+static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
+
+static ssize_t show_tolerance3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
+}
+
+static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
+
+static struct attribute *ad525x_attributes_wipers[4][4] = {
+ {
+ &dev_attr_rdac0.attr,
+ &dev_attr_eeprom0.attr,
+ &dev_attr_tolerance0.attr,
+ NULL
+ }, {
+ &dev_attr_rdac1.attr,
+ &dev_attr_eeprom1.attr,
+ &dev_attr_tolerance1.attr,
+ NULL
+ }, {
+ &dev_attr_rdac2.attr,
+ &dev_attr_eeprom2.attr,
+ &dev_attr_tolerance2.attr,
+ NULL
+ }, {
+ &dev_attr_rdac3.attr,
+ &dev_attr_eeprom3.attr,
+ &dev_attr_tolerance3.attr,
+ NULL
+ }
+};
+
+static const struct attribute_group ad525x_group_wipers[] = {
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
+};
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t set_inc_all(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
+}
+
+static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
+
+static ssize_t set_dec_all(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
+}
+
+static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
+
+static ssize_t set_inc_all_6db(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
+}
+
+static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
+
+static ssize_t set_dec_all_6db(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
+}
+
+static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+
+static struct attribute *ad525x_attributes_commands[] = {
+ &dev_attr_inc_all.attr,
+ &dev_attr_dec_all.attr,
+ &dev_attr_inc_all_6db.attr,
+ &dev_attr_dec_all_6db.attr,
+ NULL
+};
+
+static const struct attribute_group ad525x_group_commands = {
+ .attrs = ad525x_attributes_commands,
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* i2c device functions */
+
+/**
+ * ad525x_read - return the value contained in the specified register
+ * on the AD5258 device.
+ * @client: value returned from i2c_new_device()
+ * @reg: the register to read
+ *
+ * If the tolerance register is specified, 2 bytes are returned.
+ * Otherwise, 1 byte is returned. A negative value indicates an error
+ * occurred while reading the register.
+ */
+static s32 ad525x_read(struct i2c_client *client, u8 reg)
+{
+ struct dpot_data *data = i2c_get_clientdata(client);
+
+ if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
+ return i2c_smbus_read_word_data(client, (reg & 0xF8) |
+ ((reg & 0x7) << 1));
+ else
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+/**
+ * ad525x_write - store the given value in the specified register on
+ * the AD5258 device.
+ * @client: value returned from i2c_new_device()
+ * @reg: the register to write
+ * @value: the byte to store in the register
+ *
+ * For command instructions that do not take a data byte (NOP,
+ * RESTORE_FROM_EEPROM and STORE_TO_EEPROM), the "value" parameter is
+ * ignored and only the instruction byte is sent.
+ *
+ * A negative return value indicates an error occurred while reading
+ * the register.
+ */
+static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ struct dpot_data *data = i2c_get_clientdata(client);
+
+ /* Only write the instruction byte for certain commands */
+ if (reg & AD525X_I2C_CMD)
+ return i2c_smbus_write_byte(client, reg);
+
+ if (data->max_pos > 256)
+ return i2c_smbus_write_word_data(client, (reg & 0xF8) |
+ ((reg & 0x7) << 1), value);
+ else
+ /* All other registers require instruction + data bytes */
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int ad525x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct dpot_data *data;
+ int err = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+		dev_err(dev, "missing I2C functionality for this driver\n");
+		err = -EIO;
+		goto exit;
+	}
+
+ data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ switch (id->driver_data) {
+ case AD5258_ID:
+ data->max_pos = AD5258_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ break;
+ case AD5259_ID:
+ data->max_pos = AD5259_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ break;
+ case AD5251_ID:
+ data->max_pos = AD5251_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5252_ID:
+ data->max_pos = AD5252_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5253_ID:
+ data->max_pos = AD5253_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5254_ID:
+ data->max_pos = AD5254_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5255_ID:
+ data->max_pos = AD5255_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ default:
+ err = -ENODEV;
+ goto exit_free;
+ }
+
+ if (err) {
+ dev_err(dev, "failed to register sysfs hooks\n");
+ goto exit_free;
+ }
+
+ data->devid = id->driver_data;
+ data->rdac_mask = data->max_pos - 1;
+
+ dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
+ id->name, data->max_pos);
+
+ return 0;
+
+exit_free:
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+exit:
+ dev_err(dev, "failed to create client\n");
+ return err;
+}
+
+static int __devexit ad525x_remove(struct i2c_client *client)
+{
+ struct dpot_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ switch (data->devid) {
+ case AD5258_ID:
+ case AD5259_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ break;
+ case AD5251_ID:
+ case AD5252_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5253_ID:
+ case AD5254_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5255_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad525x_idtable[] = {
+ {"ad5258", AD5258_ID},
+ {"ad5259", AD5259_ID},
+ {"ad5251", AD5251_ID},
+ {"ad5252", AD5252_ID},
+ {"ad5253", AD5253_ID},
+ {"ad5254", AD5254_ID},
+ {"ad5255", AD5255_ID},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
+
+static struct i2c_driver ad525x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .id_table = ad525x_idtable,
+ .probe = ad525x_probe,
+ .remove = __devexit_p(ad525x_remove),
+};
+
+static int __init ad525x_init(void)
+{
+ return i2c_add_driver(&ad525x_driver);
+}
+
+module_init(ad525x_init);
+
+static void __exit ad525x_exit(void)
+{
+ i2c_del_driver(&ad525x_driver);
+}
+
+module_exit(ad525x_exit);
+
+MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
+ "Michael Hennerich <hennerich@blackfin.uclinux.org>, ");
+MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
new file mode 100644
index 000000000000..8110460558ff
--- /dev/null
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -0,0 +1,370 @@
+/*
+ * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT)
+ *
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/cs5535.h>
+
+#define DRV_NAME "cs5535-mfgpt"
+#define MFGPT_BAR 2
+
+static int mfgpt_reset_timers;
+module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
+MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; "
+ "required by some broken BIOSes (ie, TinyBIOS < 0.99).");
+
+struct cs5535_mfgpt_timer {
+ struct cs5535_mfgpt_chip *chip;
+ int nr;
+};
+
+static struct cs5535_mfgpt_chip {
+ DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
+ resource_size_t base;
+
+ struct pci_dev *pdev;
+ spinlock_t lock;
+ int initialized;
+} cs5535_mfgpt_chip;
+
+int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable)
+{
+ uint32_t msr, mask, value, dummy;
+ int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
+
+ if (!timer) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ /*
+ * The register maps for these are described in sections 6.17.1.x of
+ * the AMD Geode CS5536 Companion Device Data Book.
+ */
+ switch (event) {
+ case MFGPT_EVENT_RESET:
+ /*
+ * XXX: According to the docs, we cannot reset timers above
+ * 6; that is, resets for 7 and 8 will be ignored. Is this
+ * a problem? -dilinger
+ */
+ msr = MSR_MFGPT_NR;
+ mask = 1 << (timer->nr + 24);
+ break;
+
+ case MFGPT_EVENT_NMI:
+ msr = MSR_MFGPT_NR;
+ mask = 1 << (timer->nr + shift);
+ break;
+
+ case MFGPT_EVENT_IRQ:
+ msr = MSR_MFGPT_IRQ;
+ mask = 1 << (timer->nr + shift);
+ break;
+
+ default:
+ return -EIO;
+ }
+
+ rdmsr(msr, value, dummy);
+
+ if (enable)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ wrmsr(msr, value, dummy);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event);
+
+int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq,
+ int enable)
+{
+ uint32_t zsel, lpc, dummy;
+ int shift;
+
+ if (!timer) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ /*
+ * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
+ * is using the same CMP of the timer's Siamese twin, the IRQ is set to
+ * 2, and we must not use or change it.
+ * XXX: Likewise, two Linux drivers might clash if the 2nd overwrites the
+ * IRQ of the 1st. This can only happen when forcing an IRQ; calling this
+ * with *irq==0 is safe. Currently there _are_ no two such drivers.
+ */
+ rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4;
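+	/* e.g. timer 5 with CMP2: shift = (4 + 5 % 4) * 4 = 20, i.e. the sixth nibble of ZSEL */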
+ if (((zsel >> shift) & 0xF) == 2)
+ return -EIO;
+
+ /* Choose IRQ: if none supplied, keep IRQ already set or use default */
+ if (!*irq)
+ *irq = (zsel >> shift) & 0xF;
+ if (!*irq)
+ *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+
+ /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
+ if (*irq < 1 || *irq == 2 || *irq > 15)
+ return -EIO;
+ rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
+ if (lpc & (1 << *irq))
+ return -EIO;
+
+ /* All chosen and checked - go for it */
+ if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+ return -EIO;
+ if (enable) {
+ zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
+ wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq);
+
+struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
+{
+ struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip;
+ struct cs5535_mfgpt_timer *timer = NULL;
+ unsigned long flags;
+ int max;
+
+ if (!mfgpt->initialized)
+ goto done;
+
+ /* only allocate timers from the working domain if requested */
+ if (domain == MFGPT_DOMAIN_WORKING)
+ max = 6;
+ else
+ max = MFGPT_MAX_TIMERS;
+
+ if (timer_nr >= max) {
+ /* programmer error. silly programmers! */
+ WARN_ON(1);
+ goto done;
+ }
+
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ if (timer_nr < 0) {
+ unsigned long t;
+
+ /* try to find any available timer */
+ t = find_first_bit(mfgpt->avail, max);
+ /* set timer_nr to -1 if no timers available */
+ timer_nr = t < max ? (int) t : -1;
+ } else {
+ /* check if the requested timer's available */
+ if (!test_bit(timer_nr, mfgpt->avail))
+ timer_nr = -1;
+ }
+
+ if (timer_nr >= 0)
+ /* if timer_nr is not -1, it's an available timer */
+ __clear_bit(timer_nr, mfgpt->avail);
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+
+ if (timer_nr < 0)
+ goto done;
+
+ timer = kmalloc(sizeof(*timer), GFP_KERNEL);
+ if (!timer) {
+ /* aw hell */
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ __set_bit(timer_nr, mfgpt->avail);
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+ goto done;
+ }
+ timer->chip = mfgpt;
+ timer->nr = timer_nr;
+ dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr);
+
+done:
+ return timer;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
+
+/*
+ * XXX: This frees the timer memory, but never resets the actual hardware
+ * timer. The old geode_mfgpt code did this; it would be good to figure
+ * out a way to actually release the hardware timer. See comments below.
+ */
+void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
+{
+ kfree(timer);
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
+
+uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg)
+{
+ return inw(timer->chip->base + reg + (timer->nr * 8));
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_read);
+
+void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value)
+{
+ outw(value, timer->chip->base + reg + (timer->nr * 8));
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
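+
+/*
+ * Usage sketch (illustrative only): the consumer driver below is
+ * hypothetical, but the calls and macros are the ones exported and
+ * used in this file.
+ *
+ *	struct cs5535_mfgpt_timer *t;
+ *	int irq = 0;	(0 means: keep the routed IRQ or fall back to the default)
+ *
+ *	t = cs5535_mfgpt_alloc_timer(-1, MFGPT_DOMAIN_WORKING);	(-1: any free timer)
+ *	if (t && !cs5535_mfgpt_set_irq(t, MFGPT_CMP1, &irq, 1))
+ *		... program it via cs5535_mfgpt_write(t, MFGPT_REG_SETUP, ...);
+ *	cs5535_mfgpt_free_timer(t);
+ */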
+
+/*
+ * This is a sledgehammer that resets all MFGPT timers. This is required by
+ * some broken BIOSes which leave the system in an unstable state
+ * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain whether
+ * this secret MSR can be used to release individual timers. Jordan
+ * tells me that he and Mitch once experimented with it, but it's unclear
+ * what the results were (and they experienced some instability).
+ */
+static void __init reset_all_timers(void)
+{
+ uint32_t val, dummy;
+
+ /* The following undocumented bit resets the MFGPT timers */
+ val = 0xFF; dummy = 0;
+ wrmsr(MSR_MFGPT_SETUP, val, dummy);
+}
+
+/*
+ * Check whether any MFGPTs are available for the kernel to use. In most
+ * cases, firmware that uses AMD's VSA code will claim all timers during
+ * bootup; we certainly don't want to take them if they're already in use.
+ * In other cases (such as with VSAless OpenFirmware), the system firmware
+ * leaves timers available for us to use.
+ */
+static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+{
+ struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
+ unsigned long flags;
+ int timers = 0;
+ uint16_t val;
+ int i;
+
+ /* bios workaround */
+ if (mfgpt_reset_timers)
+ reset_all_timers();
+
+ /* just to be safe, protect this section w/ lock */
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
+ timer.nr = i;
+ val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP);
+ if (!(val & MFGPT_SETUP_SETUP)) {
+ __set_bit(i, mfgpt->avail);
+ timers++;
+ }
+ }
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+
+ return timers;
+}
+
+static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err, t;
+
+ /* There are two ways to get the MFGPT base address; one is by
+ * fetching it from MSR_LBAR_MFGPT, the other is by reading the
+ * PCI BAR info. The latter method is easier (especially across
+ * different architectures), so we'll stick with that for now. If
+ * it turns out to be unreliable in the face of crappy BIOSes, we
+ * can always go back to using MSRs.. */
+
+ err = pci_enable_device_io(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable device IO\n");
+ goto done;
+ }
+
+ err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
+ goto done;
+ }
+
+ /* set up the driver-specific struct */
+ cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
+ cs5535_mfgpt_chip.pdev = pdev;
+ spin_lock_init(&cs5535_mfgpt_chip.lock);
+
+ dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
+ (unsigned long long) cs5535_mfgpt_chip.base);
+
+ /* detect the available timers */
+ t = scan_timers(&cs5535_mfgpt_chip);
+ dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
+ cs5535_mfgpt_chip.initialized = 1;
+ return 0;
+
+done:
+ return err;
+}
+
+static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
+
+/*
+ * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
+ * registration stuff. It only allows one driver to bind to each PCI
+ * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
+ * device. Instead, we manually scan for the PCI device, request a single
+ * region, and keep track of the devices that we're using.
+ */
+
+static int __init cs5535_mfgpt_scan_pci(void)
+{
+ struct pci_dev *pdev;
+ int err = -ENODEV;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
+ pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
+ cs5535_mfgpt_pci_tbl[i].device, NULL);
+ if (pdev) {
+ err = cs5535_mfgpt_probe(pdev,
+ &cs5535_mfgpt_pci_tbl[i]);
+ if (err)
+ pci_dev_put(pdev);
+
+ /* we only support a single CS5535/6 southbridge */
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int __init cs5535_mfgpt_init(void)
+{
+ return cs5535_mfgpt_scan_pci();
+}
+
+module_init(cs5535_mfgpt_init);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa0..f939ebc2507c 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -32,9 +32,6 @@
static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
0x55, 0x56, 0x57, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(eeprom);
-
/* Size of EEPROM in bytes */
#define EEPROM_SIZE 256
@@ -135,8 +132,7 @@ static struct bin_attribute eeprom_attr = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int eeprom_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -233,7 +229,7 @@ static struct i2c_driver eeprom_driver = {
.class = I2C_CLASS_DDC | I2C_CLASS_SPD,
.detect = eeprom_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init eeprom_init(void)
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 4bb7a3af9ad9..395a4ea64e9c 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -30,9 +30,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ics932s401);
-
/* ICS932S401 registers */
#define ICS932S401_REG_CFG2 0x01
#define ICS932S401_CFG1_SPREAD 0x01
@@ -106,12 +103,12 @@ struct ics932s401_data {
static int ics932s401_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int ics932s401_detect(struct i2c_client *client, int kind,
+static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int ics932s401_remove(struct i2c_client *client);
static const struct i2c_device_id ics932s401_id[] = {
- { "ics932s401", ics932s401 },
+ { "ics932s401", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ics932s401_id);
@@ -125,7 +122,7 @@ static struct i2c_driver ics932s401_driver = {
.remove = ics932s401_remove,
.id_table = ics932s401_id,
.detect = ics932s401_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static struct ics932s401_data *ics932s401_update_device(struct device *dev)
@@ -413,7 +410,7 @@ static ssize_t show_spread(struct device *dev,
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int ics932s401_detect(struct i2c_client *client, int kind,
+static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 60b0b1a4fb3a..09dcb699e667 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -138,7 +138,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
* even though the following code utilizes external interrupt registers
* to perform the speed calculation.
*/
-static void
+static void __devinit
ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
union ioc4_int_out int_out;
@@ -230,7 +230,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
* on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
* If neither is present, it's a PCI-RT.
*/
-static unsigned int
+static unsigned int __devinit
ioc4_variant(struct ioc4_driver_data *idd)
{
struct pci_dev *pdev = NULL;
@@ -269,7 +269,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
return IOC4_VARIANT_PCI_RT;
}
-static void
+static void __devinit
ioc4_load_modules(struct work_struct *work)
{
/* arg just has to be freed */
@@ -280,7 +280,7 @@ ioc4_load_modules(struct work_struct *work)
}
/* Adds a new instance of an IOC4 card */
-static int
+static int __devinit
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc4_driver_data *idd;
@@ -425,7 +425,7 @@ out:
}
/* Removes a particular instance of an IOC4 card. */
-static void
+static void __devexit
ioc4_remove(struct pci_dev *pdev)
{
struct ioc4_submodule *is;
@@ -476,7 +476,7 @@ static struct pci_driver ioc4_driver = {
.name = "IOC4",
.id_table = ioc4_id_table,
.probe = ioc4_probe,
- .remove = ioc4_remove,
+ .remove = __devexit_p(ioc4_remove),
};
MODULE_DEVICE_TABLE(pci, ioc4_id_table);
@@ -486,14 +486,14 @@ MODULE_DEVICE_TABLE(pci, ioc4_id_table);
*********************/
/* Module load */
-static int __devinit
+static int __init
ioc4_init(void)
{
return pci_register_driver(&ioc4_driver);
}
/* Module unload */
-static void __devexit
+static void __exit
ioc4_exit(void)
{
/* Ensure ioc4_load_modules() has completed before exiting */
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
new file mode 100644
index 000000000000..d3f229a3a77e
--- /dev/null
+++ b/drivers/misc/ti_dac7512.c
@@ -0,0 +1,101 @@
+/*
+ * dac7512.c - Linux kernel module for
+ * Texas Instruments DAC7512
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+
+#define DAC7512_DRV_NAME "dac7512"
+#define DRIVER_VERSION "1.0"
+
+static ssize_t dac7512_store_val(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char tmp[2];
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ tmp[0] = val >> 8;
+ tmp[1] = val & 0xff;
+ spi_write(spi, tmp, sizeof(tmp));
+ return count;
+}
+
+static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
+
+static struct attribute *dac7512_attributes[] = {
+ &dev_attr_value.attr,
+ NULL
+};
+
+static const struct attribute_group dac7512_attr_group = {
+ .attrs = dac7512_attributes,
+};
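+
+/*
+ * Usage sketch (the SPI device path below is illustrative): writing a
+ * decimal value to the "value" attribute sends its low 16 bits to the
+ * DAC as two bytes, MSB first, e.g.
+ *
+ *	echo 2048 > /sys/bus/spi/devices/spi1.0/value
+ */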
+
+static int __devinit dac7512_probe(struct spi_device *spi)
+{
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
+}
+
+static int __devexit dac7512_remove(struct spi_device *spi)
+{
+ sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
+ return 0;
+}
+
+static struct spi_driver dac7512_driver = {
+ .driver = {
+ .name = DAC7512_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dac7512_probe,
+ .remove = __devexit_p(dac7512_remove),
+};
+
+static int __init dac7512_init(void)
+{
+ return spi_register_driver(&dac7512_driver);
+}
+
+static void __exit dac7512_exit(void)
+{
+ spi_unregister_driver(&dac7512_driver);
+}
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("DAC7512 16-bit DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(dac7512_init);
+module_exit(dac7512_exit);
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ab37a6d9d32a..bb22ffd76ef8 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -3,7 +3,7 @@
#
config MMC_UNSAFE_RESUME
- bool "Allow unsafe resume (DANGEROUS)"
+ bool "Assume MMC/SD cards are non-removable (DANGEROUS)"
help
If you say Y here, the MMC layer will assume that all cards
stayed in their respective slots during the suspend. The
@@ -14,3 +14,5 @@ config MMC_UNSAFE_RESUME
This option is usually just for embedded systems which use
a MMC/SD card for rootfs. Most people should say N here.
+ This option sets a default which can be overridden by the
+ module parameter "removable=0" or "removable=1".
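+ For example, when the MMC core is built as a module (the module name
+ is assumed to be mmc_core here), loading it with
+ "modprobe mmc_core removable=0" treats cards as non-removable
+ regardless of this option.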
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7dab2e5f4bc9..30acd5265821 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,6 +48,22 @@ int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
+ * We normally treat cards as removed during suspend if they are not
+ * known to be on a non-removable bus, to avoid the risk of writing
+ * back data to a different card after resume. Allow this to be
+ * overridden if necessary.
+ */
+#ifdef CONFIG_MMC_UNSAFE_RESUME
+int mmc_assume_removable;
+#else
+int mmc_assume_removable = 1;
+#endif
+module_param_named(removable, mmc_assume_removable, bool, 0644);
+MODULE_PARM_DESC(
+ removable,
+ "MMC/SD cards are removable and may be removed during suspend");
+
+/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 67ae6abc4230..a811c52a1659 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -54,7 +54,9 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
int mmc_attach_sd(struct mmc_host *host, u32 ocr);
int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+/* Module parameters */
extern int use_spi_crc;
+extern int mmc_assume_removable;
/* Debugfs information for hosts and cards */
void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bfefce365ae7..c11189446a1f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -602,25 +602,6 @@ static int mmc_awake(struct mmc_host *host)
return err;
}
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
-static const struct mmc_bus_ops mmc_ops = {
- .awake = mmc_awake,
- .sleep = mmc_sleep,
- .remove = mmc_remove,
- .detect = mmc_detect,
- .suspend = mmc_suspend,
- .resume = mmc_resume,
- .power_restore = mmc_power_restore,
-};
-
-static void mmc_attach_bus_ops(struct mmc_host *host)
-{
- mmc_attach_bus(host, &mmc_ops);
-}
-
-#else
-
static const struct mmc_bus_ops mmc_ops = {
.awake = mmc_awake,
.sleep = mmc_sleep,
@@ -645,15 +626,13 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE)
+ if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
bus_ops = &mmc_ops_unsafe;
else
bus_ops = &mmc_ops;
mmc_attach_bus(host, bus_ops);
}
-#endif
-
/*
* Starting point for MMC card init.
*/
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 10b2a4d20f5a..fdd414eded09 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -606,23 +606,6 @@ static void mmc_sd_power_restore(struct mmc_host *host)
mmc_release_host(host);
}
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
-static const struct mmc_bus_ops mmc_sd_ops = {
- .remove = mmc_sd_remove,
- .detect = mmc_sd_detect,
- .suspend = mmc_sd_suspend,
- .resume = mmc_sd_resume,
- .power_restore = mmc_sd_power_restore,
-};
-
-static void mmc_sd_attach_bus_ops(struct mmc_host *host)
-{
- mmc_attach_bus(host, &mmc_sd_ops);
-}
-
-#else
-
static const struct mmc_bus_ops mmc_sd_ops = {
.remove = mmc_sd_remove,
.detect = mmc_sd_detect,
@@ -643,15 +626,13 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE)
+ if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
bus_ops = &mmc_sd_ops_unsafe;
else
bus_ops = &mmc_sd_ops;
mmc_attach_bus(host, bus_ops);
}
-#endif
-
/*
* Starting point for SD card init.
*/
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f85dcd536508..9538389783c1 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -97,26 +97,56 @@ static const unsigned char speed_val[16] =
static const unsigned int speed_unit[8] =
{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
-/* FUNCE tuples with these types get passed to SDIO drivers */
-static const unsigned char funce_type_whitelist[] = {
- 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
+
+typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
+ const unsigned char *, unsigned);
+
+struct cis_tpl {
+ unsigned char code;
+ unsigned char min_size;
+ tpl_parse_t *parse;
};
-static int cistpl_funce_whitelisted(unsigned char type)
+static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
+ const char *tpl_descr,
+ const struct cis_tpl *tpl, int tpl_count,
+ unsigned char code,
+ const unsigned char *buf, unsigned size)
{
- int i;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
- if (funce_type_whitelist[i] == type)
- return 1;
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if (tpl->code == code)
+ break;
}
- return 0;
+ if (i < tpl_count) {
+ if (size >= tpl->min_size) {
+ if (tpl->parse)
+ ret = tpl->parse(card, func, buf, size);
+ else
+ ret = -EILSEQ; /* known tuple, not parsed */
+ } else {
+ /* invalid tuple */
+ ret = -EINVAL;
+ }
+ if (ret && ret != -EILSEQ && ret != -ENOENT) {
+ printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host), tpl_descr, code, size);
+ }
+ } else {
+ /* unknown tuple */
+ ret = -ENOENT;
+ }
+
+ return ret;
}
-static int cistpl_funce_common(struct mmc_card *card,
+static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
- if (size < 0x04 || buf[0] != 0)
+ /* Only valid for the common CIS (function 0) */
+ if (func)
return -EINVAL;
/* TPLFE_FN0_BLK_SIZE */
@@ -129,20 +159,24 @@ static int cistpl_funce_common(struct mmc_card *card,
return 0;
}
-static int cistpl_funce_func(struct sdio_func *func,
+static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
unsigned vsn;
unsigned min_size;
- /* let SDIO drivers take care of whitelisted FUNCE tuples */
- if (cistpl_funce_whitelisted(buf[0]))
- return -EILSEQ;
+ /* Only valid for the individual function's CIS (1-7) */
+ if (!func)
+ return -EINVAL;
+ /*
+ * This tuple has a different length depending on the SDIO spec
+ * version.
+ */
vsn = func->card->cccr.sdio_vsn;
min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
- if (size < min_size || buf[0] != 1)
+ if (size < min_size)
return -EINVAL;
/* TPLFE_MAX_BLK_SIZE */
@@ -157,39 +191,32 @@ static int cistpl_funce_func(struct sdio_func *func,
return 0;
}
+/*
+ * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
+ *
+ * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
+ * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID, since on
+ * SDIO the TPLFID_FUNCTION value is always hardcoded to 0x0C.
+ */
+static const struct cis_tpl cis_tpl_funce_list[] = {
+ { 0x00, 4, cistpl_funce_common },
+ { 0x01, 0, cistpl_funce_func },
+ { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
+};
+
static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
- int ret;
-
- /*
- * There should be two versions of the CISTPL_FUNCE tuple,
- * one for the common CIS (function 0) and a version used by
- * the individual function's CIS (1-7). Yet, the later has a
- * different length depending on the SDIO spec version.
- */
- if (func)
- ret = cistpl_funce_func(func, buf, size);
- else
- ret = cistpl_funce_common(card, buf, size);
-
- if (ret && ret != -EILSEQ) {
- printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
- "type %u\n", mmc_hostname(card->host), size, buf[0]);
- }
+ if (size < 1)
+ return -EINVAL;
- return ret;
+ return cis_tpl_parse(card, func, "CISTPL_FUNCE",
+ cis_tpl_funce_list,
+ ARRAY_SIZE(cis_tpl_funce_list),
+ buf[0], buf, size);
}
-typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
- const unsigned char *, unsigned);
-
-struct cis_tpl {
- unsigned char code;
- unsigned char min_size;
- tpl_parse_t *parse;
-};
-
+/* Known TPL_CODEs table for CIS tuples */
static const struct cis_tpl cis_tpl_list[] = {
{ 0x15, 3, cistpl_vers_1 },
{ 0x20, 4, cistpl_manfid },
@@ -268,46 +295,38 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
break;
}
- for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
- if (cis_tpl_list[i].code == tpl_code)
- break;
- if (i < ARRAY_SIZE(cis_tpl_list)) {
- const struct cis_tpl *tpl = cis_tpl_list + i;
- if (tpl_link < tpl->min_size) {
- printk(KERN_ERR
- "%s: bad CIS tuple 0x%02x"
- " (length = %u, expected >= %u)\n",
- mmc_hostname(card->host),
- tpl_code, tpl_link, tpl->min_size);
- ret = -EINVAL;
- } else if (tpl->parse) {
- ret = tpl->parse(card, func,
- this->data, tpl_link);
- }
+ /* Try to parse the CIS tuple */
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
+ tpl_code, this->data, tpl_link);
+ if (ret == -EILSEQ || ret == -ENOENT) {
/*
- * We don't need the tuple anymore if it was
- * successfully parsed by the SDIO core or if it is
- * not going to be parsed by SDIO drivers.
+ * The tuple is unknown or known but not parsed.
+ * Queue the tuple for the function driver.
*/
- if (!ret || ret != -EILSEQ)
- kfree(this);
- } else {
- /* unknown tuple */
- ret = -EILSEQ;
- }
-
- if (ret == -EILSEQ) {
- /* this tuple is unknown to the core or whitelisted */
this->next = NULL;
this->code = tpl_code;
this->size = tpl_link;
*prev = this;
prev = &this->next;
- printk(KERN_DEBUG
- "%s: queuing CIS tuple 0x%02x length %u\n",
- mmc_hostname(card->host), tpl_code, tpl_link);
+
+ if (ret == -ENOENT) {
+ /* warn about unknown tuples */
+ printk(KERN_WARNING "%s: queuing unknown"
+ " CIS tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host),
+ tpl_code, tpl_link);
+ }
+
/* keep on analyzing tuples */
ret = 0;
+ } else {
+ /*
+ * We don't need the tuple anymore if it was
+ * successfully parsed by the SDIO core or if it is
+ * not going to be queued for a driver.
+ */
+ kfree(this);
}
ptr += tpl_link;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e04b751680d0..9d405b181781 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -251,6 +251,14 @@ config MMC_MVSDIO
To compile this driver as a module, choose M here: the
module will be called mvsdio.
+config MMC_DAVINCI
+ tristate "TI DAVINCI Multimedia Card Interface support"
+ depends on ARCH_DAVINCI
+ help
+ This selects the TI DAVINCI Multimedia Card Interface.
+ If you have a DAVINCI board with a Multimedia Card slot,
+ say Y or M here. If unsure, say N.
+
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
depends on SPI_MASTER && !HIGHMEM && HAS_DMA
@@ -357,3 +365,22 @@ config MMC_VIA_SDMMC
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config SDH_BFIN
+ tristate "Blackfin Secure Digital Host support"
+ depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512))
+ help
+ If you say yes here you will get support for the Blackfin on-chip
+ Secure Digital Host interface. This includes support for MMC and
+ SD cards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_sdh.
+
+ If unsure, say N.
+
+config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround"
+ depends on SDH_BFIN
+ help
+ If you say yes here, SD cards may work on the EZkit.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index abcb0400e06d..ded4d8cdd9d7 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
+obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
+obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc25586b7ee1..8072128e933b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -25,6 +25,8 @@
#include <linux/stat.h>
#include <linux/mmc/host.h>
+
+#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <asm/io.h>
@@ -92,6 +94,7 @@ struct atmel_mci_dma {
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
* @mode_reg: Value of the MR register.
+ * @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
@@ -155,6 +158,7 @@ struct atmel_mci {
bool need_clock_update;
bool need_reset;
u32 mode_reg;
+ u32 cfg_reg;
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
@@ -223,6 +227,19 @@ static bool mci_has_rwproof(void)
}
/*
+ * The new MCI2 module isn't 100% compatible with the old MCI module,
+ * and it has a few nice features which we want to use...
+ */
+static inline bool atmci_is_mci2(void)
+{
+ if (cpu_is_at91sam9g45())
+ return true;
+
+ return false;
+}
+
+
+/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
*/
@@ -357,12 +374,33 @@ static int atmci_regs_show(struct seq_file *s, void *v)
buf[MCI_BLKR / 4],
buf[MCI_BLKR / 4] & 0xffff,
(buf[MCI_BLKR / 4] >> 16) & 0xffff);
+ if (atmci_is_mci2())
+ seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]);
/* Don't read RSPR and RDR; it will consume the data there */
atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
+ if (atmci_is_mci2()) {
+ u32 val;
+
+ val = buf[MCI_DMA / 4];
+ seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
+ val, val & 3,
+ ((val >> 4) & 3) ?
+ 1 << (((val >> 4) & 3) + 1) : 1,
+ val & MCI_DMAEN ? " DMAEN" : "");
+
+ val = buf[MCI_CFG / 4];
+ seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
+ val,
+ val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
+ val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
+ val & MCI_CFG_HSMODE ? " HSMODE" : "",
+ val & MCI_CFG_LSYNC ? " LSYNC" : "");
+ }
+
kfree(buf);
return 0;
@@ -557,6 +595,10 @@ static void atmci_dma_complete(void *arg)
dev_vdbg(&host->pdev->dev, "DMA complete\n");
+ if (atmci_is_mci2())
+ /* Disable DMA hardware handshaking on MCI */
+ mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN);
+
atmci_dma_cleanup(host);
/*
@@ -592,7 +634,7 @@ static void atmci_dma_complete(void *arg)
}
static int
-atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
@@ -624,6 +666,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (!chan)
return -ENODEV;
+ if (atmci_is_mci2())
+ mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
+
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
@@ -641,10 +686,6 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
host->dma.data_desc = desc;
desc->callback = atmci_dma_complete;
desc->callback_param = host;
- desc->tx_submit(desc);
-
- /* Go! */
- chan->device->device_issue_pending(chan);
return 0;
unmap_exit:
@@ -652,13 +693,26 @@ unmap_exit:
return -ENOMEM;
}
+static void atmci_submit_data(struct atmel_mci *host)
+{
+ struct dma_chan *chan = host->data_chan;
+ struct dma_async_tx_descriptor *desc = host->dma.data_desc;
+
+ if (chan) {
+ desc->tx_submit(desc);
+ chan->device->device_issue_pending(chan);
+ }
+}
+
#else /* CONFIG_MMC_ATMELMCI_DMA */
-static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
return -ENOSYS;
}
+static void atmci_submit_data(struct atmel_mci *host) {}
+
static void atmci_stop_dma(struct atmel_mci *host)
{
/* Data transfer was stopped by the interrupt handler */
@@ -672,7 +726,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
* Returns a mask of interrupt flags to be enabled after the whole
* request has been prepared.
*/
-static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags;
@@ -683,7 +737,7 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
host->data = data;
iflags = ATMCI_DATA_ERROR_FLAGS;
- if (atmci_submit_data_dma(host, data)) {
+ if (atmci_prepare_data_dma(host, data)) {
host->data_chan = NULL;
/*
@@ -729,6 +783,8 @@ static void atmci_start_request(struct atmel_mci *host,
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
host->need_reset = false;
}
mci_writel(host, SDCR, slot->sdc_reg);
@@ -744,6 +800,7 @@ static void atmci_start_request(struct atmel_mci *host,
while (!(mci_readl(host, SR) & MCI_CMDRDY))
cpu_relax();
}
+ iflags = 0;
data = mrq->data;
if (data) {
atmci_set_timeout(host, slot, data);
@@ -753,15 +810,17 @@ static void atmci_start_request(struct atmel_mci *host,
| MCI_BLKLEN(data->blksz));
dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
+
+ iflags |= atmci_prepare_data(host, data);
}
- iflags = MCI_CMDRDY;
+ iflags |= MCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
atmci_start_command(host, cmd, cmdflags);
if (data)
- iflags |= atmci_submit_data(host, data);
+ atmci_submit_data(host);
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
@@ -857,6 +916,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_enable(host->mck);
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
}
/*
@@ -1095,6 +1156,8 @@ static void atmci_detect_change(unsigned long data)
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
host->data = NULL;
host->cmd = NULL;
@@ -1584,14 +1647,47 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
#ifdef CONFIG_MMC_ATMELMCI_DMA
static bool filter(struct dma_chan *chan, void *slave)
{
- struct dw_dma_slave *dws = slave;
+ struct mci_dma_data *sl = slave;
- if (dws->dma_dev == chan->device->dev) {
- chan->private = dws;
+ if (sl && find_slave_dev(sl) == chan->device->dev) {
+ chan->private = slave_data_ptr(sl);
return true;
- } else
+ } else {
return false;
+ }
}
+
+static void atmci_configure_dma(struct atmel_mci *host)
+{
+ struct mci_platform_data *pdata;
+
+ if (host == NULL)
+ return;
+
+ pdata = host->pdev->dev.platform_data;
+
+ if (pdata && find_slave_dev(pdata->dma_slave)) {
+ dma_cap_mask_t mask;
+
+ setup_dma_addr(pdata->dma_slave,
+ host->mapbase + MCI_TDR,
+ host->mapbase + MCI_RDR);
+
+ /* Try to grab a DMA channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma.chan =
+ dma_request_channel(mask, filter, pdata->dma_slave);
+ }
+ if (!host->dma.chan)
+ dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
+ else
+ dev_info(&host->pdev->dev,
+ "Using %s for DMA transfers\n",
+ dma_chan_name(host->dma.chan));
+}
+#else
+static void atmci_configure_dma(struct atmel_mci *host) {}
#endif
static int __init atmci_probe(struct platform_device *pdev)
@@ -1645,22 +1741,7 @@ static int __init atmci_probe(struct platform_device *pdev)
if (ret)
goto err_request_irq;
-#ifdef CONFIG_MMC_ATMELMCI_DMA
- if (pdata->dma_slave.dma_dev) {
- struct dw_dma_slave *dws = &pdata->dma_slave;
- dma_cap_mask_t mask;
-
- dws->tx_reg = regs->start + MCI_TDR;
- dws->rx_reg = regs->start + MCI_RDR;
-
- /* Try to grab a DMA channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma.chan = dma_request_channel(mask, filter, dws);
- }
- if (!host->dma.chan)
- dev_notice(&pdev->dev, "DMA not available, using PIO\n");
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
+ atmci_configure_dma(host);
platform_set_drvdata(pdev, host);
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
new file mode 100644
index 000000000000..3343a57355cc
--- /dev/null
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -0,0 +1,639 @@
+/*
+ * bfin_sdh.c - Analog Devices Blackfin SDH Controller
+ *
+ * Copyright (C) 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRIVER_NAME "bfin-sdh"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/proc_fs.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#include <asm/bfin_sdh.h>
+
+#if defined(CONFIG_BF51x)
+#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
+#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
+#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
+#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
+#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
+#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND
+#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER
+#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0
+#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1
+#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2
+#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3
+#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH
+#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL
+#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL
+#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT
+#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR
+#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS
+#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
+#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
+#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
+#define bfin_read_SDH_CFG bfin_read_RSI_CFG
+#define bfin_write_SDH_CFG bfin_write_RSI_CFG
+#endif
+
+struct dma_desc_array {
+ unsigned long start_addr;
+ unsigned short cfg;
+ unsigned short x_count;
+ short x_modify;
+} __packed;
+
+struct sdh_host {
+ struct mmc_host *mmc;
+ spinlock_t lock;
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+ int stat_irq;
+ int dma_ch;
+ int dma_dir;
+ struct dma_desc_array *sg_cpu;
+ dma_addr_t sg_dma;
+ int dma_len;
+
+ unsigned int imask;
+ unsigned int power_mode;
+ unsigned int clk_div;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
+static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+static void sdh_stop_clock(struct sdh_host *host)
+{
+ bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
+ SSYNC();
+}
+
+static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask |= mask;
+ bfin_write_SDH_MASK0(mask);
+ SSYNC();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask &= ~mask;
+ bfin_write_SDH_MASK0(host->imask);
+ SSYNC();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
+{
+ unsigned int length;
+ unsigned int data_ctl;
+ unsigned int dma_cfg;
+ struct scatterlist *sg;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
+ host->data = data;
+ data_ctl = 0;
+ dma_cfg = 0;
+
+ length = data->blksz * data->blocks;
+ bfin_write_SDH_DATA_LGTH(length);
+
+ if (data->flags & MMC_DATA_STREAM)
+ data_ctl |= DTX_MODE;
+
+ if (data->flags & MMC_DATA_READ)
+ data_ctl |= DTX_DIR;
+ /* Only supports power-of-2 block size */
+ if (data->blksz & (data->blksz - 1))
+ return -EINVAL;
+ data_ctl |= ((ffs(data->blksz) - 1) << 4);
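+	/* e.g. a 512-byte block: ffs(512) - 1 = 9 is what ends up in the block length field */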
+
+ bfin_write_SDH_DATA_CTL(data_ctl);
+
+ bfin_write_SDH_DATA_TIMER(0xFFFF);
+ SSYNC();
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ dma_cfg |= WNR;
+ } else
+ host->dma_dir = DMA_TO_DEVICE;
+
+ sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
+ host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
+#if defined(CONFIG_BF54x)
+ dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
+ {
+ int i;
+ for_each_sg(data->sg, sg, host->dma_len, i) {
+ host->sg_cpu[i].start_addr = sg_dma_address(sg);
+ host->sg_cpu[i].cfg = dma_cfg;
+ host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
+ host->sg_cpu[i].x_modify = 4;
+ dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
+ "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
+ i, host->sg_cpu[i].start_addr,
+ host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
+ host->sg_cpu[i].x_modify);
+ }
+ }
+ flush_dcache_range((unsigned int)host->sg_cpu,
+ (unsigned int)host->sg_cpu +
+ host->dma_len * sizeof(struct dma_desc_array));
+ /* Set the last descriptor to stop mode */
+ host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
+ host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;
+
+ set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
+ set_dma_x_count(host->dma_ch, 0);
+ set_dma_x_modify(host->dma_ch, 0);
+ set_dma_config(host->dma_ch, dma_cfg);
+#elif defined(CONFIG_BF51x)
+ /* RSI DMA doesn't work in array mode */
+ dma_cfg |= WDSIZE_32 | DMAEN;
+ set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
+ set_dma_x_count(host->dma_ch, length / 4);
+ set_dma_x_modify(host->dma_ch, 4);
+ set_dma_config(host->dma_ch, dma_cfg);
+#endif
+ bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
+
+ SSYNC();
+
+ dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
+ return 0;
+}
+
+static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
+{
+ unsigned int sdh_cmd;
+ unsigned int stat_mask;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
+ WARN_ON(host->cmd != NULL);
+ host->cmd = cmd;
+
+ sdh_cmd = 0;
+ stat_mask = 0;
+
+ sdh_cmd |= cmd->opcode;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ sdh_cmd |= CMD_RSP;
+ stat_mask |= CMD_RESP_END;
+ } else {
+ stat_mask |= CMD_SENT;
+ }
+
+ if (cmd->flags & MMC_RSP_136)
+ sdh_cmd |= CMD_L_RSP;
+
+ stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;
+
+ sdh_enable_stat_irq(host, stat_mask);
+
+ bfin_write_SDH_ARGUMENT(cmd->arg);
+ bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
+ bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
+ SSYNC();
+}
+
+static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
+{
+ dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int ret = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
+ if (!cmd)
+ return 0;
+
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = bfin_read_SDH_RESPONSE0();
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[1] = bfin_read_SDH_RESPONSE1();
+ cmd->resp[2] = bfin_read_SDH_RESPONSE2();
+ cmd->resp[3] = bfin_read_SDH_RESPONSE3();
+ }
+ }
+ if (stat & CMD_TIME_OUT)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
+ cmd->error = -EILSEQ;
+
+ sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
+
+ if (host->data && !cmd->error) {
+ if (host->data->flags & MMC_DATA_WRITE) {
+ ret = sdh_setup_data(host, host->data);
+ if (ret)
+ return 0;
+ }
+
+ sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
+ } else
+ sdh_finish_request(host, host->mrq);
+
+ return 1;
+}
+
+static int sdh_data_done(struct sdh_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
+ if (!data)
+ return 0;
+
+ disable_dma(host->dma_ch);
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_dir);
+
+ if (stat & DAT_TIME_OUT)
+ data->error = -ETIMEDOUT;
+ else if (stat & DAT_CRC_FAIL)
+ data->error = -EILSEQ;
+ else if (stat & (RX_OVERRUN | TX_UNDERRUN))
+ data->error = -EIO;
+
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
+ bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
+ DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
+ bfin_write_SDH_DATA_CTL(0);
+ SSYNC();
+
+ host->data = NULL;
+ if (host->mrq->stop) {
+ sdh_stop_clock(host);
+ sdh_start_cmd(host, host->mrq->stop);
+ } else {
+ sdh_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
+
+static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdh_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = mrq;
+ host->data = mrq->data;
+
+ if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
+ ret = sdh_setup_data(host, mrq->data);
+ if (ret)
+ return;
+ }
+
+ sdh_start_cmd(host, mrq->cmd);
+}
+
+static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdh_host *host;
+ unsigned long flags;
+ u16 clk_ctl = 0;
+ u16 pwr_ctl = 0;
+ u16 cfg;
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (ios->clock) {
+ unsigned long sys_clk, ios_clk;
+ unsigned char clk_div;
+ ios_clk = 2 * ios->clock;
+ sys_clk = get_sclk();
+ clk_div = sys_clk / ios_clk;
+ if (sys_clk % ios_clk == 0)
+ clk_div -= 1;
+ clk_div = min_t(unsigned char, clk_div, 0xFF);
+ clk_ctl |= clk_div;
+ clk_ctl |= CLK_E;
+ host->clk_div = clk_div;
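+ /*
+ * Worked example (illustrative numbers): with SCLK at 100 MHz and a
+ * requested 25 MHz card clock, the calculation above yields
+ * clk_div = 1, i.e. an actual card clock of
+ * SCLK / (2 * (clk_div + 1)) = 25 MHz, matching the dev_dbg()
+ * report at the end of this function.
+ */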
+ } else
+ sdh_stop_clock(host);
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ pwr_ctl |= ROD_CTL;
+#else
+ pwr_ctl |= SD_CMD_OD | ROD_CTL;
+#endif
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ cfg = bfin_read_SDH_CFG();
+ cfg &= ~PD_SDDAT3;
+ cfg |= PUP_SDDAT3;
+ /* Enable 4 bit SDIO */
+ cfg |= (SD4E | MWE);
+ bfin_write_SDH_CFG(cfg);
+ clk_ctl |= WIDE_BUS;
+ } else {
+ cfg = bfin_read_SDH_CFG();
+ cfg |= MWE;
+ bfin_write_SDH_CFG(cfg);
+ }
+
+ bfin_write_SDH_CLK_CTL(clk_ctl);
+
+ host->power_mode = ios->power_mode;
+ if (ios->power_mode == MMC_POWER_ON)
+ pwr_ctl |= PWR_ON;
+
+ bfin_write_SDH_PWR_CTL(pwr_ctl);
+ SSYNC();
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
+ host->clk_div,
+ host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
+ ios->clock);
+}
+
+static const struct mmc_host_ops sdh_ops = {
+ .request = sdh_request,
+ .set_ios = sdh_set_ios,
+};
+
+static irqreturn_t sdh_dma_irq(int irq, void *devid)
+{
+ struct sdh_host *host = devid;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__,
+ get_dma_curr_irqstat(host->dma_ch));
+ clear_dma_irqstat(host->dma_ch);
+ SSYNC();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sdh_stat_irq(int irq, void *devid)
+{
+ struct sdh_host *host = devid;
+ unsigned int status;
+ int handled = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+ status = bfin_read_SDH_E_STATUS();
+ if (status & SD_CARD_DET) {
+ mmc_detect_change(host->mmc, 0);
+ bfin_write_SDH_E_STATUS(SD_CARD_DET);
+ }
+ status = bfin_read_SDH_STATUS();
+ if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
+ handled |= sdh_cmd_done(host, status);
+ bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
+ CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
+ SSYNC();
+ }
+
+ status = bfin_read_SDH_STATUS();
+ if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
+ handled |= sdh_data_done(host, status);
+
+ dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int __devinit sdh_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct sdh_host *host;
+ struct bfin_sd_host *drv_data = get_sdh_data(pdev);
+ int ret;
+
+ if (!drv_data) {
+ dev_err(&pdev->dev, "missing platform driver data\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mmc->ops = &sdh_ops;
+ mmc->max_phys_segs = 32;
+ mmc->max_seg_size = 1 << 16;
+ mmc->max_blk_size = 1 << 11;
+ mmc->max_blk_count = 1 << 11;
+ mmc->max_req_size = PAGE_SIZE;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->f_max = get_sclk();
+ mmc->f_min = mmc->f_max >> 9;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ spin_lock_init(&host->lock);
+ host->irq = drv_data->irq_int0;
+ host->dma_ch = drv_data->dma_chan;
+
+ ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request DMA channel\n");
+ goto out1;
+ }
+
+ ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request DMA irq\n");
+ goto out2;
+ }
+
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
+ if (host->sg_cpu == NULL) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ platform_set_drvdata(pdev, mmc);
+ mmc_add_host(mmc);
+
+ ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request status irq\n");
+ goto out3;
+ }
+
+ ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request peripheral pins\n");
+ goto out4;
+ }
+#if defined(CONFIG_BF54x)
+ /* Secure Digital Host shares DMA with Nand controller */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
+#endif
+
+ bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
+ SSYNC();
+
+ /* Disable the card insertion detection pin. MMC_CAP_NEEDS_POLL is set,
+ * so the mmc stack will do the detection.
+ */
+ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
+ SSYNC();
+
+ return 0;
+
+out4:
+ free_irq(host->irq, host);
+out3:
+ mmc_remove_host(mmc);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+out2:
+ free_dma(host->dma_ch);
+out1:
+ mmc_free_host(mmc);
+out:
+ return ret;
+}
+
+static int __devexit sdh_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (mmc) {
+ struct sdh_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+
+ sdh_stop_clock(host);
+ free_irq(host->irq, host);
+ free_dma(host->dma_ch);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ mmc_free_host(mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdh_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct bfin_sd_host *drv_data = get_sdh_data(dev);
+ int ret = 0;
+
+ if (mmc)
+ ret = mmc_suspend_host(mmc, state);
+
+ bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
+ peripheral_free_list(drv_data->pin_req);
+
+ return ret;
+}
+
+static int sdh_resume(struct platform_device *dev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct bfin_sd_host *drv_data = get_sdh_data(dev);
+ int ret = 0;
+
+ ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "unable to request peripheral pins\n");
+ return ret;
+ }
+
+ bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON);
+#if defined(CONFIG_BF54x)
+ /* Secure Digital Host shares DMA with Nand controller */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
+#endif
+ bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
+ SSYNC();
+
+ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
+ SSYNC();
+
+ if (mmc)
+ ret = mmc_resume_host(mmc);
+
+ return ret;
+}
+#else
+# define sdh_suspend NULL
+# define sdh_resume NULL
+#endif
+
+static struct platform_driver sdh_driver = {
+ .probe = sdh_probe,
+ .remove = __devexit_p(sdh_remove),
+ .suspend = sdh_suspend,
+ .resume = sdh_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sdh_init(void)
+{
+ return platform_driver_register(&sdh_driver);
+}
+module_init(sdh_init);
+
+static void __exit sdh_exit(void)
+{
+ platform_driver_unregister(&sdh_driver);
+}
+module_exit(sdh_exit);
+
+MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
+MODULE_AUTHOR("Cliff Cai, Roy Huang");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
new file mode 100644
index 000000000000..dd45e7c3517e
--- /dev/null
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -0,0 +1,1349 @@
+/*
+ * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Original author: Purushotam Kumar
+ * Copyright (C) 2009 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/mmc.h>
+
+#include <mach/mmc.h>
+#include <mach/edma.h>
+
+/*
+ * Register Definitions
+ */
+#define DAVINCI_MMCCTL 0x00 /* Control Register */
+#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
+#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
+#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
+#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
+#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
+#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
+#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
+#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
+#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
+#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
+#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
+#define DAVINCI_MMCCMD 0x30 /* Command Register */
+#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
+#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
+#define DAVINCI_MMCRSP23 0x3C /* Response Register 2 and 3 */
+#define DAVINCI_MMCRSP45 0x40 /* Response Register 4 and 5 */
+#define DAVINCI_MMCRSP67 0x44 /* Response Register 6 and 7 */
+#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
+#define DAVINCI_MMCETOK 0x4C
+#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
+#define DAVINCI_MMCCKC 0x54
+#define DAVINCI_MMCTORC 0x58
+#define DAVINCI_MMCTODC 0x5C
+#define DAVINCI_MMCBLNC 0x60
+#define DAVINCI_SDIOCTL 0x64
+#define DAVINCI_SDIOST0 0x68
+#define DAVINCI_SDIOEN 0x6C
+#define DAVINCI_SDIOST 0x70
+#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
+
+/* DAVINCI_MMCCTL definitions */
+#define MMCCTL_DATRST (1 << 0)
+#define MMCCTL_CMDRST (1 << 1)
+#define MMCCTL_WIDTH_4_BIT (1 << 2)
+#define MMCCTL_DATEG_DISABLED (0 << 6)
+#define MMCCTL_DATEG_RISING (1 << 6)
+#define MMCCTL_DATEG_FALLING (2 << 6)
+#define MMCCTL_DATEG_BOTH (3 << 6)
+#define MMCCTL_PERMDR_LE (0 << 9)
+#define MMCCTL_PERMDR_BE (1 << 9)
+#define MMCCTL_PERMDX_LE (0 << 10)
+#define MMCCTL_PERMDX_BE (1 << 10)
+
+/* DAVINCI_MMCCLK definitions */
+#define MMCCLK_CLKEN (1 << 8)
+#define MMCCLK_CLKRT_MASK (0xFF << 0)
+
+/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
+#define MMCST0_DATDNE BIT(0) /* data done */
+#define MMCST0_BSYDNE BIT(1) /* busy done */
+#define MMCST0_RSPDNE BIT(2) /* command done */
+#define MMCST0_TOUTRD BIT(3) /* data read timeout */
+#define MMCST0_TOUTRS BIT(4) /* command response timeout */
+#define MMCST0_CRCWR BIT(5) /* data write CRC error */
+#define MMCST0_CRCRD BIT(6) /* data read CRC error */
+#define MMCST0_CRCRS BIT(7) /* command response CRC error */
+#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
+#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
+#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
+#define MMCST0_TRNDNE BIT(12) /* transfer done */
+
+/* DAVINCI_MMCST1 definitions */
+#define MMCST1_BUSY (1 << 0)
+
+/* DAVINCI_MMCCMD definitions */
+#define MMCCMD_CMD_MASK (0x3F << 0)
+#define MMCCMD_PPLEN (1 << 7)
+#define MMCCMD_BSYEXP (1 << 8)
+#define MMCCMD_RSPFMT_MASK (3 << 9)
+#define MMCCMD_RSPFMT_NONE (0 << 9)
+#define MMCCMD_RSPFMT_R1456 (1 << 9)
+#define MMCCMD_RSPFMT_R2 (2 << 9)
+#define MMCCMD_RSPFMT_R3 (3 << 9)
+#define MMCCMD_DTRW (1 << 11)
+#define MMCCMD_STRMTP (1 << 12)
+#define MMCCMD_WDATX (1 << 13)
+#define MMCCMD_INITCK (1 << 14)
+#define MMCCMD_DCLR (1 << 15)
+#define MMCCMD_DMATRIG (1 << 16)
+
+/* DAVINCI_MMCFIFOCTL definitions */
+#define MMCFIFOCTL_FIFORST (1 << 0)
+#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
+#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
+#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
+#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
+#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
+#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
+#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
+
+
+/* MMCSD Init clock in Hz in opendrain mode */
+#define MMCSD_INIT_CLOCK 200000
+
+/*
+ * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
+ * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
+ * than the page or two that's otherwise typical. NR_SG == 16 gives at
+ * least the same throughput boost, using EDMA transfer linkage instead
+ * of spending CPU time copying pages.
+ */
+#define MAX_CCNT ((1 << 16) - 1)
+
+#define NR_SG 16
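+
+/*
+ * For illustration, assuming the default rw_threshold of 32 bytes: one
+ * EDMA segment can cover up to MAX_CCNT * 32 = 65535 * 32 bytes (just
+ * under 2 MB), and up to NR_SG such segments can be linked per request.
+ * probe() derives mmc->max_seg_size and mmc->max_hw_segs from these
+ * values further below.
+ */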
+
+static unsigned rw_threshold = 32;
+module_param(rw_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(rw_threshold,
+ "Read/Write threshold. Default = 32");
+
+static unsigned __initdata use_dma = 1;
+module_param(use_dma, uint, 0);
+MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
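+
+/*
+ * Usage sketch, assuming this file builds as the davinci_mmc module:
+ * both knobs can only be set at load time, e.g.
+ * "modprobe davinci_mmc rw_threshold=64 use_dma=0" selects a 64-byte
+ * FIFO threshold and forces PIO; rw_threshold is also readable under
+ * /sys/module/davinci_mmc/parameters/ because it is S_IRUGO.
+ */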
+
+struct mmc_davinci_host {
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ unsigned int mmc_input_clk;
+ void __iomem *base;
+ struct resource *mem_res;
+ int irq;
+ unsigned char bus_mode;
+
+#define DAVINCI_MMC_DATADIR_NONE 0
+#define DAVINCI_MMC_DATADIR_READ 1
+#define DAVINCI_MMC_DATADIR_WRITE 2
+ unsigned char data_dir;
+
+ /* buffer is used during PIO of one scatterlist segment, and
+ * is updated along with buffer_bytes_left. bytes_left applies
+ * to all N blocks of the PIO transfer.
+ */
+ u8 *buffer;
+ u32 buffer_bytes_left;
+ u32 bytes_left;
+
+ u32 rxdma, txdma;
+ bool use_dma;
+ bool do_dma;
+
+ /* Scatterlist DMA uses one or more parameter RAM entries:
+ * the main one (associated with rxdma or txdma) plus zero or
+ * more links. The entries for a given transfer differ only
+ * by memory buffer (address, length) and link field.
+ */
+ struct edmacc_param tx_template;
+ struct edmacc_param rx_template;
+ unsigned n_link;
+ u32 links[NR_SG - 1];
+
+ /* For PIO we walk scatterlists one segment at a time. */
+ unsigned int sg_len;
+ struct scatterlist *sg;
+
+ /* Version of the MMC/SD controller */
+ u8 version;
+ /* for ns in one cycle calculation */
+ unsigned ns_in_one_cycle;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+
+/* PIO only */
+static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
+{
+ host->buffer_bytes_left = sg_dma_len(host->sg);
+ host->buffer = sg_virt(host->sg);
+ if (host->buffer_bytes_left > host->bytes_left)
+ host->buffer_bytes_left = host->bytes_left;
+}
+
+static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
+ unsigned int n)
+{
+ u8 *p;
+ unsigned int i;
+
+ if (host->buffer_bytes_left == 0) {
+ host->sg = sg_next(host->data->sg);
+ mmc_davinci_sg_to_buf(host);
+ }
+
+ p = host->buffer;
+ if (n > host->buffer_bytes_left)
+ n = host->buffer_bytes_left;
+ host->buffer_bytes_left -= n;
+ host->bytes_left -= n;
+
+ /* NOTE: we never transfer more than rw_threshold bytes
+ * to/from the fifo here; there's no I/O overlap.
+ * This also assumes that the access width (i.e. ACCWD) is 4 bytes
+ */
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ for (i = 0; i < (n >> 2); i++) {
+ writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
+ p = p + 4;
+ }
+ if (n & 3) {
+ iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
+ p = p + (n & 3);
+ }
+ } else {
+ for (i = 0; i < (n >> 2); i++) {
+ *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
+ p = p + 4;
+ }
+ if (n & 3) {
+ ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
+ p = p + (n & 3);
+ }
+ }
+ host->buffer = p;
+}
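+
+/*
+ * For illustration: a 30-byte PIO chunk above is moved as 30 >> 2 = 7
+ * word accesses followed by 30 & 3 = 2 single-byte accesses.
+ */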
+
+static void mmc_davinci_start_command(struct mmc_davinci_host *host,
+ struct mmc_command *cmd)
+{
+ u32 cmd_reg = 0;
+ u32 im_val;
+
+ dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
+ cmd->opcode, cmd->arg,
+ ({ char *s;
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ s = ", R1/R5/R6/R7 response";
+ break;
+ case MMC_RSP_R1B:
+ s = ", R1b response";
+ break;
+ case MMC_RSP_R2:
+ s = ", R2 response";
+ break;
+ case MMC_RSP_R3:
+ s = ", R3/R4 response";
+ break;
+ default:
+ s = ", (R? response)";
+ break;
+ }; s; }));
+ host->cmd = cmd;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1B:
+ /* There's some spec confusion about when R1B is
+ * allowed, but if the card doesn't issue a BUSY
+ * then it's harmless for us to allow it.
+ */
+ cmd_reg |= MMCCMD_BSYEXP;
+ /* FALLTHROUGH */
+ case MMC_RSP_R1: /* 48 bits, CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R1456;
+ break;
+ case MMC_RSP_R2: /* 136 bits, CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R2;
+ break;
+ case MMC_RSP_R3: /* 48 bits, no CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R3;
+ break;
+ default:
+ cmd_reg |= MMCCMD_RSPFMT_NONE;
+ dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
+ mmc_resp_type(cmd));
+ break;
+ }
+
+ /* Set command index */
+ cmd_reg |= cmd->opcode;
+
+ /* Enable EDMA transfer triggers */
+ if (host->do_dma)
+ cmd_reg |= MMCCMD_DMATRIG;
+
+ if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
+ host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ cmd_reg |= MMCCMD_DMATRIG;
+
+ /* Setting whether command involves data transfer or not */
+ if (cmd->data)
+ cmd_reg |= MMCCMD_WDATX;
+
+ /* Setting whether stream or block transfer */
+ if (cmd->flags & MMC_DATA_STREAM)
+ cmd_reg |= MMCCMD_STRMTP;
+
+ /* Setting whether data read or write */
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
+ cmd_reg |= MMCCMD_DTRW;
+
+ if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
+ cmd_reg |= MMCCMD_PPLEN;
+
+ /* set Command timeout */
+ writel(0x1FFF, host->base + DAVINCI_MMCTOR);
+
+ /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
+ im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
+
+ if (!host->do_dma)
+ im_val |= MMCST0_DXRDY;
+ } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
+ im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
+
+ if (!host->do_dma)
+ im_val |= MMCST0_DRRDY;
+ }
+
+ /*
+ * Before non-DMA WRITE commands the controller needs priming:
+ * the FIFO should be populated with rw_threshold (32) bytes, i.e. the FIFO size
+ */
+ if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
+ davinci_fifo_data_trans(host, rw_threshold);
+
+ writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
+ writel(cmd_reg, host->base + DAVINCI_MMCCMD);
+ writel(im_val, host->base + DAVINCI_MMCIM);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* DMA infrastructure */
+
+static void davinci_abort_dma(struct mmc_davinci_host *host)
+{
+ int sync_dev;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ sync_dev = host->rxdma;
+ else
+ sync_dev = host->txdma;
+
+ edma_stop(sync_dev);
+ edma_clean_channel(sync_dev);
+}
+
+static void
+mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
+
+static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
+{
+ if (DMA_COMPLETE != ch_status) {
+ struct mmc_davinci_host *host = data;
+
+ /* Currently means: DMA Event Missed, or "null" transfer
+ * request was seen. In the future, TC errors (like bad
+ * addresses) might be presented too.
+ */
+ dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
+ (host->data->flags & MMC_DATA_WRITE)
+ ? "write" : "read");
+ host->data->error = -EIO;
+ mmc_davinci_xfer_done(host, host->data);
+ }
+}
+
+/* Set up tx or rx template, to be modified and updated later */
+static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
+ bool tx, struct edmacc_param *template)
+{
+ unsigned sync_dev;
+ const u16 acnt = 4;
+ const u16 bcnt = rw_threshold >> 2;
+ const u16 ccnt = 0;
+ u32 src_port = 0;
+ u32 dst_port = 0;
+ s16 src_bidx, dst_bidx;
+ s16 src_cidx, dst_cidx;
+
+ /*
+ * A-B Sync transfer: each DMA request is for one "frame" of
+ * rw_threshold bytes, broken into "acnt"-size chunks repeated
+ * "bcnt" times. Each segment needs "ccnt" such frames; since
+ * we tell the block layer our mmc->max_seg_size limit, we can
+ * trust (later) that it's within bounds.
+ *
+ * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
+ * EDMA will optimize memory operations to use larger bursts.
+ */
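+ /*
+ * For illustration, with the default rw_threshold of 32: acnt = 4
+ * and bcnt = 32 >> 2 = 8, so each synchronization event moves
+ * 4 * 8 = 32 bytes; ccnt stays 0 here and is filled in per
+ * scatterlist segment when the request is sent.
+ */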
+ if (tx) {
+ sync_dev = host->txdma;
+
+ /* src_prt, ccnt, and link to be set up later */
+ src_bidx = acnt;
+ src_cidx = acnt * bcnt;
+
+ dst_port = host->mem_res->start + DAVINCI_MMCDXR;
+ dst_bidx = 0;
+ dst_cidx = 0;
+ } else {
+ sync_dev = host->rxdma;
+
+ src_port = host->mem_res->start + DAVINCI_MMCDRR;
+ src_bidx = 0;
+ src_cidx = 0;
+
+ /* dst_prt, ccnt, and link to be set up later */
+ dst_bidx = acnt;
+ dst_cidx = acnt * bcnt;
+ }
+
+ /*
+ * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
+ * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT
+ * parameter is ignored.
+ */
+ edma_set_src(sync_dev, src_port, INCR, W8BIT);
+ edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
+
+ edma_set_src_index(sync_dev, src_bidx, src_cidx);
+ edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
+
+ edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
+
+ edma_read_slot(sync_dev, template);
+
+ /* don't bother with irqs or chaining */
+ template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
+}
+
+static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+ struct mmc_data *data)
+{
+ struct edmacc_param *template;
+ int channel, slot;
+ unsigned link;
+ struct scatterlist *sg;
+ unsigned sg_len;
+ unsigned bytes_left = host->bytes_left;
+ const unsigned shift = ffs(rw_threshold) - 1;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ template = &host->tx_template;
+ channel = host->txdma;
+ } else {
+ template = &host->rx_template;
+ channel = host->rxdma;
+ }
+
+ /* We know sg_len and ccnt will never be out of range because
+ * we told the mmc layer which in turn tells the block layer
+ * to ensure that it only hands us one scatterlist segment
+ * per EDMA PARAM entry. Update the PARAM
+ * entries needed for each segment of this scatterlist.
+ */
+ for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
+ sg_len-- != 0 && bytes_left;
+ sg = sg_next(sg), slot = host->links[link++]) {
+ u32 buf = sg_dma_address(sg);
+ unsigned count = sg_dma_len(sg);
+
+ template->link_bcntrld = sg_len
+ ? (EDMA_CHAN_SLOT(host->links[link]) << 5)
+ : 0xffff;
+
+ if (count > bytes_left)
+ count = bytes_left;
+ bytes_left -= count;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
+ template->src = buf;
+ else
+ template->dst = buf;
+ template->ccnt = count >> shift;
+
+ edma_write_slot(slot, template);
+ }
+
+ if (host->version == MMC_CTLR_VERSION_2)
+ edma_clear_event(channel);
+
+ edma_start(channel);
+}
+
+static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
+ struct mmc_data *data)
+{
+ int i;
+ int mask = rw_threshold - 1;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE));
+
+ /* no individual DMA segment should need a partial FIFO */
+ for (i = 0; i < host->sg_len; i++) {
+ if (sg_dma_len(data->sg + i) & mask) {
+ dma_unmap_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ return -1;
+ }
+ }
+
+ host->do_dma = 1;
+ mmc_davinci_send_dma_request(host, data);
+
+ return 0;
+}
+
+static void __init_or_module
+davinci_release_dma_channels(struct mmc_davinci_host *host)
+{
+ unsigned i;
+
+ if (!host->use_dma)
+ return;
+
+ for (i = 0; i < host->n_link; i++)
+ edma_free_slot(host->links[i]);
+
+ edma_free_channel(host->txdma);
+ edma_free_channel(host->rxdma);
+}
+
+static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+{
+ int r, i;
+
+ /* Acquire master DMA write channel */
+ r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
+ EVENTQ_DEFAULT);
+ if (r < 0) {
+ dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
+ "tx", r);
+ return r;
+ }
+ mmc_davinci_dma_setup(host, true, &host->tx_template);
+
+ /* Acquire master DMA read channel */
+ r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
+ EVENTQ_DEFAULT);
+ if (r < 0) {
+ dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
+ "rx", r);
+ goto free_master_write;
+ }
+ mmc_davinci_dma_setup(host, false, &host->rx_template);
+
+ /* Allocate parameter RAM slots, which will later be bound to a
+ * channel as needed to handle a scatterlist.
+ */
+ for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+ r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
+ if (r < 0) {
+ dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
+ r);
+ break;
+ }
+ host->links[i] = r;
+ }
+ host->n_link = i;
+
+ return 0;
+
+free_master_write:
+ edma_free_channel(host->txdma);
+
+ return r;
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
+{
+ int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
+ int timeout;
+ struct mmc_data *data = req->data;
+
+ if (host->version == MMC_CTLR_VERSION_2)
+ fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
+
+ host->data = data;
+ if (data == NULL) {
+ host->data_dir = DAVINCI_MMC_DATADIR_NONE;
+ writel(0, host->base + DAVINCI_MMCBLEN);
+ writel(0, host->base + DAVINCI_MMCNBLK);
+ return;
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
+ (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
+ (data->flags & MMC_DATA_WRITE) ? "write" : "read",
+ data->blocks, data->blksz);
+ dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
+ data->timeout_clks, data->timeout_ns);
+ timeout = data->timeout_clks +
+ (data->timeout_ns / host->ns_in_one_cycle);
+ if (timeout > 0xffff)
+ timeout = 0xffff;
+
+ writel(timeout, host->base + DAVINCI_MMCTOD);
+ writel(data->blocks, host->base + DAVINCI_MMCNBLK);
+ writel(data->blksz, host->base + DAVINCI_MMCBLEN);
+
+ /* Configure the FIFO */
+ switch (data->flags & MMC_DATA_WRITE) {
+ case MMC_DATA_WRITE:
+ host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
+ host->base + DAVINCI_MMCFIFOCTL);
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
+ host->base + DAVINCI_MMCFIFOCTL);
+ break;
+
+ default:
+ host->data_dir = DAVINCI_MMC_DATADIR_READ;
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
+ host->base + DAVINCI_MMCFIFOCTL);
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
+ host->base + DAVINCI_MMCFIFOCTL);
+ break;
+ }
+
+ host->buffer = NULL;
+ host->bytes_left = data->blocks * data->blksz;
+
+ /* For now we try to use DMA whenever we won't need partial FIFO
+ * reads or writes, either for the whole transfer (as tested here)
+ * or for any individual scatterlist segment (tested when we call
+ * start_dma_transfer).
+ *
+ * While we *could* change that, unusual block sizes are rarely
+ * used. The occasional fallback to PIO shouldn't hurt.
+ */
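+ /*
+ * For illustration, assuming the default rw_threshold of 32: an
+ * 8-block, 512-byte transfer has bytes_left = 4096, and
+ * 4096 & (32 - 1) == 0, so the DMA path is tried below; an
+ * odd-sized transfer (say 100 bytes) falls back to PIO instead.
+ */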
+ if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
+ && mmc_davinci_start_dma_transfer(host, data) == 0) {
+ /* zero this to ensure we take no PIO paths */
+ host->bytes_left = 0;
+ } else {
+ /* Revert to CPU Copy */
+ host->sg_len = data->sg_len;
+ host->sg = host->data->sg;
+ mmc_davinci_sg_to_buf(host);
+ }
+}
+
+static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+ unsigned long timeout = jiffies + msecs_to_jiffies(900);
+ u32 mmcst1 = 0;
+
+ /* Card may still be sending BUSY after a previous operation,
+ * typically some kind of write. If so, we can't proceed yet.
+ */
+ while (time_before(jiffies, timeout)) {
+ mmcst1 = readl(host->base + DAVINCI_MMCST1);
+ if (!(mmcst1 & MMCST1_BUSY))
+ break;
+ cpu_relax();
+ }
+ if (mmcst1 & MMCST1_BUSY) {
+ dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
+ req->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, req);
+ return;
+ }
+
+ host->do_dma = 0;
+ mmc_davinci_prepare_data(host, req);
+ mmc_davinci_start_command(host, req->cmd);
+}
+
+static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
+ unsigned int mmc_req_freq)
+{
+ unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;
+
+ mmc_pclk = host->mmc_input_clk;
+ if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
+ mmc_push_pull_divisor = ((unsigned int)mmc_pclk
+ / (2 * mmc_req_freq)) - 1;
+ else
+ mmc_push_pull_divisor = 0;
+
+ mmc_freq = (unsigned int)mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1));
+
+ if (mmc_freq > mmc_req_freq)
+ mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
+ /* Convert ns to clock cycles */
+ if (mmc_req_freq <= 400000)
+ host->ns_in_one_cycle = (1000000) / (((mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1)))/1000));
+ else
+ host->ns_in_one_cycle = (1000000) / (((mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1)))/1000000));
+
+ return mmc_push_pull_divisor;
+}
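+
+/*
+ * For illustration, assuming a 75 MHz controller clock and a 25 MHz
+ * request: 75000000 / (2 * 25000000) - 1 = 0 at first, but the
+ * resulting 37.5 MHz exceeds the request, so the divisor is bumped
+ * to 1 and the card ends up clocked at 75000000 / 4 = 18.75 MHz.
+ */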
+
+static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ unsigned int open_drain_freq = 0, mmc_pclk = 0;
+ unsigned int mmc_push_pull_freq = 0;
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ mmc_pclk = host->mmc_input_clk;
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
+ u32 temp;
+
+ /* Ignore the init clock value passed in, to improve
+ * interoperability with different cards.
+ */
+ open_drain_freq = ((unsigned int)mmc_pclk
+ / (2 * MMCSD_INIT_CLOCK)) - 1;
+
+ if (open_drain_freq > 0xFF)
+ open_drain_freq = 0xFF;
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
+ temp |= open_drain_freq;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ /* Convert ns to clock cycles */
+ host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
+ } else {
+ u32 temp;
+ mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
+
+ if (mmc_push_pull_freq > 0xFF)
+ mmc_push_pull_freq = 0xFF;
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ udelay(10);
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
+ temp |= mmc_push_pull_freq;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
+
+ udelay(10);
+ }
+}
+
+static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ unsigned int mmc_pclk = 0;
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ mmc_pclk = host->mmc_input_clk;
+ dev_dbg(mmc_dev(host->mmc),
+ "clock %dHz busmode %d powermode %d Vdd %04x\n",
+ ios->clock, ios->bus_mode, ios->power_mode,
+ ios->vdd);
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
+ writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ } else {
+ dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n");
+ writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ }
+
+ calculate_clk_divider(mmc, ios);
+
+ host->bus_mode = ios->bus_mode;
+ if (ios->power_mode == MMC_POWER_UP) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ bool lose = true;
+
+ /* Send clock cycles, poll completion */
+ writel(0, host->base + DAVINCI_MMCARGHL);
+ writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
+ while (time_before(jiffies, timeout)) {
+ u32 tmp = readl(host->base + DAVINCI_MMCST0);
+
+ if (tmp & MMCST0_RSPDNE) {
+ lose = false;
+ break;
+ }
+ cpu_relax();
+ }
+ if (lose)
+ dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
+ }
+
+ /* FIXME on power OFF, reset things ... */
+}
+
+static void
+mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ host->data = NULL;
+
+ if (host->do_dma) {
+ davinci_abort_dma(host);
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ host->do_dma = false;
+ }
+ host->data_dir = DAVINCI_MMC_DATADIR_NONE;
+
+ if (!data->stop || (host->cmd && host->cmd->error)) {
+ mmc_request_done(host->mmc, data->mrq);
+ writel(0, host->base + DAVINCI_MMCIM);
+ } else
+ mmc_davinci_start_command(host, data->stop);
+}
+
+static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
+ cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
+ cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
+ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ if (cmd->error == -ETIMEDOUT)
+ cmd->mrq->cmd->retries = 0;
+ mmc_request_done(host->mmc, cmd->mrq);
+ writel(0, host->base + DAVINCI_MMCIM);
+ }
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ u32 temp;
+
+ /* reset command and data state machines */
+ temp = readl(host->base + DAVINCI_MMCCTL);
+ writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
+ host->base + DAVINCI_MMCCTL);
+
+ temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
+ udelay(10);
+ writel(temp, host->base + DAVINCI_MMCCTL);
+}
+
+static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
+ unsigned int status, qstatus;
+ int end_command = 0;
+ int end_transfer = 0;
+ struct mmc_data *data = host->data;
+
+ if (host->cmd == NULL && host->data == NULL) {
+ status = readl(host->base + DAVINCI_MMCST0);
+ dev_dbg(mmc_dev(host->mmc),
+ "Spurious interrupt 0x%04x\n", status);
+ /* Disable the interrupt from mmcsd */
+ writel(0, host->base + DAVINCI_MMCIM);
+ return IRQ_NONE;
+ }
+
+ status = readl(host->base + DAVINCI_MMCST0);
+ qstatus = status;
+
+ /* Handle the FIFO first when using PIO for data.
+ * bytes_left will decrease to zero as I/O progresses, and status will
+ * eventually read zero, because this controller's status
+ * register (MMCST0) reports each condition only once and is cleared
+ * by the read. So this is not an unbounded loop, even in the
+ * non-DMA case.
+ */
+ while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
+ davinci_fifo_data_trans(host, rw_threshold);
+ status = readl(host->base + DAVINCI_MMCST0);
+ if (!status)
+ break;
+ qstatus |= status;
+ }
+
+ if (qstatus & MMCST0_DATDNE) {
+ /* All blocks sent/received, and CRC checks passed */
+ if (data != NULL) {
+ if ((host->do_dma == 0) && (host->bytes_left > 0)) {
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ }
+ end_transfer = 1;
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "DATDNE with no host->data\n");
+ }
+ }
+
+ if (qstatus & MMCST0_TOUTRD) {
+ /* Read data timeout */
+ data->error = -ETIMEDOUT;
+ end_transfer = 1;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "read data timeout, status %x\n",
+ qstatus);
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
+ /* Data CRC error */
+ data->error = -EILSEQ;
+ end_transfer = 1;
+
+ /* NOTE: this controller uses CRCWR to report both CRC
+ * errors and timeouts (on writes). MMCDRSP values are
+ * only weakly documented, but 0x9f was clearly a timeout
+ * case and the two three-bit patterns in various SD specs
+ * (101, 010) aren't part of it ...
+ */
+ if (qstatus & MMCST0_CRCWR) {
+ u32 temp = readb(host->base + DAVINCI_MMCDRSP);
+
+ if (temp == 0x9f)
+ data->error = -ETIMEDOUT;
+ }
+ dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
+ (qstatus & MMCST0_CRCWR) ? "write" : "read",
+ (data->error == -ETIMEDOUT) ? "timeout" : "CRC");
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & MMCST0_TOUTRS) {
+ /* Command timeout */
+ if (host->cmd) {
+ dev_dbg(mmc_dev(host->mmc),
+ "CMD%d timeout, status %x\n",
+ host->cmd->opcode, qstatus);
+ host->cmd->error = -ETIMEDOUT;
+ if (data) {
+ end_transfer = 1;
+ davinci_abort_data(host, data);
+ } else
+ end_command = 1;
+ }
+ }
+
+ if (qstatus & MMCST0_CRCRS) {
+ /* Command CRC error */
+ dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
+ if (host->cmd) {
+ host->cmd->error = -EILSEQ;
+ end_command = 1;
+ }
+ }
+
+ if (qstatus & MMCST0_RSPDNE) {
+ /* End of command phase */
+ end_command = (int) host->cmd;
+ }
+
+ if (end_command)
+ mmc_davinci_cmd_done(host, host->cmd);
+ if (end_transfer)
+ mmc_davinci_xfer_done(host, data);
+ return IRQ_HANDLED;
+}
+
+static int mmc_davinci_get_cd(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ if (!config || !config->get_cd)
+ return -ENOSYS;
+ return config->get_cd(pdev->id);
+}
+
+static int mmc_davinci_get_ro(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ if (!config || !config->get_ro)
+ return -ENOSYS;
+ return config->get_ro(pdev->id);
+}
+
+static struct mmc_host_ops mmc_davinci_ops = {
+ .request = mmc_davinci_request,
+ .set_ios = mmc_davinci_set_ios,
+ .get_cd = mmc_davinci_get_cd,
+ .get_ro = mmc_davinci_get_ro,
+};
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_CPU_FREQ
+static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct mmc_davinci_host *host;
+ unsigned int mmc_pclk;
+ struct mmc_host *mmc;
+ unsigned long flags;
+
+ host = container_of(nb, struct mmc_davinci_host, freq_transition);
+ mmc = host->mmc;
+ mmc_pclk = clk_get_rate(host->clk);
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ spin_lock_irqsave(&mmc->lock, flags);
+ host->mmc_input_clk = mmc_pclk;
+ calculate_clk_divider(mmc, &mmc->ios);
+ spin_unlock_irqrestore(&mmc->lock, flags);
+ }
+
+ return 0;
+}
+
+static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
+{
+ host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;
+
+ return cpufreq_register_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+{
+ cpufreq_unregister_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+#else
+static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
+{
+ return 0;
+}
+
+static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+{
+}
+#endif
+static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+{
+ /* DAT line portion is disabled and in reset state */
+ writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
+ host->base + DAVINCI_MMCCTL);
+
+ /* CMD line portion is disabled and in reset state */
+ writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
+ host->base + DAVINCI_MMCCTL);
+
+ udelay(10);
+
+ writel(0, host->base + DAVINCI_MMCCLK);
+ writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
+
+ writel(0x1FFF, host->base + DAVINCI_MMCTOR);
+ writel(0xFFFF, host->base + DAVINCI_MMCTOD);
+
+ writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
+ host->base + DAVINCI_MMCCTL);
+ writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
+ host->base + DAVINCI_MMCCTL);
+
+ udelay(10);
+}
+
+static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+{
+ struct davinci_mmc_config *pdata = pdev->dev.platform_data;
+ struct mmc_davinci_host *host = NULL;
+ struct mmc_host *mmc = NULL;
+ struct resource *r, *mem = NULL;
+ int ret = 0, irq = 0;
+ size_t mem_size;
+
+ /* REVISIT: when we're fully converted, fail if pdata is NULL */
+
+ ret = -ENODEV;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ goto out;
+
+ ret = -EBUSY;
+ mem_size = resource_size(r);
+ mem = request_mem_region(r->start, mem_size, pdev->name);
+ if (!mem)
+ goto out;
+
+ ret = -ENOMEM;
+ mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
+ if (!mmc)
+ goto out;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc; /* Important */
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r)
+ goto out;
+ host->rxdma = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!r)
+ goto out;
+ host->txdma = r->start;
+
+ host->mem_res = mem;
+ host->base = ioremap(mem->start, mem_size);
+ if (!host->base)
+ goto out;
+
+ ret = -ENXIO;
+ host->clk = clk_get(&pdev->dev, "MMCSDCLK");
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto out;
+ }
+ clk_enable(host->clk);
+ host->mmc_input_clk = clk_get_rate(host->clk);
+
+ init_mmcsd_host(host);
+
+ host->use_dma = use_dma;
+ host->irq = irq;
+
+ if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
+ host->use_dma = 0;
+
+ /* REVISIT: someday, support IRQ-driven card detection. */
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ if (!pdata || pdata->wires == 4 || pdata->wires == 0)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (pdata)
+ host->version = pdata->version;
+
+ mmc->ops = &mmc_davinci_ops;
+ mmc->f_min = 312500;
+ mmc->f_max = 25000000;
+ if (pdata && pdata->max_freq)
+ mmc->f_max = pdata->max_freq;
+ if (pdata && pdata->caps)
+ mmc->caps |= pdata->caps;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ /* With no iommu coalescing pages, each phys_seg is a hw_seg.
+ * Each hw_seg uses one EDMA parameter RAM slot, always one
+ * channel and then usually some linked slots.
+ */
+ mmc->max_hw_segs = 1 + host->n_link;
+ mmc->max_phys_segs = mmc->max_hw_segs;
+
+ /* EDMA limit per hw segment (one or two MBytes) */
+ mmc->max_seg_size = MAX_CCNT * rw_threshold;
+
+ /* MMC/SD controller limits for multiblock requests */
+ mmc->max_blk_size = 4095; /* BLEN is 12 bits */
+ mmc->max_blk_count = 65535; /* NBLK is 16 bits */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+ dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
+ dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
+ dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_davinci_cpufreq_register(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register cpufreq\n");
+ goto cpu_freq_fail;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto out;
+
+ ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
+ if (ret)
+ goto out;
+
+ rename_region(mem, mmc_hostname(mmc));
+
+ dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
+ host->use_dma ? "DMA" : "PIO",
+ (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+
+ return 0;
+
+out:
+ mmc_davinci_cpufreq_deregister(host);
+cpu_freq_fail:
+ if (host) {
+ davinci_release_dma_channels(host);
+
+ if (host->clk) {
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ }
+
+ if (host->base)
+ iounmap(host->base);
+ }
+
+ if (mmc)
+ mmc_free_host(mmc);
+
+ if (mem)
+ release_resource(mem);
+
+ dev_dbg(&pdev->dev, "probe err %d\n", ret);
+
+ return ret;
+}
+
+static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ if (host) {
+ mmc_davinci_cpufreq_deregister(host);
+
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+
+ davinci_release_dma_channels(host);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->base);
+
+ release_resource(host->mem_res);
+
+ mmc_free_host(host->mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ return mmc_suspend_host(host->mmc, msg);
+}
+
+static int davinci_mmcsd_resume(struct platform_device *pdev)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ return mmc_resume_host(host->mmc);
+}
+#else
+#define davinci_mmcsd_suspend NULL
+#define davinci_mmcsd_resume NULL
+#endif
+
+static struct platform_driver davinci_mmcsd_driver = {
+ .driver = {
+ .name = "davinci_mmc",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(davinci_mmcsd_remove),
+ .suspend = davinci_mmcsd_suspend,
+ .resume = davinci_mmcsd_resume,
+};
+
+static int __init davinci_mmcsd_init(void)
+{
+ return platform_driver_probe(&davinci_mmcsd_driver,
+ davinci_mmcsd_probe);
+}
+module_init(davinci_mmcsd_init);
+
+static void __exit davinci_mmcsd_exit(void)
+{
+ platform_driver_unregister(&davinci_mmcsd_driver);
+}
+module_exit(davinci_mmcsd_exit);
+
+MODULE_AUTHOR("Texas Instruments India");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 88671529c45d..60a2b69e54f5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -679,17 +679,17 @@ static int mxcmci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct mxcmci_host *host = NULL;
- struct resource *r;
+ struct resource *iores, *r;
int ret = 0, irq;
printk(KERN_INFO "i.MX SDHC driver\n");
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
+ if (!iores || irq < 0)
return -EINVAL;
- r = request_mem_region(r->start, resource_size(r), pdev->name);
+ r = request_mem_region(iores->start, resource_size(iores), pdev->name);
if (!r)
return -EBUSY;
@@ -809,7 +809,7 @@ out_iounmap:
out_free:
mmc_free_host(mmc);
out_release_mem:
- release_mem_region(host->res->start, resource_size(host->res));
+ release_mem_region(iores->start, resource_size(iores));
return ret;
}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 5f970e253e50..c6d7e8ecadbf 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1459,8 +1459,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
goto err_ioremap;
host->iclk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(host->iclk))
+ if (IS_ERR(host->iclk)) {
+ ret = PTR_ERR(host->iclk);
goto err_free_mmc_host;
+ }
clk_enable(host->iclk);
host->fclk = clk_get(&pdev->dev, "fck");
@@ -1500,10 +1502,8 @@ err_free_irq:
err_free_fclk:
clk_put(host->fclk);
err_free_iclk:
- if (host->iclk != NULL) {
- clk_disable(host->iclk);
- clk_put(host->iclk);
- }
+ clk_disable(host->iclk);
+ clk_put(host->iclk);
err_free_mmc_host:
iounmap(host->virt_base);
err_ioremap:
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index bb47ff465c04..0d783f3e79ed 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -828,7 +828,7 @@ static int pxamci_resume(struct device *dev)
return ret;
}
-static struct dev_pm_ops pxamci_pm_ops = {
+static const struct dev_pm_ops pxamci_pm_ops = {
.suspend = pxamci_suspend,
.resume = pxamci_resume,
};
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 941a4d35ef8d..d96e1abf2d64 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -820,7 +820,7 @@ fail_request:
static void finalize_request(struct s3cmci_host *host)
{
struct mmc_request *mrq = host->mrq;
- struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+ struct mmc_command *cmd;
int debug_as_failure = 0;
if (host->complete_what != COMPLETION_FINALIZE)
@@ -828,6 +828,7 @@ static void finalize_request(struct s3cmci_host *host)
if (!mrq)
return;
+ cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
if (cmd->data && (cmd->error == 0) &&
(cmd->data->error == 0)) {
@@ -1302,10 +1303,8 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
if (pdata->no_wprotect)
return 0;
- ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
-
- if (pdata->wprotect_invert)
- ret = !ret;
+ ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0;
+ ret ^= pdata->wprotect_invert;
return ret;
}
@@ -1654,7 +1653,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
goto probe_free_irq;
}
- host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
+ host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);
if (host->irq_cd >= 0) {
if (request_irq(host->irq_cd, s3cmci_irq_cd,
@@ -1892,7 +1891,7 @@ static int s3cmci_resume(struct device *dev)
return mmc_resume_host(mmc);
}
-static struct dev_pm_ops s3cmci_pm = {
+static const struct dev_pm_ops s3cmci_pm = {
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e0356644d1aa..5c3a1767770a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -285,6 +285,73 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
.resume = jmicron_resume,
};
+/* SysKonnect CardBus2SDIO extra registers */
+#define SYSKT_CTRL 0x200
+#define SYSKT_RDFIFO_STAT 0x204
+#define SYSKT_WRFIFO_STAT 0x208
+#define SYSKT_POWER_DATA 0x20c
+#define SYSKT_POWER_330 0xef
+#define SYSKT_POWER_300 0xf8
+#define SYSKT_POWER_184 0xcc
+#define SYSKT_POWER_CMD 0x20d
+#define SYSKT_POWER_START (1 << 7)
+#define SYSKT_POWER_STATUS 0x20e
+#define SYSKT_POWER_STATUS_OK (1 << 0)
+#define SYSKT_BOARD_REV 0x210
+#define SYSKT_CHIP_REV 0x211
+#define SYSKT_CONF_DATA 0x212
+#define SYSKT_CONF_DATA_1V8 (1 << 2)
+#define SYSKT_CONF_DATA_2V5 (1 << 1)
+#define SYSKT_CONF_DATA_3V3 (1 << 0)
+
+static int syskt_probe(struct sdhci_pci_chip *chip)
+{
+ if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
+ chip->pdev->class &= ~0x0000FF;
+ chip->pdev->class |= PCI_SDHCI_IFDMA;
+ }
+ return 0;
+}
+
+static int syskt_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int tm, ps;
+
+ u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
+ u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
+ dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
+ "board rev %d.%d, chip rev %d.%d\n",
+ board_rev >> 4, board_rev & 0xf,
+ chip_rev >> 4, chip_rev & 0xf);
+ if (chip_rev >= 0x20)
+ slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
+
+ writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
+ writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
+ udelay(50);
+ tm = 10; /* Wait max 1 ms */
+ do {
+ ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
+ if (ps & SYSKT_POWER_STATUS_OK)
+ break;
+ udelay(100);
+ } while (--tm);
+ if (!tm) {
+ dev_err(&slot->chip->pdev->dev,
+ "power regulator never stabilized");
+ writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_syskt = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
+ .probe = syskt_probe,
+ .probe_slot = syskt_probe_slot,
+};
+
static int via_probe(struct sdhci_pci_chip *chip)
{
if (chip->pdev->revision == 0x10)
@@ -363,6 +430,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_SYSKONNECT,
+ .device = 0x8000,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_syskt,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_VIA,
.device = 0x95d0,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 91991b460c45..7cccc8523747 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -591,7 +591,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
disable_mmc_irqs(host, TMIO_MASK_ALL);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
- IRQF_TRIGGER_FALLING, "tmio-mmc", host);
+ IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret)
goto unmap_cnf;
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 7c302d55910e..66123419f65d 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -216,7 +216,7 @@ static int nomadik_nand_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops nomadik_nand_pm_ops = {
+static const struct dev_pm_ops nomadik_nand_pm_ops = {
.suspend = nomadik_nand_suspend,
.resume = nomadik_nand_resume,
};
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 78b7167a8ce3..39db0e96815d 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops vortex_pm_ops = {
+static const struct dev_pm_ops vortex_pm_ops = {
.suspend = vortex_suspend,
.resume = vortex_resume,
.freeze = vortex_suspend,
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8c658cf6f62f..109d2783e4d8 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
}
__skb_pull(skb, sizeof(*p));
- st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+ st = this_cpu_ptr(sge->port_stats[p->iff]);
skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct adapter *adapter = dev->ml_priv;
struct sge *sge = adapter->sge;
- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
- smp_processor_id());
+ struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
struct cpl_tx_pkt *cpl;
struct sk_buff *orig_skb = skb;
int ret;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0cbe3c0e7c06..b37730065688 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops dm9000_drv_pm_ops = {
+static const struct dev_pm_ops dm9000_drv_pm_ops = {
.suspend = dm9000_drv_suspend,
.resume = dm9000_drv_resume,
};
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index eae4ad749e9d..b9fcc9819837 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
/* it's OK to use per_cpu_ptr() because BHs are off */
pcpu_lstats = dev->ml_priv;
- lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
+ lb_stats = this_cpu_ptr(pcpu_lstats);
len = skb->len;
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 81bafd578478..d431b59e7d11 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -270,7 +270,7 @@ static int try_io_port(struct pcmcia_device *link)
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 8ad8384fc1c0..813aca3fc433 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -426,7 +426,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
if (link->io.NumPorts2 != 0) {
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2d26b6ca28b9..92ed3fbf89a5 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link)
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index cc4853bc0253..6dd486d2977b 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -454,7 +454,7 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index a2eda28f903e..466fc72698c0 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -841,7 +841,7 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ;
+ link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts2 = 8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index acfc5a3aa490..60f96c468a24 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -4859,7 +4859,7 @@ out:
return 0;
}
-static struct dev_pm_ops rtl8169_pm_ops = {
+static const struct dev_pm_ops rtl8169_pm_ops = {
.suspend = rtl8169_suspend,
.resume = rtl8169_resume,
.freeze = rtl8169_suspend,
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 20d6095cf411..494cd91ea39c 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev)
return (to == 0) ? -EIO : 0;
}
-static struct dev_pm_ops smsc911x_pm_ops = {
+static const struct dev_pm_ops smsc911x_pm_ops = {
.suspend = smsc911x_suspend,
.resume = smsc911x_resume,
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 63099c58a6dd..3a15de56df9c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *rcv = NULL;
struct veth_priv *priv, *rcv_priv;
struct veth_net_stats *stats, *rcv_stats;
- int length, cpu;
+ int length;
priv = netdev_priv(dev);
rcv = priv->peer;
rcv_priv = netdev_priv(rcv);
- cpu = smp_processor_id();
- stats = per_cpu_ptr(priv->stats, cpu);
- rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
+ stats = this_cpu_ptr(priv->stats);
+ rcv_stats = this_cpu_ptr(rcv_priv->stats);
if (!(rcv->flags & IFF_UP))
goto tx_drop;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 1ceb9d0f8b97..9cc438282d77 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device)
return 0;
}
-static struct dev_pm_ops vmxnet3_pm_ops = {
+static const struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
};
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a7aae24f2889..166b67ea622f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -47,7 +47,7 @@
*/
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
- struct oprofile_cpu_buffer *cpu_buf
- = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
cpu_buf->sample_lost_overflow++;
}
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
goto fail;
for_each_possible_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
b->last_task = NULL;
b->last_is_kernel = -1;
@@ -122,7 +121,7 @@ void start_cpu_work(void)
work_enabled = 1;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
/*
* Spread the work by 1 jiffy per cpu so they dont all
@@ -139,7 +138,7 @@ void end_cpu_work(void)
work_enabled = 0;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
cancel_delayed_work(&b->work);
}
@@ -330,7 +329,7 @@ static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
unsigned long backtrace = oprofile_backtrace_depth;
/*
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
{
struct op_sample *sample;
int is_kernel = !user_mode(regs);
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
cpu_buf->sample_received++;
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
log_sample(cpu_buf, pc, 0, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
if (!cpu_buf->tracing)
return;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 272995d20293..68ea16ab645f 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
struct delayed_work work;
};
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
/*
* Resets the cpu buffer to a sane state.
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
*/
static inline void op_cpu_buffer_reset(int cpu)
{
- struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+ struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
cpu_buf->last_is_kernel = -1;
cpu_buf->last_task = NULL;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 61689e814d46..917d28ebeacd 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
int i;
for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(cpu_buffer, i);
+ cpu_buf = &per_cpu(op_cpu_buffer, i);
cpu_buf->sample_received = 0;
cpu_buf->sample_lost_overflow = 0;
cpu_buf->backtrace_aborted = 0;
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
return;
for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(cpu_buffer, i);
+ cpu_buf = &per_cpu(op_cpu_buffer, i);
snprintf(buf, 10, "cpu%d", i);
cpudir = oprofilefs_mkdir(sb, dir, buf);
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 13a64bc081b6..0bc5d474b168 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -779,12 +779,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
read_unlock(&pathentry->rw_lock);
DPRINTK("%s: flags before: 0x%X\n", __func__, flags);
-
- temp = in;
-
- while (*temp && isspace(*temp))
- temp++;
-
+
+ temp = skip_spaces(in);
+
c = *temp++ - '0';
if ((c != 0) && (c != 1))
goto parse_error;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ce52ea34fee5..a49452e2aed9 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -43,7 +43,7 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static struct dev_pm_ops pcie_portdrv_pm_ops = {
+static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index cd5082d3ca19..9f3adbd9f700 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -64,7 +64,7 @@ config PCMCIA_IOCTL
If unsure, say Y.
config CARDBUS
- bool "32-bit CardBus support"
+ bool "32-bit CardBus support"
depends on PCI
default y
---help---
@@ -87,8 +87,8 @@ config YENTA
select PCCARD_NONSTATIC
---help---
This option enables support for CardBus host bridges. Virtually
- all modern PCMCIA bridges are CardBus compatible. A "bridge" is
- the hardware inside your computer that PCMCIA cards are plugged
+ all modern PCMCIA bridges are CardBus compatible. A "bridge" is
+ the hardware inside your computer that PCMCIA cards are plugged
into.
To compile this driver as modules, choose M here: the
@@ -208,7 +208,7 @@ config PCMCIA_PXA2XX
depends on ARM && ARCH_PXA && PCMCIA
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
|| MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
- || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2)
+ || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2)
select PCMCIA_SOC_COMMON
help
Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 382938313991..83ff802de544 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -67,7 +67,7 @@ pxa2xx-obj-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock_cs.o
pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
-pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
+pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA) += pxa2xx_viper.o
pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index a73b040ddbfb..cdf50f3bc2df 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -27,8 +27,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
@@ -58,7 +58,7 @@
image number and an offset within that image. xlate_rom_addr()
converts an image/offset address to an absolute offset from the
ROM's base address.
-
+
=====================================================================*/
static u_int xlate_rom_addr(void __iomem *b, u_int addr)
@@ -85,10 +85,10 @@ static u_int xlate_rom_addr(void __iomem *b, u_int addr)
These are similar to setup_cis_mem and release_cis_mem for 16-bit
cards. The "result" that is used externally is the cb_cis_virt
pointer in the struct pcmcia_socket structure.
-
+
=====================================================================*/
-static void cb_release_cis_mem(struct pcmcia_socket * s)
+static void cb_release_cis_mem(struct pcmcia_socket *s)
{
if (s->cb_cis_virt) {
dev_dbg(&s->dev, "cb_release_cis_mem()\n");
@@ -98,7 +98,7 @@ static void cb_release_cis_mem(struct pcmcia_socket * s)
}
}
-static int cb_setup_cis_mem(struct pcmcia_socket * s, struct resource *res)
+static int cb_setup_cis_mem(struct pcmcia_socket *s, struct resource *res)
{
unsigned int start, size;
@@ -124,10 +124,11 @@ static int cb_setup_cis_mem(struct pcmcia_socket * s, struct resource *res)
This is used by the CIS processing code to read CIS information
from a CardBus device.
-
+
=====================================================================*/
-int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void *ptr)
+int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len,
+ void *ptr)
{
struct pci_dev *dev;
struct resource *res;
@@ -181,7 +182,7 @@ fail:
cb_alloc() and cb_free() allocate and free the kernel data
structures for a Cardbus device, and handle the lowest level PCI
device setup issues.
-
+
=====================================================================*/
static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
@@ -214,14 +215,14 @@ static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
}
}
-int __ref cb_alloc(struct pcmcia_socket * s)
+int __ref cb_alloc(struct pcmcia_socket *s)
{
struct pci_bus *bus = s->cb_dev->subordinate;
struct pci_dev *dev;
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
-// pcibios_fixup_bus(bus);
+/* pcibios_fixup_bus(bus); */
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
@@ -248,7 +249,7 @@ int __ref cb_alloc(struct pcmcia_socket * s)
return 0;
}
-void cb_free(struct pcmcia_socket * s)
+void cb_free(struct pcmcia_socket *s)
{
struct pci_dev *bridge = s->cb_dev;
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 8c1b73cf021b..25b1cd219e37 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -23,7 +23,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -125,7 +125,7 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag
Low-level functions to read and write CIS memory. I think the
write routine is only useful for writing one-byte registers.
-
+
======================================================================*/
/* Bits in attr field */
@@ -137,7 +137,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
{
void __iomem *sys, *end;
unsigned char *buf = ptr;
-
+
dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
if (attr & IS_INDIRECT) {
@@ -203,7 +203,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
{
void __iomem *sys, *end;
unsigned char *buf = ptr;
-
+
dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
if (attr & IS_INDIRECT) {
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(pcmcia_write_cis_mem);
This is a wrapper around read_cis_mem, with the same interface,
but which caches information, for cards whose CIS may not be
readable all the time.
-
+
======================================================================*/
static void read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
@@ -342,7 +342,7 @@ EXPORT_SYMBOL(destroy_cis_cache);
This verifies if the CIS of a card matches what is in the CIS
cache.
-
+
======================================================================*/
int verify_cis_cache(struct pcmcia_socket *s)
@@ -381,7 +381,7 @@ int verify_cis_cache(struct pcmcia_socket *s)
For really bad cards, we provide a facility for uploading a
replacement CIS.
-
+
======================================================================*/
int pcmcia_replace_cis(struct pcmcia_socket *s,
@@ -406,7 +406,7 @@ EXPORT_SYMBOL(pcmcia_replace_cis);
/*======================================================================
The high-level CIS tuple services
-
+
======================================================================*/
typedef struct tuple_flags {
@@ -421,8 +421,6 @@ typedef struct tuple_flags {
#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
#define SPACE(f) (((tuple_flags *)(&(f)))->space)
-int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int func, tuple_t *tuple);
-
int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple)
{
if (!s)
@@ -523,10 +521,11 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
ofs++; continue;
}
}
-
+
/* End of chain? Follow long link if possible */
if (link[0] == CISTPL_END) {
- if ((ofs = follow_link(s, tuple)) < 0)
+ ofs = follow_link(s, tuple);
+ if (ofs < 0)
return -ENOSPC;
attr = SPACE(tuple->Flags);
read_cis_cache(s, attr, ofs, 2, link);
@@ -578,7 +577,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
} else
if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
break;
-
+
if (link[0] == tuple->DesiredTuple)
break;
ofs += link[1] + 2;
@@ -587,7 +586,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
return -ENOSPC;
}
-
+
tuple->TupleCode = link[0];
tuple->TupleLink = link[1];
tuple->CISOffset = ofs + 2;
@@ -623,7 +622,7 @@ EXPORT_SYMBOL(pccard_get_tuple_data);
/*======================================================================
Parsing routines for individual tuples
-
+
======================================================================*/
static int parse_device(tuple_t *tuple, cistpl_device_t *device)
@@ -637,26 +636,37 @@ static int parse_device(tuple_t *tuple, cistpl_device_t *device)
device->ndev = 0;
for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
-
- if (*p == 0xff) break;
+
+ if (*p == 0xff)
+ break;
device->dev[i].type = (*p >> 4);
device->dev[i].wp = (*p & 0x08) ? 1 : 0;
switch (*p & 0x07) {
- case 0: device->dev[i].speed = 0; break;
- case 1: device->dev[i].speed = 250; break;
- case 2: device->dev[i].speed = 200; break;
- case 3: device->dev[i].speed = 150; break;
- case 4: device->dev[i].speed = 100; break;
+ case 0:
+ device->dev[i].speed = 0;
+ break;
+ case 1:
+ device->dev[i].speed = 250;
+ break;
+ case 2:
+ device->dev[i].speed = 200;
+ break;
+ case 3:
+ device->dev[i].speed = 150;
+ break;
+ case 4:
+ device->dev[i].speed = 100;
+ break;
case 7:
- if (++p == q)
- return -EINVAL;
- device->dev[i].speed = SPEED_CVT(*p);
- while (*p & 0x80)
if (++p == q)
return -EINVAL;
- break;
+ device->dev[i].speed = SPEED_CVT(*p);
+ while (*p & 0x80)
+ if (++p == q)
+ return -EINVAL;
+ break;
default:
- return -EINVAL;
+ return -EINVAL;
}
if (++p == q)
@@ -671,7 +681,7 @@ static int parse_device(tuple_t *tuple, cistpl_device_t *device)
if (++p == q)
break;
}
-
+
return 0;
}
@@ -706,9 +716,9 @@ static int parse_longlink_mfc(tuple_t *tuple,
{
u_char *p;
int i;
-
+
p = (u_char *)tuple->TupleData;
-
+
link->nfn = *p; p++;
if (tuple->TupleDataLen <= link->nfn*5)
return -EINVAL;
@@ -737,11 +747,13 @@ static int parse_strings(u_char *p, u_char *q, int max,
ns++;
for (;;) {
s[j++] = (*p == 0xff) ? '\0' : *p;
- if ((*p == '\0') || (*p == 0xff)) break;
+ if ((*p == '\0') || (*p == 0xff))
+ break;
if (++p == q)
return -EINVAL;
}
- if ((*p == 0xff) || (++p == q)) break;
+ if ((*p == 0xff) || (++p == q))
+ break;
}
if (found) {
*found = ns;
@@ -756,10 +768,10 @@ static int parse_strings(u_char *p, u_char *q, int max,
static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
{
u_char *p, *q;
-
+
p = (u_char *)tuple->TupleData;
q = p + tuple->TupleDataLen;
-
+
vers_1->major = *p; p++;
vers_1->minor = *p; p++;
if (p >= q)
@@ -774,10 +786,10 @@ static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
{
u_char *p, *q;
-
+
p = (u_char *)tuple->TupleData;
q = p + tuple->TupleDataLen;
-
+
return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
altstr->str, altstr->ofs, &altstr->ns);
}
@@ -793,7 +805,8 @@ static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
q = p + tuple->TupleDataLen;
for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
- if (p > q-2) break;
+ if (p > q-2)
+ break;
jedec->id[nid].mfr = p[0];
jedec->id[nid].info = p[1];
p += 2;
@@ -871,7 +884,7 @@ static int parse_config(tuple_t *tuple, cistpl_config_t *config)
The following routines are all used to parse the nightmarish
config table entries.
-
+
======================================================================*/
static u_char *parse_power(u_char *p, u_char *q,
@@ -880,17 +893,20 @@ static u_char *parse_power(u_char *p, u_char *q,
int i;
u_int scale;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
pwr->present = *p;
pwr->flags = 0;
p++;
for (i = 0; i < 7; i++)
if (pwr->present & (1<<i)) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
pwr->param[i] = POWER_CVT(*p);
scale = POWER_SCALE(*p);
while (*p & 0x80) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
if ((*p & 0x7f) < 100)
pwr->param[i] += (*p & 0x7f) * scale / 100;
else if (*p == 0x7d)
@@ -914,24 +930,28 @@ static u_char *parse_timing(u_char *p, u_char *q,
{
u_char scale;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
scale = *p;
if ((scale & 3) != 3) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
timing->wait = SPEED_CVT(*p);
timing->waitscale = exponent[scale & 3];
} else
timing->wait = 0;
scale >>= 2;
if ((scale & 7) != 7) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
timing->ready = SPEED_CVT(*p);
timing->rdyscale = exponent[scale & 7];
} else
timing->ready = 0;
scale >>= 3;
if (scale != 7) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
timing->reserved = SPEED_CVT(*p);
timing->rsvscale = exponent[scale];
} else
@@ -946,7 +966,8 @@ static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
{
int i, j, bsz, lsz;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
io->flags = *p;
if (!(*p & 0x80)) {
@@ -955,24 +976,29 @@ static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
return p+1;
}
-
- if (++p == q) return NULL;
+
+ if (++p == q)
+ return NULL;
io->nwin = (*p & 0x0f) + 1;
bsz = (*p & 0x30) >> 4;
- if (bsz == 3) bsz++;
+ if (bsz == 3)
+ bsz++;
lsz = (*p & 0xc0) >> 6;
- if (lsz == 3) lsz++;
+ if (lsz == 3)
+ lsz++;
p++;
-
+
for (i = 0; i < io->nwin; i++) {
io->win[i].base = 0;
io->win[i].len = 1;
for (j = 0; j < bsz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
io->win[i].base += *p << (j*8);
}
for (j = 0; j < lsz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
io->win[i].len += *p << (j*8);
}
}
@@ -986,27 +1012,32 @@ static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
int i, j, asz, lsz, has_ha;
u_int len, ca, ha;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
mem->nwin = (*p & 0x07) + 1;
lsz = (*p & 0x18) >> 3;
asz = (*p & 0x60) >> 5;
has_ha = (*p & 0x80);
- if (++p == q) return NULL;
-
+ if (++p == q)
+ return NULL;
+
for (i = 0; i < mem->nwin; i++) {
len = ca = ha = 0;
for (j = 0; j < lsz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
len += *p << (j*8);
}
for (j = 0; j < asz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
ca += *p << (j*8);
}
if (has_ha)
for (j = 0; j < asz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
ha += *p << (j*8);
}
mem->win[i].len = len << 8;
@@ -1095,7 +1126,7 @@ static int parse_cftable_entry(tuple_t *tuple,
entry->timing.ready = 0;
entry->timing.reserved = 0;
}
-
+
/* I/O window options */
if (features & 0x08) {
p = parse_io(p, q, &entry->io);
@@ -1103,7 +1134,7 @@ static int parse_cftable_entry(tuple_t *tuple,
return -EINVAL;
} else
entry->io.nwin = 0;
-
+
/* Interrupt options */
if (features & 0x10) {
p = parse_irq(p, q, &entry->irq);
@@ -1153,7 +1184,7 @@ static int parse_cftable_entry(tuple_t *tuple,
}
entry->subtuples = q-p;
-
+
return 0;
}
@@ -1176,7 +1207,7 @@ static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
{
u_char *p;
-
+
p = (u_char *)tuple->TupleData;
if ((*p != 3) || (tuple->TupleDataLen < 6))
return -EINVAL;
@@ -1231,7 +1262,7 @@ static int parse_cftable_entry_cb(tuple_t *tuple,
entry->io = *p; p++;
} else
entry->io = 0;
-
+
/* Interrupt options */
if (features & 0x10) {
p = parse_irq(p, q, &entry->irq);
@@ -1264,7 +1295,7 @@ static int parse_cftable_entry_cb(tuple_t *tuple,
}
entry->subtuples = q-p;
-
+
return 0;
}
@@ -1281,7 +1312,8 @@ static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
q = p + tuple->TupleDataLen;
for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
- if (p > q-6) break;
+ if (p > q-6)
+ break;
geo->geo[n].buswidth = p[0];
geo->geo[n].erase_block = 1 << (p[1]-1);
geo->geo[n].read_block = 1 << (p[2]-1);
@@ -1302,13 +1334,13 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
if (tuple->TupleDataLen < 10)
return -EINVAL;
-
+
p = tuple->TupleData;
q = p + tuple->TupleDataLen;
v2->vers = p[0];
v2->comply = p[1];
- v2->dindex = get_unaligned_le16(p +2 );
+ v2->dindex = get_unaligned_le16(p + 2);
v2->vspec8 = p[6];
v2->vspec9 = p[7];
v2->nhdr = p[8];
@@ -1322,7 +1354,7 @@ static int parse_org(tuple_t *tuple, cistpl_org_t *org)
{
u_char *p, *q;
int i;
-
+
p = tuple->TupleData;
q = p + tuple->TupleDataLen;
if (p == q)
@@ -1332,7 +1364,8 @@ static int parse_org(tuple_t *tuple, cistpl_org_t *org)
return -EINVAL;
for (i = 0; i < 30; i++) {
org->desc[i] = *p;
- if (*p == '\0') break;
+ if (*p == '\0')
+ break;
if (++p == q)
return -EINVAL;
}
@@ -1363,7 +1396,7 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
{
int ret = 0;
-
+
if (tuple->TupleDataLen > tuple->TupleDataMax)
return -EINVAL;
switch (tuple->TupleCode) {
@@ -1448,7 +1481,7 @@ EXPORT_SYMBOL(pcmcia_parse_tuple);
/*======================================================================
This is used internally by Card Services to look up CIS stuff.
-
+
======================================================================*/
int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse)
@@ -1550,7 +1583,7 @@ EXPORT_SYMBOL(pccard_loop_tuple);
checks include making sure several critical tuples are present and
valid; seeing if the total number of tuples is reasonable; and
looking for tuples that use reserved codes.
-
+
======================================================================*/
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 790af87a922f..6d6f82b38a68 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -135,7 +135,7 @@ int pcmcia_socket_dev_resume(struct device *dev)
EXPORT_SYMBOL(pcmcia_socket_dev_resume);
-struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt)
+struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt)
{
struct device *dev = get_device(&skt->dev);
if (!dev)
@@ -145,7 +145,7 @@ struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt)
put_device(&skt->dev);
return NULL;
}
- return (skt);
+ return skt;
}
EXPORT_SYMBOL(pcmcia_get_socket);
@@ -297,7 +297,7 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
EXPORT_SYMBOL(pcmcia_unregister_socket);
-struct pcmcia_socket * pcmcia_get_socket_by_nr(unsigned int nr)
+struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr)
{
struct pcmcia_socket *s;
@@ -736,7 +736,7 @@ EXPORT_SYMBOL(pcmcia_parse_events);
/* register pcmcia_callback */
int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
{
- int ret = 0;
+ int ret = 0;
/* s->skt_mutex also protects s->callback */
mutex_lock(&s->skt_mutex);
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(pcmcia_suspend_card);
int pcmcia_resume_card(struct pcmcia_socket *skt)
{
int ret;
-
+
dev_dbg(&skt->dev, "waking up socket\n");
mutex_lock(&skt->skt_mutex);
@@ -876,7 +876,7 @@ EXPORT_SYMBOL(pcmcia_resume_card);
int pcmcia_eject_card(struct pcmcia_socket *skt)
{
int ret;
-
+
dev_dbg(&skt->dev, "user eject request\n");
mutex_lock(&skt->skt_mutex);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 05893d41dd41..1a4a3c49cc15 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -57,7 +57,7 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
"function\n", p_drv->drv.name);
while (did && did->match_flags) {
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (!did->prod_id[i])
continue;
@@ -105,7 +105,7 @@ pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count)
__u16 match_flags, manf_id, card_id;
__u8 func_id, function, device_no;
__u32 prod_id_hash[4] = {0, 0, 0, 0};
- int fields=0;
+ int fields = 0;
int retval = 0;
fields = sscanf(buf, "%hx %hx %hx %hhx %hhx %hhx %x %x %x %x",
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(pcmcia_unregister_driver);
/* pcmcia_device handling */
-struct pcmcia_device * pcmcia_get_dev(struct pcmcia_device *p_dev)
+struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev)
{
struct device *tmp_dev;
tmp_dev = get_device(&p_dev->dev);
@@ -258,7 +258,7 @@ static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
return;
}
-static int pcmcia_device_probe(struct device * dev)
+static int pcmcia_device_probe(struct device *dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
@@ -325,7 +325,7 @@ put_module:
put_dev:
if (ret)
put_device(dev);
- return (ret);
+ return ret;
}
@@ -354,7 +354,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
list_del(&p_dev->socket_device_list);
- p_dev->_removed=1;
+ p_dev->_removed = 1;
spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
dev_dbg(&p_dev->dev, "unregistering device\n");
@@ -364,7 +364,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
return;
}
-static int pcmcia_device_remove(struct device * dev)
+static int pcmcia_device_remove(struct device *dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
@@ -391,7 +391,7 @@ static int pcmcia_device_remove(struct device * dev)
return 0;
if (p_drv->remove)
- p_drv->remove(p_dev);
+ p_drv->remove(p_dev);
p_dev->dev_node = NULL;
@@ -499,7 +499,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
*/
static DEFINE_MUTEX(device_add_lock);
-struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
+struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
{
struct pcmcia_device *p_dev, *tmp_dev;
unsigned long flags;
@@ -545,8 +545,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
* Note that this is serialized by the device_add_lock, so that
* only one such struct will be created.
*/
- list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
- if (p_dev->func == tmp_dev->func) {
+ list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
+ if (p_dev->func == tmp_dev->func) {
p_dev->function_config = tmp_dev->function_config;
p_dev->io = tmp_dev->io;
p_dev->irq = tmp_dev->irq;
@@ -627,10 +627,10 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
no_funcs = 1;
s->functions = no_funcs;
- for (i=0; i < no_funcs; i++)
+ for (i = 0; i < no_funcs; i++)
pcmcia_device_add(s, i);
- return (ret);
+ return ret;
}
@@ -756,7 +756,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
release:
release_firmware(fw);
- return (ret);
+ return ret;
}
#else /* !CONFIG_PCMCIA_LOAD_CIS */
@@ -852,7 +852,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) {
int i;
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
if (dev->prod_id[i])
return 0;
if (dev->has_manf_id || dev->has_card_id || dev->has_func_id)
@@ -865,9 +865,10 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
}
-static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
- struct pcmcia_device * p_dev = to_pcmcia_dev(dev);
- struct pcmcia_driver * p_drv = to_pcmcia_drv(drv);
+static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ struct pcmcia_driver *p_drv = to_pcmcia_drv(drv);
struct pcmcia_device_id *did = p_drv->id_table;
struct pcmcia_dynid *dynid;
@@ -917,7 +918,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
p_dev = to_pcmcia_dev(dev);
/* calculate hashes */
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (!p_dev->prod_id[i])
continue;
hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i]));
@@ -984,14 +985,14 @@ static void runtime_resume(struct device *dev)
static ssize_t field##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
- return p_dev->test ? sprintf (buf, format, p_dev->field) : -ENODEV; \
+ return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \
}
#define pcmcia_device_stringattr(name, field) \
static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
- return p_dev->field ? sprintf (buf, "%s\n", p_dev->field) : -ENODEV; \
+ return p_dev->field ? sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \
}
pcmcia_device_attr(func, socket, "0x%02x\n");
@@ -1020,8 +1021,8 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
int ret = 0;
- if (!count)
- return -EINVAL;
+ if (!count)
+ return -EINVAL;
if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
ret = runtime_suspend(dev);
@@ -1039,10 +1040,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
u32 hash[4] = { 0, 0, 0, 0};
/* calculate hashes */
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (!p_dev->prod_id[i])
continue;
- hash[i] = crc32(0,p_dev->prod_id[i],strlen(p_dev->prod_id[i]));
+ hash[i] = crc32(0, p_dev->prod_id[i],
+ strlen(p_dev->prod_id[i]));
}
return sprintf(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X"
"pa%08Xpb%08Xpc%08Xpd%08X\n",
@@ -1091,7 +1093,7 @@ static struct device_attribute pcmcia_dev_attrs[] = {
/* PM support, also needed for reset */
-static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
@@ -1131,10 +1133,10 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
}
-static int pcmcia_dev_resume(struct device * dev)
+static int pcmcia_dev_resume(struct device *dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- struct pcmcia_driver *p_drv = NULL;
+ struct pcmcia_driver *p_drv = NULL;
int ret = 0;
if (!p_dev->suspended)
@@ -1211,7 +1213,7 @@ static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
/*======================================================================
The card status event handler.
-
+
======================================================================*/
/* Normally, the event is passed to individual drivers after
@@ -1264,7 +1266,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
} /* ds_event */
-struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *_p_dev)
+struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_device *ret = NULL;
@@ -1329,7 +1331,7 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
if (ret) {
dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n");
pcmcia_put_socket(socket);
- return (ret);
+ return ret;
}
return 0;
@@ -1400,7 +1402,7 @@ static int __init init_pcmcia_bus(void)
return 0;
}
-fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that
+fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that
* pcmcia_socket_class is already registered */
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index c4d7908fa37f..f73fd5beaa37 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -88,12 +88,12 @@ static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info)
p_drv = container_of(drv, struct pcmcia_driver, drv);
- return (p_drv);
+ return p_drv;
}
#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *proc_pccard = NULL;
+static struct proc_dir_entry *proc_pccard;
static int proc_read_drivers_callback(struct device_driver *driver, void *_m)
{
@@ -158,7 +158,8 @@ static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
#else
-static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) {
+static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
+{
return 0;
}
@@ -195,7 +196,7 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
begin = adj->resource.memory.Base;
end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
if (s->resource_ops->add_mem)
- ret =s->resource_ops->add_mem(s, adj->Action, begin, end);
+ ret = s->resource_ops->add_mem(s, adj->Action, begin, end);
case RES_IO_RANGE:
begin = adj->resource.io.BasePort;
end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
@@ -215,7 +216,7 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
}
up_read(&pcmcia_socket_list_rwsem);
- return (ret);
+ return ret;
}
@@ -490,7 +491,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
}
spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
- list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
+ list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
if (p_dev->func == bind_info->function) {
if ((p_dev->dev.driver == &p_drv->drv)) {
if (p_dev->cardmgr) {
@@ -558,7 +559,7 @@ rescan:
err_put:
pcmcia_put_socket(s);
- return (ret);
+ return ret;
} /* bind_request */
#ifdef CONFIG_CARDBUS
@@ -655,7 +656,7 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
err_put:
pcmcia_put_dev(p_dev);
- return (ret);
+ return ret;
} /* get_device_info */
@@ -664,7 +665,7 @@ static int ds_open(struct inode *inode, struct file *file)
socket_t i = iminor(inode);
struct pcmcia_socket *s;
user_info_t *user;
- static int warning_printed = 0;
+ static int warning_printed;
int ret = 0;
pr_debug("ds_open(socket %d)\n", i);
@@ -738,12 +739,13 @@ static int ds_release(struct inode *inode, struct file *file)
s = user->socket;
/* Unlink user data structure */
- if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
s->pcmcia_state.busy = 0;
- }
+
file->private_data = NULL;
for (link = &s->user; *link; link = &(*link)->next)
- if (*link == user) break;
+ if (*link == user)
+ break;
if (link == NULL)
goto out;
*link = user->next;
@@ -774,7 +776,7 @@ static ssize_t ds_read(struct file *file, char __user *buf,
s = user->socket;
if (s->pcmcia_state.dead)
- return -EIO;
+ return -EIO;
ret = wait_event_interruptible(s->queue, !queue_empty(user));
if (ret == 0)
@@ -824,7 +826,7 @@ static u_int ds_poll(struct file *file, poll_table *wait)
/*====================================================================*/
-static int ds_ioctl(struct inode * inode, struct file * file,
+static int ds_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
struct pcmcia_socket *s;
@@ -842,10 +844,11 @@ static int ds_ioctl(struct inode * inode, struct file * file,
s = user->socket;
if (s->pcmcia_state.dead)
- return -EIO;
+ return -EIO;
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
- if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL;
+ if (size > sizeof(ds_ioctl_arg_t))
+ return -EINVAL;
/* Permission check */
if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN))
@@ -1024,8 +1027,8 @@ static int ds_ioctl(struct inode * inode, struct file * file,
}
if (cmd & IOC_OUT) {
- if (__copy_to_user(uarg, (char *)buf, size))
- err = -EFAULT;
+ if (__copy_to_user(uarg, (char *)buf, size))
+ err = -EFAULT;
}
free_out:
@@ -1045,7 +1048,8 @@ static const struct file_operations ds_fops = {
.poll = ds_poll,
};
-void __init pcmcia_setup_ioctl(void) {
+void __init pcmcia_setup_ioctl(void)
+{
int i;
/* Set up character device for user mode clients */
@@ -1064,7 +1068,8 @@ void __init pcmcia_setup_ioctl(void) {
}
-void __exit pcmcia_cleanup_ioctl(void) {
+void __exit pcmcia_cleanup_ioctl(void)
+{
#ifdef CONFIG_PROC_FS
if (proc_pccard) {
remove_proc_entry("drivers", proc_pccard);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index a8bf8c1b45ed..d5db95644b64 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -33,7 +33,7 @@
/* Access speed for IO windows */
-static int io_speed = 0;
+static int io_speed;
module_param(io_speed, int, 0444);
@@ -62,7 +62,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
num, align);
align = 0;
} else
- while (align && (align < num)) align <<= 1;
+ while (align && (align < num))
+ align <<= 1;
}
if (*base & ~(align-1)) {
dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n",
@@ -338,7 +339,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
struct pcmcia_socket *s = p_dev->socket;
config_t *c = p_dev->function_config;
- if (!p_dev->_io )
+ if (!p_dev->_io)
return -EINVAL;
p_dev->_io = 0;
@@ -362,7 +363,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
{
struct pcmcia_socket *s = p_dev->socket;
- config_t *c= p_dev->function_config;
+ config_t *c = p_dev->function_config;
if (!p_dev->_irq)
return -EINVAL;
@@ -383,9 +384,8 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
s->irq.AssignedIRQ = 0;
}
- if (req->Handler) {
+ if (req->Handler)
free_irq(req->AssignedIRQ, p_dev->priv);
- }
#ifdef CONFIG_PCMCIA_PROBE
pcmcia_used_irq[req->AssignedIRQ]--;
@@ -656,7 +656,8 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
type = IRQF_SHARED;
else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)
type = IRQF_SHARED;
- else printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
+ else
+ printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
#ifdef CONFIG_PCMCIA_PROBE
@@ -788,7 +789,8 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
/* Allocate system memory window */
for (w = 0; w < MAX_WIN; w++)
- if (!(s->state & SOCKET_WIN_REQ(w))) break;
+ if (!(s->state & SOCKET_WIN_REQ(w)))
+ break;
if (w == MAX_WIN) {
dev_dbg(&s->dev, "all windows are used already\n");
return -EINVAL;
@@ -826,18 +828,19 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
s->state |= SOCKET_WIN_REQ(w);
/* Return window handle */
- if (s->features & SS_CAP_STATIC_MAP) {
+ if (s->features & SS_CAP_STATIC_MAP)
req->Base = win->static_start;
- } else {
+ else
req->Base = win->res->start;
- }
+
*wh = w + 1;
return 0;
} /* pcmcia_request_window */
EXPORT_SYMBOL(pcmcia_request_window);
-void pcmcia_disable_device(struct pcmcia_device *p_dev) {
+void pcmcia_disable_device(struct pcmcia_device *p_dev)
+{
pcmcia_release_configuration(p_dev);
pcmcia_release_io(p_dev, &p_dev->io);
pcmcia_release_irq(p_dev, &p_dev->irq);
@@ -970,7 +973,7 @@ int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
&loop, pcmcia_do_loop_tuple);
-};
+}
EXPORT_SYMBOL(pcmcia_loop_tuple);
@@ -1000,7 +1003,7 @@ static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
} else
dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
return 0;
-};
+}
/**
* pcmcia_get_tuple() - get first tuple from CIS
@@ -1024,7 +1027,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
return get.len;
-};
+}
EXPORT_SYMBOL(pcmcia_get_tuple);
@@ -1057,7 +1060,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
for (i = 0; i < 6; i++)
dev->dev_addr[i] = tuple->TupleData[i+2];
return 0;
-};
+}
/**
* pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
@@ -1071,6 +1074,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
{
return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
-};
+}
EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 84dde7768ad5..3aabf1e37988 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,7 +214,8 @@ static void pxa2xx_configure_sockets(struct device *dev)
MECR |= MECR_CIT;
/* Set MECR:NOS (Number Of Sockets) */
- if ((ops->first + ops->nr) > 1 || machine_is_viper())
+ if ((ops->first + ops->nr) > 1 ||
+ machine_is_viper() || machine_is_arcom_zeus())
MECR |= MECR_NOS;
else
MECR &= ~MECR_NOS;
@@ -252,6 +253,7 @@ int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
return soc_pcmcia_add_one(skt);
}
+EXPORT_SYMBOL(pxa2xx_drv_pcmcia_add_one);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
{
@@ -261,19 +263,19 @@ void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
ops->frequency_change = pxa2xx_pcmcia_frequency_change;
#endif
}
+EXPORT_SYMBOL(pxa2xx_drv_pcmcia_ops);
-int __pxa2xx_drv_pcmcia_probe(struct device *dev)
+static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
{
int i, ret = 0;
struct pcmcia_low_level *ops;
struct skt_dev_info *sinfo;
struct soc_pcmcia_socket *skt;
- if (!dev || !dev->platform_data)
+ ops = (struct pcmcia_low_level *)dev->dev.platform_data;
+ if (!ops)
return -ENODEV;
- ops = (struct pcmcia_low_level *)dev->platform_data;
-
pxa2xx_drv_pcmcia_ops(ops);
sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
@@ -308,13 +310,6 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
return ret;
}
-EXPORT_SYMBOL(__pxa2xx_drv_pcmcia_probe);
-
-
-static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
-{
- return __pxa2xx_drv_pcmcia_probe(&dev->dev);
-}
static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
{
@@ -341,7 +336,7 @@ static int pxa2xx_drv_pcmcia_resume(struct device *dev)
return pcmcia_socket_dev_resume(dev);
}
-static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
+static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
.suspend = pxa2xx_drv_pcmcia_suspend,
.resume = pxa2xx_drv_pcmcia_resume,
};
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index cb5efaec886f..bb62ea87b8f9 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,6 +1,3 @@
-/* temporary measure */
-extern int __pxa2xx_drv_pcmcia_probe(struct device *);
-
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index 3a8993ed5621..459a232d66be 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -67,7 +67,7 @@ static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
if (ret)
goto err7;
- skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
+ skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
return 0;
err7:
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index 490749ea677f..d08802fe35f9 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -40,7 +40,7 @@ static struct pcmcia_irqs irqs[] = {
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->irq = IRQ_GPIO(SG2_S0_GPIO_READY);
+ skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
}
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index 27be2e154df2..a51f2077644a 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -1,9 +1,8 @@
/*
- * VIPER PCMCIA support
+ * Viper/Zeus PCMCIA support
* Copyright 2004 Arcom Control Systems
*
* Maintained by Marc Zyngier <maz@misterjones.org>
- * <marc.zyngier@altran.com>
*
* Based on:
* iPAQ h2200 PCMCIA support
@@ -26,37 +25,47 @@
#include <asm/irq.h>
-#include <mach/viper.h>
-#include <asm/mach-types.h>
+#include <mach/arcom-pcmcia.h>
#include "soc_common.h"
#include "pxa2xx_base.h"
+static struct platform_device *arcom_pcmcia_dev;
+
static struct pcmcia_irqs irqs[] = {
- { 0, gpio_to_irq(VIPER_CF_CD_GPIO), "PCMCIA_CD" }
+ {
+ .sock = 0,
+ .str = "PCMCIA_CD",
+ },
};
+static inline struct arcom_pcmcia_pdata *viper_get_pdata(void)
+{
+ return arcom_pcmcia_dev->dev.platform_data;
+}
+
static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
unsigned long flags;
- skt->socket.pci_irq = gpio_to_irq(VIPER_CF_RDY_GPIO);
+ skt->socket.pci_irq = gpio_to_irq(pdata->rdy_gpio);
+ irqs[0].irq = gpio_to_irq(pdata->cd_gpio);
- if (gpio_request(VIPER_CF_CD_GPIO, "CF detect"))
+ if (gpio_request(pdata->cd_gpio, "CF detect"))
goto err_request_cd;
- if (gpio_request(VIPER_CF_RDY_GPIO, "CF ready"))
+ if (gpio_request(pdata->rdy_gpio, "CF ready"))
goto err_request_rdy;
- if (gpio_request(VIPER_CF_POWER_GPIO, "CF power"))
+ if (gpio_request(pdata->pwr_gpio, "CF power"))
goto err_request_pwr;
local_irq_save(flags);
- /* GPIO 82 is the CF power enable line. initially off */
- if (gpio_direction_output(VIPER_CF_POWER_GPIO, 0) ||
- gpio_direction_input(VIPER_CF_CD_GPIO) ||
- gpio_direction_input(VIPER_CF_RDY_GPIO)) {
+ if (gpio_direction_output(pdata->pwr_gpio, 0) ||
+ gpio_direction_input(pdata->cd_gpio) ||
+ gpio_direction_input(pdata->rdy_gpio)) {
local_irq_restore(flags);
goto err_dir;
}
@@ -66,13 +75,13 @@ static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
err_dir:
- gpio_free(VIPER_CF_POWER_GPIO);
+ gpio_free(pdata->pwr_gpio);
err_request_pwr:
- gpio_free(VIPER_CF_RDY_GPIO);
+ gpio_free(pdata->rdy_gpio);
err_request_rdy:
- gpio_free(VIPER_CF_CD_GPIO);
+ gpio_free(pdata->cd_gpio);
err_request_cd:
- printk(KERN_ERR "viper: Failed to setup PCMCIA GPIOs\n");
+ dev_err(&arcom_pcmcia_dev->dev, "Failed to setup PCMCIA GPIOs\n");
return -1;
}
@@ -81,17 +90,21 @@ err_request_cd:
*/
static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
+
soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
- gpio_free(VIPER_CF_POWER_GPIO);
- gpio_free(VIPER_CF_RDY_GPIO);
- gpio_free(VIPER_CF_CD_GPIO);
+ gpio_free(pdata->pwr_gpio);
+ gpio_free(pdata->rdy_gpio);
+ gpio_free(pdata->cd_gpio);
}
static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
- state->detect = gpio_get_value(VIPER_CF_CD_GPIO) ? 0 : 1;
- state->ready = gpio_get_value(VIPER_CF_RDY_GPIO) ? 1 : 0;
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
+
+ state->detect = !gpio_get_value(pdata->cd_gpio);
+ state->ready = !!gpio_get_value(pdata->rdy_gpio);
state->bvd1 = 1;
state->bvd2 = 1;
state->wrprot = 0;
@@ -102,20 +115,21 @@ static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
+
/* Silently ignore Vpp, output enable, speaker enable. */
- viper_cf_rst(state->flags & SS_RESET);
+ pdata->reset(state->flags & SS_RESET);
/* Apply socket voltage */
switch (state->Vcc) {
case 0:
- gpio_set_value(VIPER_CF_POWER_GPIO, 0);
+ gpio_set_value(pdata->pwr_gpio, 0);
break;
case 33:
- gpio_set_value(VIPER_CF_POWER_GPIO, 1);
+ gpio_set_value(pdata->pwr_gpio, 1);
break;
default:
- printk(KERN_ERR "%s: Unsupported Vcc:%d\n",
- __func__, state->Vcc);
+ dev_err(&arcom_pcmcia_dev->dev, "Unsupported Vcc:%d\n", state->Vcc);
return -1;
}
@@ -130,7 +144,7 @@ static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
}
-static struct pcmcia_low_level viper_pcmcia_ops __initdata = {
+static struct pcmcia_low_level viper_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = viper_pcmcia_hw_init,
.hw_shutdown = viper_pcmcia_hw_shutdown,
@@ -143,17 +157,25 @@ static struct pcmcia_low_level viper_pcmcia_ops __initdata = {
static struct platform_device *viper_pcmcia_device;
-static int __init viper_pcmcia_init(void)
+static int viper_pcmcia_probe(struct platform_device *pdev)
{
int ret;
- if (!machine_is_viper())
- return -ENODEV;
+ /* I can't imagine more than one device, but you never know... */
+ if (arcom_pcmcia_dev)
+ return -EEXIST;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!viper_pcmcia_device)
return -ENOMEM;
+ arcom_pcmcia_dev = pdev;
+
+ viper_pcmcia_device->dev.parent = &pdev->dev;
+
ret = platform_device_add_data(viper_pcmcia_device,
&viper_pcmcia_ops,
sizeof(viper_pcmcia_ops));
@@ -161,18 +183,49 @@ static int __init viper_pcmcia_init(void)
if (!ret)
ret = platform_device_add(viper_pcmcia_device);
- if (ret)
+ if (ret) {
platform_device_put(viper_pcmcia_device);
+ arcom_pcmcia_dev = NULL;
+ }
return ret;
}
-static void __exit viper_pcmcia_exit(void)
+static int viper_pcmcia_remove(struct platform_device *pdev)
{
platform_device_unregister(viper_pcmcia_device);
+ arcom_pcmcia_dev = NULL;
+ return 0;
+}
+
+static struct platform_device_id viper_pcmcia_id_table[] = {
+ { .name = "viper-pcmcia", },
+ { .name = "zeus-pcmcia", },
+ { },
+};
+
+static struct platform_driver viper_pcmcia_driver = {
+ .probe = viper_pcmcia_probe,
+ .remove = viper_pcmcia_remove,
+ .driver = {
+ .name = "arcom-pcmcia",
+ .owner = THIS_MODULE,
+ },
+ .id_table = viper_pcmcia_id_table,
+};
+
+static int __init viper_pcmcia_init(void)
+{
+ return platform_driver_register(&viper_pcmcia_driver);
+}
+
+static void __exit viper_pcmcia_exit(void)
+{
+ return platform_driver_unregister(&viper_pcmcia_driver);
}
module_init(viper_pcmcia_init);
module_exit(viper_pcmcia_exit);
+MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table);
MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index de0e770ce6a3..52db17263d8b 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -126,16 +126,16 @@ static void pcmcia_align(void *align_data, struct resource *res,
res->start = start;
#ifdef CONFIG_X86
- if (res->flags & IORESOURCE_IO) {
- if (start & 0x300) {
- start = (start + 0x3ff) & ~0x3ff;
- res->start = start;
- }
- }
+ if (res->flags & IORESOURCE_IO) {
+ if (start & 0x300) {
+ start = (start + 0x3ff) & ~0x3ff;
+ res->start = start;
+ }
+ }
#endif
#ifdef CONFIG_M68K
- if (res->flags & IORESOURCE_IO) {
+ if (res->flags & IORESOURCE_IO) {
if ((res->start + size - 1) >= 1024)
res->start = res->end;
}
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 7039f3cf5b77..9b0dc433a8c3 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -24,9 +24,9 @@
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
@@ -144,43 +144,44 @@ static int add_interval(struct resource_map *map, u_long base, u_long num)
static int sub_interval(struct resource_map *map, u_long base, u_long num)
{
- struct resource_map *p, *q;
-
- for (p = map; ; p = q) {
- q = p->next;
- if (q == map)
- break;
- if ((q->base+q->num > base) && (base+num > q->base)) {
- if (q->base >= base) {
- if (q->base+q->num <= base+num) {
- /* Delete whole block */
- p->next = q->next;
- kfree(q);
- /* don't advance the pointer yet */
- q = p;
- } else {
- /* Cut off bit from the front */
- q->num = q->base + q->num - base - num;
- q->base = base + num;
- }
- } else if (q->base+q->num <= base+num) {
- /* Cut off bit from the end */
- q->num = base - q->base;
- } else {
- /* Split the block into two pieces */
- p = kmalloc(sizeof(struct resource_map), GFP_KERNEL);
- if (!p) {
- printk(KERN_WARNING "out of memory to update resources\n");
- return -ENOMEM;
+ struct resource_map *p, *q;
+
+ for (p = map; ; p = q) {
+ q = p->next;
+ if (q == map)
+ break;
+ if ((q->base+q->num > base) && (base+num > q->base)) {
+ if (q->base >= base) {
+ if (q->base+q->num <= base+num) {
+ /* Delete whole block */
+ p->next = q->next;
+ kfree(q);
+ /* don't advance the pointer yet */
+ q = p;
+ } else {
+ /* Cut off bit from the front */
+ q->num = q->base + q->num - base - num;
+ q->base = base + num;
+ }
+ } else if (q->base+q->num <= base+num) {
+ /* Cut off bit from the end */
+ q->num = base - q->base;
+ } else {
+ /* Split the block into two pieces */
+ p = kmalloc(sizeof(struct resource_map),
+ GFP_KERNEL);
+ if (!p) {
+ printk(KERN_WARNING "out of memory to update resources\n");
+ return -ENOMEM;
+ }
+ p->base = base+num;
+ p->num = q->base+q->num - p->base;
+ q->num = base - q->base;
+ p->next = q->next ; q->next = p;
+ }
}
- p->base = base+num;
- p->num = q->base+q->num - p->base;
- q->num = base - q->base;
- p->next = q->next ; q->next = p;
- }
}
- }
- return 0;
+ return 0;
}
/*======================================================================
@@ -194,69 +195,72 @@ static int sub_interval(struct resource_map *map, u_long base, u_long num)
static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
unsigned int num)
{
- struct resource *res;
- struct socket_data *s_data = s->resource_data;
- unsigned int i, j, bad;
- int any;
- u_char *b, hole, most;
-
- dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:",
- base, base+num-1);
-
- /* First, what does a floating port look like? */
- b = kzalloc(256, GFP_KERNEL);
- if (!b) {
- printk("\n");
- dev_printk(KERN_ERR, &s->dev,
- "do_io_probe: unable to kmalloc 256 bytes");
- return;
- }
- for (i = base, most = 0; i < base+num; i += 8) {
- res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe");
- if (!res)
- continue;
- hole = inb(i);
- for (j = 1; j < 8; j++)
- if (inb(i+j) != hole) break;
- free_region(res);
- if ((j == 8) && (++b[hole] > b[most]))
- most = hole;
- if (b[most] == 127) break;
- }
- kfree(b);
-
- bad = any = 0;
- for (i = base; i < base+num; i += 8) {
- res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe");
- if (!res)
- continue;
- for (j = 0; j < 8; j++)
- if (inb(i+j) != most) break;
- free_region(res);
- if (j < 8) {
- if (!any)
- printk(" excluding");
- if (!bad)
- bad = any = i;
- } else {
- if (bad) {
- sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#x-%#x", bad, i-1);
- bad = 0;
- }
+ struct resource *res;
+ struct socket_data *s_data = s->resource_data;
+ unsigned int i, j, bad;
+ int any;
+ u_char *b, hole, most;
+
+ dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:",
+ base, base+num-1);
+
+ /* First, what does a floating port look like? */
+ b = kzalloc(256, GFP_KERNEL);
+ if (!b) {
+ printk("\n");
+ dev_printk(KERN_ERR, &s->dev,
+ "do_io_probe: unable to kmalloc 256 bytes");
+ return;
}
- }
- if (bad) {
- if ((num > 16) && (bad == base) && (i == base+num)) {
- printk(" nothing: probe failed.\n");
- return;
- } else {
- sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#x-%#x", bad, i-1);
+ for (i = base, most = 0; i < base+num; i += 8) {
+ res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
+ if (!res)
+ continue;
+ hole = inb(i);
+ for (j = 1; j < 8; j++)
+ if (inb(i+j) != hole)
+ break;
+ free_region(res);
+ if ((j == 8) && (++b[hole] > b[most]))
+ most = hole;
+ if (b[most] == 127)
+ break;
}
- }
+ kfree(b);
- printk(any ? "\n" : " clean.\n");
+ bad = any = 0;
+ for (i = base; i < base+num; i += 8) {
+ res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
+ if (!res)
+ continue;
+ for (j = 0; j < 8; j++)
+ if (inb(i+j) != most)
+ break;
+ free_region(res);
+ if (j < 8) {
+ if (!any)
+ printk(" excluding");
+ if (!bad)
+ bad = any = i;
+ } else {
+ if (bad) {
+ sub_interval(&s_data->io_db, bad, i-bad);
+ printk(" %#x-%#x", bad, i-1);
+ bad = 0;
+ }
+ }
+ }
+ if (bad) {
+ if ((num > 16) && (bad == base) && (i == base+num)) {
+ printk(" nothing: probe failed.\n");
+ return;
+ } else {
+ sub_interval(&s_data->io_db, bad, i-bad);
+ printk(" %#x-%#x", bad, i-1);
+ }
+ }
+
+ printk(any ? "\n" : " clean.\n");
}
#endif
@@ -327,8 +331,9 @@ cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
unsigned int info1, info2;
int ret = 0;
- res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe");
- res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "cs memory probe");
+ res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
+ res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
+ "PCMCIA memprobe");
if (res1 && res2) {
ret = readable(s, res1, &info1);
@@ -347,8 +352,9 @@ checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size)
struct resource *res1, *res2;
int a = -1, b = -1;
- res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe");
- res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "cs memory probe");
+ res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
+ res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
+ "PCMCIA memprobe");
if (res1 && res2) {
a = checksum(s, res1);
@@ -371,42 +377,43 @@ checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size)
static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
{
- struct socket_data *s_data = s->resource_data;
- u_long i, j, bad, fail, step;
-
- dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:",
- base, base+num-1);
- bad = fail = 0;
- step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
- /* don't allow too large steps */
- if (step > 0x800000)
- step = 0x800000;
- /* cis_readable wants to map 2x map_size */
- if (step < 2 * s->map_size)
- step = 2 * s->map_size;
- for (i = j = base; i < base+num; i = j + step) {
- if (!fail) {
- for (j = i; j < base+num; j += step) {
- if (cis_readable(s, j, step))
- break;
- }
- fail = ((i == base) && (j == base+num));
- }
- if (fail) {
- for (j = i; j < base+num; j += 2*step)
- if (checksum_match(s, j, step) &&
- checksum_match(s, j + step, step))
- break;
- }
- if (i != j) {
- if (!bad) printk(" excluding");
- printk(" %#05lx-%#05lx", i, j-1);
- sub_interval(&s_data->mem_db, i, j-i);
- bad += j-i;
+ struct socket_data *s_data = s->resource_data;
+ u_long i, j, bad, fail, step;
+
+ dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:",
+ base, base+num-1);
+ bad = fail = 0;
+ step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
+ /* don't allow too large steps */
+ if (step > 0x800000)
+ step = 0x800000;
+ /* cis_readable wants to map 2x map_size */
+ if (step < 2 * s->map_size)
+ step = 2 * s->map_size;
+ for (i = j = base; i < base+num; i = j + step) {
+ if (!fail) {
+ for (j = i; j < base+num; j += step) {
+ if (cis_readable(s, j, step))
+ break;
+ }
+ fail = ((i == base) && (j == base+num));
+ }
+ if (fail) {
+ for (j = i; j < base+num; j += 2*step)
+ if (checksum_match(s, j, step) &&
+ checksum_match(s, j + step, step))
+ break;
+ }
+ if (i != j) {
+ if (!bad)
+ printk(" excluding");
+ printk(" %#05lx-%#05lx", i, j-1);
+ sub_interval(&s_data->mem_db, i, j-i);
+ bad += j-i;
+ }
}
- }
- printk(bad ? "\n" : " clean.\n");
- return (num - bad);
+ printk(bad ? "\n" : " clean.\n");
+ return num - bad;
}
#ifdef CONFIG_PCMCIA_PROBE
@@ -656,7 +663,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
return res;
}
-static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
+static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
u_long align, int low, struct pcmcia_socket *s)
{
struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev));
@@ -794,7 +801,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
return -EINVAL;
#endif
- for (i=0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
res = s->cb_dev->bus->resource[i];
if (!res)
continue;
@@ -908,14 +915,14 @@ static ssize_t show_io_db(struct device *dev,
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf (&buf[ret], (PAGE_SIZE - ret - 1),
- "0x%08lx - 0x%08lx\n",
- ((unsigned long) p->base),
- ((unsigned long) p->base + p->num - 1));
+ ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ "0x%08lx - 0x%08lx\n",
+ ((unsigned long) p->base),
+ ((unsigned long) p->base + p->num - 1));
}
mutex_unlock(&rsrc_mutex);
- return (ret);
+ return ret;
}
static ssize_t store_io_db(struct device *dev,
@@ -927,12 +934,13 @@ static ssize_t store_io_db(struct device *dev,
unsigned int add = ADD_MANAGED_RESOURCE;
ssize_t ret = 0;
- ret = sscanf (buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
if (ret != 2) {
- ret = sscanf (buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
add = REMOVE_MANAGED_RESOURCE;
if (ret != 2) {
- ret = sscanf (buf, "0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr,
+ &end_addr);
add = ADD_MANAGED_RESOURCE;
if (ret != 2)
return -EINVAL;
@@ -963,14 +971,14 @@ static ssize_t show_mem_db(struct device *dev,
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf (&buf[ret], (PAGE_SIZE - ret - 1),
- "0x%08lx - 0x%08lx\n",
- ((unsigned long) p->base),
- ((unsigned long) p->base + p->num - 1));
+ ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ "0x%08lx - 0x%08lx\n",
+ ((unsigned long) p->base),
+ ((unsigned long) p->base + p->num - 1));
}
mutex_unlock(&rsrc_mutex);
- return (ret);
+ return ret;
}
static ssize_t store_mem_db(struct device *dev,
@@ -982,12 +990,13 @@ static ssize_t store_mem_db(struct device *dev,
unsigned int add = ADD_MANAGED_RESOURCE;
ssize_t ret = 0;
- ret = sscanf (buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
if (ret != 2) {
- ret = sscanf (buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
add = REMOVE_MANAGED_RESOURCE;
if (ret != 2) {
- ret = sscanf (buf, "0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr,
+ &end_addr);
add = ADD_MANAGED_RESOURCE;
if (ret != 2)
return -EINVAL;
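
For reference, the store_io_db()/store_mem_db() handlers above accept three input forms: "+ start - end" adds a managed window, "- start - end" removes one, and a bare "start - end" also adds. A minimal user-space sketch of the same sscanf() cascade (the buffer contents below are hypothetical, not from this patch):

#include <stdio.h>

/* Mirrors the parsing cascade in store_io_db()/store_mem_db(). */
static int parse_range(const char *buf, unsigned long *start,
		       unsigned long *end, int *add)
{
	*add = 1;
	if (sscanf(buf, "+ 0x%lx - 0x%lx", start, end) == 2)
		return 0;
	*add = 0;
	if (sscanf(buf, "- 0x%lx - 0x%lx", start, end) == 2)
		return 0;
	*add = 1;
	if (sscanf(buf, "0x%lx - 0x%lx", start, end) == 2)
		return 0;
	return -1;	/* corresponds to the -EINVAL case above */
}

int main(void)
{
	unsigned long s, e;
	int add;

	if (!parse_range("+ 0x100 - 0x3ff", &s, &e, &add))
		printf("%s 0x%lx-0x%lx\n", add ? "add" : "remove", s, e);
	return 0;
}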
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 78d5aab542f7..7a456000332a 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -164,7 +164,7 @@ static ssize_t pccard_store_irq_mask(struct device *dev,
if (!count)
return -EINVAL;
- ret = sscanf (buf, "0x%x\n", &mask);
+ ret = sscanf(buf, "0x%x\n", &mask);
if (ret == 1) {
s->irq_mask &= mask;
@@ -278,7 +278,7 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off
free_tuple:
kfree(tuplebuffer);
- return (ret);
+ return ret;
}
static ssize_t pccard_show_cis(struct kobject *kobj,
@@ -308,7 +308,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
count = pccard_extract_cis(s, buf, off, count);
}
- return (count);
+ return count;
}
static ssize_t pccard_store_cis(struct kobject *kobj,
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 8be4cc447a17..e4d12acdd525 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -6,7 +6,7 @@
* Changelog:
* Aug 2002: Manfred Spraul <manfred@colorfullife.com>
* Dynamically adjust the size of the bridge resource
- *
+ *
* May 2003: Dominik Brodowski <linux@brodo.de>
* Merge pci_socket.c and yenta.c into one file
*/
@@ -16,13 +16,12 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
-#include <asm/io.h>
-
#include "yenta_socket.h"
#include "i82365.h"
@@ -55,7 +54,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket);
static unsigned int override_bios;
module_param(override_bios, uint, 0000);
-MODULE_PARM_DESC (override_bios, "yenta ignore bios resource allocation");
+MODULE_PARM_DESC(override_bios, "yenta ignore bios resource allocation");
/*
* Generate easy-to-use ways of reading a cardbus sockets
@@ -237,24 +236,42 @@ static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
/* i82365SL-DF style */
if (socket->flags & YENTA_16BIT_POWER_DF) {
switch (state->Vcc) {
- case 33: reg |= I365_VCC_3V; break;
- case 50: reg |= I365_VCC_5V; break;
- default: reg = 0; break;
+ case 33:
+ reg |= I365_VCC_3V;
+ break;
+ case 50:
+ reg |= I365_VCC_5V;
+ break;
+ default:
+ reg = 0;
+ break;
}
switch (state->Vpp) {
case 33:
- case 50: reg |= I365_VPP1_5V; break;
- case 120: reg |= I365_VPP1_12V; break;
+ case 50:
+ reg |= I365_VPP1_5V;
+ break;
+ case 120:
+ reg |= I365_VPP1_12V;
+ break;
}
} else {
/* i82365SL-B style */
switch (state->Vcc) {
- case 50: reg |= I365_VCC_5V; break;
- default: reg = 0; break;
+ case 50:
+ reg |= I365_VCC_5V;
+ break;
+ default:
+ reg = 0;
+ break;
}
switch (state->Vpp) {
- case 50: reg |= I365_VPP1_5V | I365_VPP2_5V; break;
- case 120: reg |= I365_VPP1_12V | I365_VPP2_12V; break;
+ case 50:
+ reg |= I365_VPP1_5V | I365_VPP2_5V;
+ break;
+ case 120:
+ reg |= I365_VPP1_12V | I365_VPP2_12V;
+ break;
}
}
@@ -263,14 +280,26 @@ static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
} else {
u32 reg = 0; /* CB_SC_STPCLK? */
switch (state->Vcc) {
- case 33: reg = CB_SC_VCC_3V; break;
- case 50: reg = CB_SC_VCC_5V; break;
- default: reg = 0; break;
+ case 33:
+ reg = CB_SC_VCC_3V;
+ break;
+ case 50:
+ reg = CB_SC_VCC_5V;
+ break;
+ default:
+ reg = 0;
+ break;
}
switch (state->Vpp) {
- case 33: reg |= CB_SC_VPP_3V; break;
- case 50: reg |= CB_SC_VPP_5V; break;
- case 120: reg |= CB_SC_VPP_12V; break;
+ case 33:
+ reg |= CB_SC_VPP_3V;
+ break;
+ case 50:
+ reg |= CB_SC_VPP_5V;
+ break;
+ case 120:
+ reg |= CB_SC_VPP_12V;
+ break;
}
if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
cb_writel(socket, CB_SOCKET_CONTROL, reg);
@@ -314,23 +343,29 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
reg = exca_readb(socket, I365_POWER) & (I365_VCC_MASK|I365_VPP1_MASK);
reg |= I365_PWR_NORESET;
- if (state->flags & SS_PWR_AUTO) reg |= I365_PWR_AUTO;
- if (state->flags & SS_OUTPUT_ENA) reg |= I365_PWR_OUT;
+ if (state->flags & SS_PWR_AUTO)
+ reg |= I365_PWR_AUTO;
+ if (state->flags & SS_OUTPUT_ENA)
+ reg |= I365_PWR_OUT;
if (exca_readb(socket, I365_POWER) != reg)
exca_writeb(socket, I365_POWER, reg);
/* CSC interrupt: no ISA irq for CSC */
reg = I365_CSC_DETECT;
if (state->flags & SS_IOCARD) {
- if (state->csc_mask & SS_STSCHG) reg |= I365_CSC_STSCHG;
+ if (state->csc_mask & SS_STSCHG)
+ reg |= I365_CSC_STSCHG;
} else {
- if (state->csc_mask & SS_BATDEAD) reg |= I365_CSC_BVD1;
- if (state->csc_mask & SS_BATWARN) reg |= I365_CSC_BVD2;
- if (state->csc_mask & SS_READY) reg |= I365_CSC_READY;
+ if (state->csc_mask & SS_BATDEAD)
+ reg |= I365_CSC_BVD1;
+ if (state->csc_mask & SS_BATWARN)
+ reg |= I365_CSC_BVD2;
+ if (state->csc_mask & SS_READY)
+ reg |= I365_CSC_READY;
}
exca_writeb(socket, I365_CSCINT, reg);
exca_readb(socket, I365_CSC);
- if(sock->zoom_video)
+ if (sock->zoom_video)
sock->zoom_video(sock, state->flags & SS_ZVCARD);
}
config_writew(socket, CB_BRIDGE_CONTROL, bridge);
@@ -368,9 +403,12 @@ static int yenta_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io
exca_writew(socket, I365_IO(map)+I365_W_STOP, io->stop);
ioctl = exca_readb(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map);
- if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map);
- if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map);
- if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map);
+ if (io->flags & MAP_0WS)
+ ioctl |= I365_IOCTL_0WS(map);
+ if (io->flags & MAP_16BIT)
+ ioctl |= I365_IOCTL_16BIT(map);
+ if (io->flags & MAP_AUTOSZ)
+ ioctl |= I365_IOCTL_IOCS16(map);
exca_writeb(socket, I365_IOCTL, ioctl);
if (io->flags & MAP_ACTIVE)
@@ -416,10 +454,17 @@ static int yenta_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *
word = (stop >> 12) & 0x0fff;
switch (to_cycles(mem->speed)) {
- case 0: break;
- case 1: word |= I365_MEM_WS0; break;
- case 2: word |= I365_MEM_WS1; break;
- default: word |= I365_MEM_WS1 | I365_MEM_WS0; break;
+ case 0:
+ break;
+ case 1:
+ word |= I365_MEM_WS0;
+ break;
+ case 2:
+ word |= I365_MEM_WS1;
+ break;
+ default:
+ word |= I365_MEM_WS1 | I365_MEM_WS0;
+ break;
}
exca_writew(socket, I365_MEM(map) + I365_W_STOP, word);
@@ -547,9 +592,9 @@ static int yenta_sock_suspend(struct pcmcia_socket *sock)
* max 4 MB, min 16 kB. We try very hard to not get below
* the "ACC" values, though.
*/
-#define BRIDGE_MEM_MAX 4*1024*1024
-#define BRIDGE_MEM_ACC 128*1024
-#define BRIDGE_MEM_MIN 16*1024
+#define BRIDGE_MEM_MAX (4*1024*1024)
+#define BRIDGE_MEM_ACC (128*1024)
+#define BRIDGE_MEM_MIN (16*1024)
#define BRIDGE_IO_MAX 512
#define BRIDGE_IO_ACC 256
@@ -574,7 +619,7 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
int i;
size = BRIDGE_MEM_MAX;
if (size > avail/8) {
- size=(avail+1)/8;
+ size = (avail+1)/8;
/* round size down to next power of 2 */
i = 0;
while ((size /= 2) != 0)
@@ -590,7 +635,7 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
do {
if (allocate_resource(root, res, size, start, end, align,
- NULL, NULL)==0) {
+ NULL, NULL) == 0) {
return 1;
}
size = size/2;
@@ -605,8 +650,8 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
u32 min)
{
int i;
- for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
- struct resource * root = socket->dev->bus->resource[i];
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ struct resource *root = socket->dev->bus->resource[i];
if (!root)
continue;
@@ -704,7 +749,7 @@ static void yenta_allocate_resources(struct yenta_socket *socket)
static void yenta_free_resources(struct yenta_socket *socket)
{
int i;
- for (i=0;i<4;i++) {
+ for (i = 0; i < 4; i++) {
struct resource *res;
res = socket->dev->resource + PCI_BRIDGE_RESOURCES + i;
if (res->start != 0 && res->end != 0)
@@ -726,7 +771,7 @@ static void __devexit yenta_close(struct pci_dev *dev)
/* we don't want a dying socket registered */
pcmcia_unregister_socket(&sock->socket);
-
+
/* Disable all events so we don't die in an IRQ storm */
cb_writel(sock, CB_SOCKET_MASK, 0x0);
exca_writeb(sock, I365_CSCINT, 0);
@@ -898,7 +943,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
{
struct yenta_socket *socket = (struct yenta_socket *) dev_id;
u8 csc;
- u32 cb_event;
+ u32 cb_event;
/* Clear interrupt status for the event */
cb_event = cb_readl(socket, CB_SOCKET_EVENT);
@@ -1019,7 +1064,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
{
struct list_head *tmp;
unsigned char upper_limit;
- /*
+ /*
* We only check and fix the parent bridge: All systems which need
* this fixup that have been reviewed are laptops and the only bridge
* which needed fixing was the parent bridge of the CardBus bridge:
@@ -1038,7 +1083,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
/* check the bus ranges of all sibling bridges to prevent overlap */
list_for_each(tmp, &bridge_to_fix->parent->children) {
- struct pci_bus * silbling = pci_bus_b(tmp);
+ struct pci_bus *silbling = pci_bus_b(tmp);
/*
* If the sibling has a higher secondary bus number
* and its secondary is equal to or smaller than our
@@ -1083,7 +1128,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
* interrupt, and that we can map the cardbus area. Fill in the
* socket information structure..
*/
-static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct yenta_socket *socket;
int ret;
@@ -1285,7 +1330,7 @@ static int yenta_dev_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops yenta_pm_ops = {
+static const struct dev_pm_ops yenta_pm_ops = {
.suspend_noirq = yenta_dev_suspend_noirq,
.resume_noirq = yenta_dev_resume_noirq,
.resume = yenta_dev_resume,
@@ -1302,7 +1347,7 @@ static struct dev_pm_ops yenta_pm_ops = {
#define YENTA_PM_OPS NULL
#endif
-#define CB_ID(vend,dev,type) \
+#define CB_ID(vend, dev, type) \
{ \
.vendor = vend, \
.device = dev, \
@@ -1313,7 +1358,7 @@ static struct dev_pm_ops yenta_pm_ops = {
.driver_data = CARDBUS_TYPE_##type, \
}
-static struct pci_device_id yenta_table [] = {
+static struct pci_device_id yenta_table[] = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
/*
@@ -1403,13 +1448,13 @@ static struct pci_driver yenta_cardbus_driver = {
static int __init yenta_socket_init(void)
{
- return pci_register_driver (&yenta_cardbus_driver);
+ return pci_register_driver(&yenta_cardbus_driver);
}
-static void __exit yenta_socket_exit (void)
+static void __exit yenta_socket_exit(void)
{
- pci_unregister_driver (&yenta_cardbus_driver);
+ pci_unregister_driver(&yenta_cardbus_driver);
}
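
One note on the BRIDGE_MEM_* change above: wrapping the arithmetic in parentheses is not purely cosmetic, since an unparenthesized macro body can bind differently at the use site. A small stand-alone illustration (the total value is hypothetical):

#include <stdio.h>

#define BRIDGE_MEM_MIN_OLD	16*1024		/* pre-patch, unparenthesized */
#define BRIDGE_MEM_MIN_NEW	(16*1024)	/* as changed above */

int main(void)
{
	unsigned long total = 4*1024*1024;	/* hypothetical 4 MB */

	/* expands to total / 16 * 1024, i.e. (total / 16) * 1024 */
	printf("old: %lu\n", total / BRIDGE_MEM_MIN_OLD);	/* 268435456 */
	/* expands to total / (16*1024), the intended result */
	printf("new: %lu\n", total / BRIDGE_MEM_MIN_NEW);	/* 256 */
	return 0;
}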
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index ab64522aaa64..be27aa47e810 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -460,7 +460,7 @@ static int acerhdf_remove(struct platform_device *device)
return 0;
}
-static struct dev_pm_ops acerhdf_pm_ops = {
+static const struct dev_pm_ops acerhdf_pm_ops = {
.suspend = acerhdf_suspend,
.freeze = acerhdf_suspend,
};
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4226e5352738..e647a856b9bf 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -154,7 +154,7 @@ static struct eeepc_hotk *ehotk;
static int eeepc_hotk_thaw(struct device *device);
static int eeepc_hotk_restore(struct device *device);
-static struct dev_pm_ops eeepc_pm_ops = {
+static const struct dev_pm_ops eeepc_pm_ops = {
.thaw = eeepc_hotk_thaw,
.restore = eeepc_hotk_restore,
};
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index c2842171cec6..f00a71c58e69 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -94,7 +94,7 @@ static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
-static struct dev_pm_ops hp_wmi_pm_ops = {
+static const struct dev_pm_ops hp_wmi_pm_ops = {
.resume = hp_wmi_resume_handler,
.restore = hp_wmi_resume_handler,
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0ed84806f8ae..cf61d6a8ef6f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1006,11 +1006,8 @@ static int parse_strtoul(const char *buf,
{
char *endp;
- while (*buf && isspace(*buf))
- buf++;
- *value = simple_strtoul(buf, &endp, 0);
- while (*endp && isspace(*endp))
- endp++;
+ *value = simple_strtoul(skip_spaces(buf), &endp, 0);
+ endp = skip_spaces(endp);
if (*endp || *value > max)
return -EINVAL;
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index c3f1c8e9d254..68b0c04987e4 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -310,8 +310,7 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
goto done;
}
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (!strnicmp(buf, "disable", 7)) {
retval = pnp_disable_dev(dev);
goto done;
@@ -353,19 +352,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
pnp_init_resources(dev);
mutex_lock(&pnp_res_mutex);
while (1) {
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (!strnicmp(buf, "io", 2)) {
- buf += 2;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 2);
start = simple_strtoul(buf, &buf, 0);
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (*buf == '-') {
- buf += 1;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 1);
end = simple_strtoul(buf, &buf, 0);
} else
end = start;
@@ -373,16 +366,11 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
continue;
}
if (!strnicmp(buf, "mem", 3)) {
- buf += 3;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (*buf == '-') {
- buf += 1;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 1);
end = simple_strtoul(buf, &buf, 0);
} else
end = start;
@@ -390,17 +378,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
continue;
}
if (!strnicmp(buf, "irq", 3)) {
- buf += 3;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
pnp_add_irq_resource(dev, start, 0);
continue;
}
if (!strnicmp(buf, "dma", 3)) {
- buf += 3;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
pnp_add_dma_resource(dev, start, 0);
continue;
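
The repeated "while (isspace(*buf)) ++buf;" loops replaced above all collapse into the kernel's skip_spaces() helper. A user-space sketch of the same idiom (not the kernel implementation; the parsed string is just an example):

#include <ctype.h>
#include <stdio.h>

/* Return a pointer to the first non-whitespace character of str,
 * like the skip_spaces() calls introduced above.
 */
static char *skip_spaces_sketch(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

int main(void)
{
	const char *buf = "   io  0x220 - 0x22f";

	buf = skip_spaces_sketch(buf);	/* now points at "io ..." */
	printf("%s\n", buf);
	return 0;
}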
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 118674925516..d4b3d67f0548 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -29,6 +29,13 @@ config APM_POWER
Say Y here to enable support APM status emulation using
battery class devices.
+config WM831X_BACKUP
+ tristate "WM831X backup battery charger support"
+ depends on MFD_WM831X
+ help
+ Say Y here to enable support for the backup battery charger
+ in the Wolfson Microelectronics WM831x PMICs.
+
config WM831X_POWER
tristate "WM831X PMU support"
depends on MFD_WM831X
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 356cdfd3c8b2..573597c683b4 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_PDA_POWER) += pda_power.o
obj-$(CONFIG_APM_POWER) += apm_power.o
+obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index e8b278f71781..ea3fdfaca90d 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -29,15 +29,12 @@
struct pcf50633_mbc {
struct pcf50633 *pcf;
- int adapter_active;
int adapter_online;
- int usb_active;
int usb_online;
struct power_supply usb;
struct power_supply adapter;
-
- struct delayed_work charging_restart_work;
+ struct power_supply ac;
};
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
@@ -47,16 +44,21 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
u8 bits;
int charging_start = 1;
u8 mbcs2, chgmod;
+ unsigned int mbcc5;
- if (ma >= 1000)
+ if (ma >= 1000) {
bits = PCF50633_MBCC7_USB_1000mA;
- else if (ma >= 500)
+ ma = 1000;
+ } else if (ma >= 500) {
bits = PCF50633_MBCC7_USB_500mA;
- else if (ma >= 100)
+ ma = 500;
+ } else if (ma >= 100) {
bits = PCF50633_MBCC7_USB_100mA;
- else {
+ ma = 100;
+ } else {
bits = PCF50633_MBCC7_USB_SUSPEND;
charging_start = 0;
+ ma = 0;
}
ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7,
@@ -66,21 +68,40 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
else
dev_info(pcf->dev, "usb curlim to %d mA\n", ma);
- /* Manual charging start */
- mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+ /*
+ * We limit the charging current to be the USB current limit.
+ * The reason is that on pcf50633, when it enters PMU Standby mode,
+ * which it does when the device goes "off", the USB current limit
+ * reverts to the variant default. In at least one common case, that
+ * default is 500mA. By setting the charging current to be the same
+ * as the USB limit we set here before PMU standby, we ensure that only
+ * the correct amount of current is drawn even when the USB current limit
+ * gets reset to the wrong value.
+ */
+
+ if (mbc->pcf->pdata->charger_reference_current_ma) {
+ mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
+ if (mbcc5 > 255)
+ mbcc5 = 255;
+ pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
+ }
+
+ mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
/* If chgmod == BATFULL, setting chgena has no effect.
- * We need to set resume instead.
+ * Datasheet says we need to set resume instead but when autoresume is
+ * used resume doesn't work. Clear and set chgena instead.
*/
if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
- else
+ else {
+ pcf50633_reg_clear_bits(pcf, PCF50633_REG_MBCC1,
+ PCF50633_MBCC1_CHGENA);
pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
-
- mbc->usb_active = charging_start;
+ PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
+ }
power_supply_changed(&mbc->usb);
@@ -92,20 +113,44 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf)
{
struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
int status = 0;
+ u8 chgmod;
+
+ if (!mbc)
+ return 0;
+
+ chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2)
+ & PCF50633_MBCS2_MBC_MASK;
if (mbc->usb_online)
status |= PCF50633_MBC_USB_ONLINE;
- if (mbc->usb_active)
+ if (chgmod == PCF50633_MBCS2_MBC_USB_PRE ||
+ chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT ||
+ chgmod == PCF50633_MBCS2_MBC_USB_FAST ||
+ chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT)
status |= PCF50633_MBC_USB_ACTIVE;
if (mbc->adapter_online)
status |= PCF50633_MBC_ADAPTER_ONLINE;
- if (mbc->adapter_active)
+ if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE ||
+ chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT ||
+ chgmod == PCF50633_MBCS2_MBC_ADP_FAST ||
+ chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT)
status |= PCF50633_MBC_ADAPTER_ACTIVE;
return status;
}
EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf)
+{
+ struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
+
+ if (!mbc)
+ return 0;
+
+ return mbc->usb_online;
+}
+EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status);
+
static ssize_t
show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -156,9 +201,55 @@ static ssize_t set_usblim(struct device *dev,
static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim);
+static ssize_t
+show_chglim(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+ u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5);
+ unsigned int ma;
+
+ if (!mbc->pcf->pdata->charger_reference_current_ma)
+ return -ENODEV;
+
+ ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8;
+
+ return sprintf(buf, "%u\n", ma);
+}
+
+static ssize_t set_chglim(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+ unsigned long ma;
+ unsigned int mbcc5;
+ int ret;
+
+ if (!mbc->pcf->pdata->charger_reference_current_ma)
+ return -ENODEV;
+
+ ret = strict_strtoul(buf, 10, &ma);
+ if (ret)
+ return -EINVAL;
+
+ mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
+ if (mbcc5 > 255)
+ mbcc5 = 255;
+ pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
+
+ return count;
+}
+
+/*
+ * This attribute allows changing the MBC charging limit on the fly,
+ * independently of the USB current limit. It is also set automatically
+ * every time the USB current limit is changed.
+ */
+static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim);
+
static struct attribute *pcf50633_mbc_sysfs_entries[] = {
&dev_attr_chgmode.attr,
&dev_attr_usb_curlim.attr,
+ &dev_attr_chg_curlim.attr,
NULL,
};
@@ -167,76 +258,26 @@ static struct attribute_group mbc_attr_group = {
.attrs = pcf50633_mbc_sysfs_entries,
};
-/* MBC state machine switches into charging mode when the battery voltage
- * falls below 96% of a battery float voltage. But the voltage drop in Li-ion
- * batteries is marginal(1~2 %) till about 80% of its capacity - which means,
- * after a BATFULL, charging won't be restarted until 80%.
- *
- * This work_struct function restarts charging at regular intervals to make
- * sure we don't discharge too much
- */
-
-static void pcf50633_mbc_charging_restart(struct work_struct *work)
-{
- struct pcf50633_mbc *mbc;
- u8 mbcs2, chgmod;
-
- mbc = container_of(work, struct pcf50633_mbc,
- charging_restart_work.work);
-
- mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
- chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
-
- if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
- return;
-
- /* Restart charging */
- pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
- mbc->usb_active = 1;
- power_supply_changed(&mbc->usb);
-
- dev_info(mbc->pcf->dev, "Charging restarted\n");
-}
-
static void
pcf50633_mbc_irq_handler(int irq, void *data)
{
struct pcf50633_mbc *mbc = data;
- int chg_restart_interval =
- mbc->pcf->pdata->charging_restart_interval;
/* USB */
if (irq == PCF50633_IRQ_USBINS) {
mbc->usb_online = 1;
} else if (irq == PCF50633_IRQ_USBREM) {
mbc->usb_online = 0;
- mbc->usb_active = 0;
pcf50633_mbc_usb_curlim_set(mbc->pcf, 0);
- cancel_delayed_work_sync(&mbc->charging_restart_work);
}
/* Adapter */
- if (irq == PCF50633_IRQ_ADPINS) {
+ if (irq == PCF50633_IRQ_ADPINS)
mbc->adapter_online = 1;
- mbc->adapter_active = 1;
- } else if (irq == PCF50633_IRQ_ADPREM) {
+ else if (irq == PCF50633_IRQ_ADPREM)
mbc->adapter_online = 0;
- mbc->adapter_active = 0;
- }
-
- if (irq == PCF50633_IRQ_BATFULL) {
- mbc->usb_active = 0;
- mbc->adapter_active = 0;
-
- if (chg_restart_interval > 0)
- schedule_delayed_work(&mbc->charging_restart_work,
- chg_restart_interval);
- } else if (irq == PCF50633_IRQ_USBLIMON)
- mbc->usb_active = 0;
- else if (irq == PCF50633_IRQ_USBLIMOFF)
- mbc->usb_active = 1;
+ power_supply_changed(&mbc->ac);
power_supply_changed(&mbc->usb);
power_supply_changed(&mbc->adapter);
@@ -269,10 +310,34 @@ static int usb_get_property(struct power_supply *psy,
{
struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb);
int ret = 0;
+ u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
+ PCF50633_MBCC7_USB_MASK;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = mbc->usb_online;
+ val->intval = mbc->usb_online &&
+ (usblim <= PCF50633_MBCC7_USB_500mA);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, ac);
+ int ret = 0;
+ u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
+ PCF50633_MBCC7_USB_MASK;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = mbc->usb_online &&
+ (usblim == PCF50633_MBCC7_USB_1000mA);
break;
default:
ret = -EINVAL;
@@ -303,7 +368,6 @@ static const u8 mbc_irq_handlers[] = {
static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
{
struct pcf50633_mbc *mbc;
- struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
int ret;
int i;
u8 mbcs1;
@@ -313,7 +377,7 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, mbc);
- mbc->pcf = pdata->pcf;
+ mbc->pcf = dev_to_pcf50633(pdev->dev.parent);
/* Set up IRQ handlers */
for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
@@ -337,6 +401,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
mbc->usb.supplied_to = mbc->pcf->pdata->batteries;
mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries;
+ mbc->ac.name = "ac";
+ mbc->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ mbc->ac.properties = power_props;
+ mbc->ac.num_properties = ARRAY_SIZE(power_props);
+ mbc->ac.get_property = ac_get_property;
+ mbc->ac.supplied_to = mbc->pcf->pdata->batteries;
+ mbc->ac.num_supplicants = mbc->pcf->pdata->num_batteries;
+
ret = power_supply_register(&pdev->dev, &mbc->adapter);
if (ret) {
dev_err(mbc->pcf->dev, "failed to register adapter\n");
@@ -352,8 +424,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
return ret;
}
- INIT_DELAYED_WORK(&mbc->charging_restart_work,
- pcf50633_mbc_charging_restart);
+ ret = power_supply_register(&pdev->dev, &mbc->ac);
+ if (ret) {
+ dev_err(mbc->pcf->dev, "failed to register ac\n");
+ power_supply_unregister(&mbc->adapter);
+ power_supply_unregister(&mbc->usb);
+ kfree(mbc);
+ return ret;
+ }
ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
if (ret)
@@ -379,8 +457,7 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
power_supply_unregister(&mbc->usb);
power_supply_unregister(&mbc->adapter);
-
- cancel_delayed_work_sync(&mbc->charging_restart_work);
+ power_supply_unregister(&mbc->ac);
kfree(mbc);
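
The MBCC5 value written in pcf50633_mbc_usb_curlim_set() and set_chglim() above expresses the charging limit as a fraction of the board's reference current in 1/256 steps, saturating at 255. A small sketch of the same arithmetic (the 1000 mA reference is only an example; the real value comes from platform data):

#include <stdio.h>

/* mbcc5 = ma * 256 / reference_ma, clamped to the 8-bit register range. */
static unsigned int mbcc5_for_limit(unsigned int ma, unsigned int reference_ma)
{
	unsigned int mbcc5 = (ma << 8) / reference_ma;

	return mbcc5 > 255 ? 255 : mbcc5;
}

int main(void)
{
	/* 500 mA of a 1000 mA reference -> 128; 1000 mA -> clamped to 255 */
	printf("%u %u\n", mbcc5_for_limit(500, 1000),
	       mbcc5_for_limit(1000, 1000));
	return 0;
}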
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 08144393d64b..c790e0c77d4b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -65,7 +65,10 @@ static ssize_t power_supply_show_property(struct device *dev,
ret = psy->get_property(psy, off, &value);
if (ret < 0) {
- if (ret != -ENODEV)
+ if (ret == -ENODATA)
+ dev_dbg(dev, "driver has no data for `%s' property\n",
+ attr->attr.name);
+ else if (ret != -ENODEV)
dev_err(dev, "driver failed to report `%s' property\n",
attr->attr.name);
return ret;
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
new file mode 100644
index 000000000000..bf4f387a8009
--- /dev/null
+++ b/drivers/power/wm831x_backup.c
@@ -0,0 +1,233 @@
+/*
+ * Backup battery driver for Wolfson Microelectronics wm831x PMICs
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/pmu.h>
+#include <linux/mfd/wm831x/pdata.h>
+
+struct wm831x_backup {
+ struct wm831x *wm831x;
+ struct power_supply backup;
+};
+
+static int wm831x_backup_read_voltage(struct wm831x *wm831x,
+ enum wm831x_auxadc src,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = wm831x_auxadc_read_uv(wm831x, src);
+ if (ret >= 0)
+ val->intval = ret;
+
+ return ret;
+}
+
+/*********************************************************************
+ * Backup supply properties
+ *********************************************************************/
+
+static void wm831x_config_backup(struct wm831x *wm831x)
+{
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
+ struct wm831x_backup_pdata *pdata;
+ int ret, reg;
+
+ if (!wm831x_pdata || !wm831x_pdata->backup) {
+ dev_warn(wm831x->dev,
+ "No backup battery charger configuration\n");
+ return;
+ }
+
+ pdata = wm831x_pdata->backup;
+
+ reg = 0;
+
+ if (pdata->charger_enable)
+ reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
+ if (pdata->no_constant_voltage)
+ reg |= WM831X_BKUP_CHG_MODE;
+
+ switch (pdata->vlim) {
+ case 2500:
+ break;
+ case 3100:
+ reg |= WM831X_BKUP_CHG_VLIM;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
+ pdata->vlim);
+ }
+
+ switch (pdata->ilim) {
+ case 100:
+ break;
+ case 200:
+ reg |= 1;
+ break;
+ case 300:
+ reg |= 2;
+ break;
+ case 400:
+ reg |= 3;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
+ pdata->ilim);
+ }
+
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
+ WM831X_BKUP_CHG_ENA_MASK |
+ WM831X_BKUP_CHG_MODE_MASK |
+ WM831X_BKUP_BATT_DET_ENA_MASK |
+ WM831X_BKUP_CHG_VLIM_MASK |
+ WM831X_BKUP_CHG_ILIM_MASK,
+ reg);
+ if (ret != 0)
+ dev_err(wm831x->dev,
+ "Failed to set backup charger config: %d\n", ret);
+
+ wm831x_reg_lock(wm831x);
+}
+
+static int wm831x_backup_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wm831x_backup *devdata = dev_get_drvdata(psy->dev->parent);
+ struct wm831x *wm831x = devdata->wm831x;
+ int ret = 0;
+
+ ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (ret & WM831X_BKUP_CHG_STS)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
+ val);
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (ret & WM831X_BKUP_CHG_STS)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property wm831x_backup_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+/*********************************************************************
+ * Initialisation
+ *********************************************************************/
+
+static __devinit int wm831x_backup_probe(struct platform_device *pdev)
+{
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_backup *devdata;
+ struct power_supply *backup;
+ int ret;
+
+ devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL);
+ if (devdata == NULL)
+ return -ENOMEM;
+
+ devdata->wm831x = wm831x;
+ platform_set_drvdata(pdev, devdata);
+
+ backup = &devdata->backup;
+
+ /* We ignore configuration failures since we can still read
+ * back the status without enabling the charger (which may
+ * already be enabled anyway).
+ */
+ wm831x_config_backup(wm831x);
+
+ backup->name = "wm831x-backup";
+ backup->type = POWER_SUPPLY_TYPE_BATTERY;
+ backup->properties = wm831x_backup_props;
+ backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
+ backup->get_property = wm831x_backup_get_prop;
+ ret = power_supply_register(&pdev->dev, backup);
+ if (ret)
+ goto err_kmalloc;
+
+ return ret;
+
+err_kmalloc:
+ kfree(devdata);
+ return ret;
+}
+
+static __devexit int wm831x_backup_remove(struct platform_device *pdev)
+{
+ struct wm831x_backup *devdata = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&devdata->backup);
+ kfree(devdata);
+
+ return 0;
+}
+
+static struct platform_driver wm831x_backup_driver = {
+ .probe = wm831x_backup_probe,
+ .remove = __devexit_p(wm831x_backup_remove),
+ .driver = {
+ .name = "wm831x-backup",
+ },
+};
+
+static int __init wm831x_backup_init(void)
+{
+ return platform_driver_register(&wm831x_backup_driver);
+}
+module_init(wm831x_backup_init);
+
+static void __exit wm831x_backup_exit(void)
+{
+ platform_driver_unregister(&wm831x_backup_driver);
+}
+module_exit(wm831x_backup_exit);
+
+MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-backup");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 2a4c8b0b829c..f85e80b1b400 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -21,7 +21,6 @@
struct wm831x_power {
struct wm831x *wm831x;
struct power_supply wall;
- struct power_supply backup;
struct power_supply usb;
struct power_supply battery;
};
@@ -454,125 +453,6 @@ static irqreturn_t wm831x_bat_irq(int irq, void *data)
/*********************************************************************
- * Backup supply properties
- *********************************************************************/
-
-static void wm831x_config_backup(struct wm831x *wm831x)
-{
- struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
- struct wm831x_backup_pdata *pdata;
- int ret, reg;
-
- if (!wm831x_pdata || !wm831x_pdata->backup) {
- dev_warn(wm831x->dev,
- "No backup battery charger configuration\n");
- return;
- }
-
- pdata = wm831x_pdata->backup;
-
- reg = 0;
-
- if (pdata->charger_enable)
- reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
- if (pdata->no_constant_voltage)
- reg |= WM831X_BKUP_CHG_MODE;
-
- switch (pdata->vlim) {
- case 2500:
- break;
- case 3100:
- reg |= WM831X_BKUP_CHG_VLIM;
- break;
- default:
- dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
- pdata->vlim);
- }
-
- switch (pdata->ilim) {
- case 100:
- break;
- case 200:
- reg |= 1;
- break;
- case 300:
- reg |= 2;
- break;
- case 400:
- reg |= 3;
- break;
- default:
- dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
- pdata->ilim);
- }
-
- ret = wm831x_reg_unlock(wm831x);
- if (ret != 0) {
- dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
- return;
- }
-
- ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
- WM831X_BKUP_CHG_ENA_MASK |
- WM831X_BKUP_CHG_MODE_MASK |
- WM831X_BKUP_BATT_DET_ENA_MASK |
- WM831X_BKUP_CHG_VLIM_MASK |
- WM831X_BKUP_CHG_ILIM_MASK,
- reg);
- if (ret != 0)
- dev_err(wm831x->dev,
- "Failed to set backup charger config: %d\n", ret);
-
- wm831x_reg_lock(wm831x);
-}
-
-static int wm831x_backup_get_prop(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
- struct wm831x *wm831x = wm831x_power->wm831x;
- int ret = 0;
-
- ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
- if (ret < 0)
- return ret;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- if (ret & WM831X_BKUP_CHG_STS)
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
- else
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
- break;
-
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
- val);
- break;
-
- case POWER_SUPPLY_PROP_PRESENT:
- if (ret & WM831X_BKUP_CHG_STS)
- val->intval = 1;
- else
- val->intval = 0;
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static enum power_supply_property wm831x_backup_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_PRESENT,
-};
-
-/*********************************************************************
* Initialisation
*********************************************************************/
@@ -595,10 +475,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
dev_dbg(wm831x->dev, "Power source changed\n");
- /* Just notify for everything - little harm in overnotifying.
- * The backup battery is not a power source while the system
- * is running so skip that.
- */
+ /* Just notify for everything - little harm in overnotifying. */
power_supply_changed(&wm831x_power->battery);
power_supply_changed(&wm831x_power->usb);
power_supply_changed(&wm831x_power->wall);
@@ -613,7 +490,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
struct power_supply *usb;
struct power_supply *battery;
struct power_supply *wall;
- struct power_supply *backup;
int ret, irq, i;
power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL);
@@ -626,13 +502,11 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
usb = &power->usb;
battery = &power->battery;
wall = &power->wall;
- backup = &power->backup;
/* We ignore configuration failures since we can still read back
- * the status without enabling either of the chargers.
+ * the status without enabling the charger.
*/
wm831x_config_battery(wm831x);
- wm831x_config_backup(wm831x);
wall->name = "wm831x-wall";
wall->type = POWER_SUPPLY_TYPE_MAINS;
@@ -661,15 +535,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_battery;
- backup->name = "wm831x-backup";
- backup->type = POWER_SUPPLY_TYPE_BATTERY;
- backup->properties = wm831x_backup_props;
- backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
- backup->get_property = wm831x_backup_get_prop;
- ret = power_supply_register(&pdev->dev, backup);
- if (ret)
- goto err_usb;
-
irq = platform_get_irq_byname(pdev, "SYSLO");
ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq,
IRQF_TRIGGER_RISING, "SYSLO",
@@ -677,7 +542,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
irq, ret);
- goto err_backup;
+ goto err_usb;
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
@@ -716,8 +581,6 @@ err_bat_irq:
err_syslo:
irq = platform_get_irq_byname(pdev, "SYSLO");
wm831x_free_irq(wm831x, irq, power);
-err_backup:
- power_supply_unregister(backup);
err_usb:
power_supply_unregister(usb);
err_battery:
@@ -746,7 +609,6 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, "SYSLO");
wm831x_free_irq(wm831x, irq, wm831x_power);
- power_supply_unregister(&wm831x_power->backup);
power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
power_supply_unregister(&wm831x_power->usb);
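
The error-path relabelling above follows the usual reverse-order unwind pattern: each failure jumps to a label that releases only what has already been set up. A stand-alone sketch of that structure (resources a and b are hypothetical placeholders, not the driver's supplies):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe_sketch(int fail_late)
{
	void *a, *b;
	int ret;

	a = malloc(16);
	if (!a)
		return -ENOMEM;

	b = malloc(16);
	if (!b) {
		ret = -ENOMEM;
		goto err_a;		/* only a exists yet */
	}

	if (fail_late) {
		ret = -EIO;
		goto err_b;		/* undo b, then fall through to a */
	}

	return 0;	/* on success the resources stay allocated, as in a real probe() */

err_b:
	free(b);
err_a:
	free(a);
	return ret;
}

int main(void)
{
	printf("%d %d\n", probe_sketch(0), probe_sketch(1));
	return 0;
}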
diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c
index 28b0299c0043..ad4f071e1287 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/wm8350_power.c
@@ -184,8 +184,9 @@ static ssize_t charger_state_show(struct device *dev,
static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
-static void wm8350_charger_handler(struct wm8350 *wm8350, int irq, void *data)
+static irqreturn_t wm8350_charger_handler(int irq, void *data)
{
+ struct wm8350 *wm8350 = data;
struct wm8350_power *power = &wm8350->power;
struct wm8350_charger_policy *policy = power->policy;
@@ -238,6 +239,8 @@ static void wm8350_charger_handler(struct wm8350 *wm8350, int irq, void *data)
default:
dev_err(wm8350->dev, "Unknown interrupt %d\n", irq);
}
+
+ return IRQ_HANDLED;
}
/*********************************************************************
@@ -387,73 +390,55 @@ static void wm8350_init_charger(struct wm8350 *wm8350)
{
/* register our interest in charger events */
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
+ wm8350_charger_handler, 0, "Battery hot", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
+ wm8350_charger_handler, 0, "Battery cold", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
+ wm8350_charger_handler, 0, "Battery fail", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_TO);
+ wm8350_charger_handler, 0,
+ "Charger timeout", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_END);
+ wm8350_charger_handler, 0,
+ "Charge end", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_START);
+ wm8350_charger_handler, 0,
+ "Charge start", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY);
+ wm8350_charger_handler, 0,
+ "Fast charge ready", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
+ wm8350_charger_handler, 0,
+ "Battery <3.9V", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
+ wm8350_charger_handler, 0,
+ "Battery <3.1V", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
+ wm8350_charger_handler, 0,
+ "Battery <2.85V", wm8350);
/* and supply change events */
wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
+ wm8350_charger_handler, 0, "USB", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
+ wm8350_charger_handler, 0, "Wall", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
+ wm8350_charger_handler, 0, "Battery", wm8350);
}
static void free_charger_irq(struct wm8350 *wm8350)
{
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_TO);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_END);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_START);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
- wm8350_mask_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
- wm8350_mask_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
- wm8350_mask_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
}
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index f2bfd296dbae..fa39e759a275 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -157,7 +157,7 @@ static int wm97xx_bat_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops wm97xx_bat_pm_ops = {
+static const struct dev_pm_ops wm97xx_bat_pm_ops = {
.suspend = wm97xx_bat_suspend,
.resume = wm97xx_bat_resume,
};
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index bcbb161bde0b..7cfdd65bebb4 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -70,7 +70,7 @@ config REGULATOR_MAX1586
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
config REGULATOR_TWL4030
- bool "TI TWL4030/TWL5030/TPS695x0 PMIC"
+ bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
depends on TWL4030_CORE
help
This driver supports the voltage regulators provided by
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 4257a8683778..9ae3cc44e668 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
-obj-$(CONFIG_REGULATOR_TWL4030) += twl4030-regulator.o
+obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 0803ffe6236d..c8f41dc05b76 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -314,13 +314,15 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
struct pcf50633 *pcf;
/* Already set by core driver */
- pcf = platform_get_drvdata(pdev);
+ pcf = dev_to_pcf50633(pdev->dev.parent);
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
pdev->dev.platform_data, pcf);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
+ platform_set_drvdata(pdev, rdev);
+
if (pcf->pdata->regulator_registered)
pcf->pdata->regulator_registered(pcf, pdev->id);
@@ -331,6 +333,7 @@ static int __devexit pcf50633_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
regulator_unregister(rdev);
return 0;
diff --git a/drivers/regulator/twl4030-regulator.c b/drivers/regulator/twl-regulator.c
index e2032fb60b55..7ea1c3a31081 100644
--- a/drivers/regulator/twl4030-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1,5 +1,5 @@
/*
- * twl4030-regulator.c -- support regulators in twl4030 family chips
+ * twl-regulator.c -- support regulators in twl4030/twl6030 family chips
*
* Copyright (C) 2008 David Brownell
*
@@ -15,11 +15,11 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
- * The TWL4030/TW5030/TPS659x0 family chips include power management, a
+ * The TWL4030/TWL5030/TPS659x0/TWL6030 family chips include power management, a
* USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions
* include an audio codec, battery charger, and more voltage regulators.
* These chips are often used in OMAP-based systems.
@@ -33,7 +33,7 @@ struct twlreg_info {
/* start of regulator's PM_RECEIVER control register bank */
u8 base;
- /* twl4030 resource ID, for resource control state machine */
+ /* twl resource ID, for resource control state machine */
u8 id;
/* voltage in mV = table[VSEL]; table_len must be a power-of-two */
@@ -52,27 +52,38 @@ struct twlreg_info {
* The first three registers of all power resource banks help hardware to
* manage the various resource groups.
*/
+/* Common offset in TWL4030/6030 */
#define VREG_GRP 0
+/* TWL4030 register offsets */
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
-
+/* TWL6030 register offsets */
+#define VREG_TRANS 1
+#define VREG_STATE 2
+#define VREG_VOLTAGE 3
+/* TWL6030 Misc register offsets */
+#define VREG_BC_ALL 1
+#define VREG_BC_REF 2
+#define VREG_BC_PROC 3
+#define VREG_BC_CLK_RST 4
static inline int
-twl4030reg_read(struct twlreg_info *info, unsigned offset)
+twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
u8 value;
int status;
- status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER,
+ status = twl_i2c_read_u8(slave_subgp,
&value, info->base + offset);
return (status < 0) ? status : value;
}
static inline int
-twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value)
+twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset,
+ u8 value)
{
- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ return twl_i2c_write_u8(slave_subgp,
value, info->base + offset);
}
@@ -80,59 +91,79 @@ twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value)
/* generic power resource operations, which work on all regulators */
-static int twl4030reg_grp(struct regulator_dev *rdev)
+static int twlreg_grp(struct regulator_dev *rdev)
{
- return twl4030reg_read(rdev_get_drvdata(rdev), VREG_GRP);
+ return twlreg_read(rdev_get_drvdata(rdev), TWL_MODULE_PM_RECEIVER,
+ VREG_GRP);
}
/*
* Enable/disable regulators by joining/leaving the P1 (processor) group.
* We assume nobody else is updating the DEV_GRP registers.
*/
-
-#define P3_GRP BIT(7) /* "peripherals" */
-#define P2_GRP BIT(6) /* secondary processor, modem, etc */
-#define P1_GRP BIT(5) /* CPU/Linux */
-
-static int twl4030reg_is_enabled(struct regulator_dev *rdev)
+/* definition for 4030 family */
+#define P3_GRP_4030 BIT(7) /* "peripherals" */
+#define P2_GRP_4030 BIT(6) /* secondary processor, modem, etc */
+#define P1_GRP_4030 BIT(5) /* CPU/Linux */
+/* definition for 6030 family */
+#define P3_GRP_6030 BIT(2) /* secondary processor, modem, etc */
+#define P2_GRP_6030 BIT(1) /* "peripherals" */
+#define P1_GRP_6030 BIT(0) /* CPU/Linux */
+
+static int twlreg_is_enabled(struct regulator_dev *rdev)
{
- int state = twl4030reg_grp(rdev);
+ int state = twlreg_grp(rdev);
if (state < 0)
return state;
- return (state & P1_GRP) != 0;
+ if (twl_class_is_4030())
+ state &= P1_GRP_4030;
+ else
+ state &= P1_GRP_6030;
+ return state;
}
-static int twl4030reg_enable(struct regulator_dev *rdev)
+static int twlreg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
- grp = twl4030reg_read(info, VREG_GRP);
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
return grp;
- grp |= P1_GRP;
- return twl4030reg_write(info, VREG_GRP, grp);
+ if (twl_class_is_4030())
+ grp |= P1_GRP_4030;
+ else
+ grp |= P1_GRP_6030;
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
}
-static int twl4030reg_disable(struct regulator_dev *rdev)
+static int twlreg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
- grp = twl4030reg_read(info, VREG_GRP);
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
return grp;
- grp &= ~P1_GRP;
- return twl4030reg_write(info, VREG_GRP, grp);
+ if (twl_class_is_4030())
+ grp &= ~P1_GRP_4030;
+ else
+ grp &= ~P1_GRP_6030;
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
}
-static int twl4030reg_get_status(struct regulator_dev *rdev)
+static int twlreg_get_status(struct regulator_dev *rdev)
{
- int state = twl4030reg_grp(rdev);
+ int state = twlreg_grp(rdev);
+
+ if (twl_class_is_6030())
+ return 0; /* FIXME return for 6030 regulator */
if (state < 0)
return state;
@@ -146,12 +177,15 @@ static int twl4030reg_get_status(struct regulator_dev *rdev)
: REGULATOR_STATUS_STANDBY;
}
-static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
unsigned message;
int status;
+ if (twl_class_is_6030())
+ return 0; /* FIXME return for 6030 regulator */
+
/* We can only set the mode through state machine commands... */
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -165,18 +199,18 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
}
/* Ensure the resource is associated with some group */
- status = twl4030reg_grp(rdev);
+ status = twlreg_grp(rdev);
if (status < 0)
return status;
- if (!(status & (P3_GRP | P2_GRP | P1_GRP)))
+ if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030)))
return -EACCES;
- status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
message >> 8, 0x15 /* PB_WORD_MSB */ );
if (status >= 0)
return status;
- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
message, 0x16 /* PB_WORD_LSB */ );
}
@@ -260,9 +294,31 @@ static const u16 VSIM_VSEL_table[] = {
static const u16 VDAC_VSEL_table[] = {
1200, 1300, 1800, 1800,
};
+static const u16 VAUX1_6030_VSEL_table[] = {
+ 1000, 1300, 1800, 2500,
+ 2800, 2900, 3000, 3000,
+};
+static const u16 VAUX2_6030_VSEL_table[] = {
+ 1200, 1800, 2500, 2750,
+ 2800, 2800, 2800, 2800,
+};
+static const u16 VAUX3_6030_VSEL_table[] = {
+ 1000, 1200, 1300, 1800,
+ 2500, 2800, 3000, 3000,
+};
+static const u16 VMMC_VSEL_table[] = {
+ 1200, 1800, 2800, 2900,
+ 3000, 3000, 3000, 3000,
+};
+static const u16 VPP_VSEL_table[] = {
+ 1800, 1900, 2000, 2100,
+ 2200, 2300, 2400, 2500,
+};
+static const u16 VUSIM_VSEL_table[] = {
+ 1200, 1800, 2500, 2900,
+};
-
-static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int mV = info->table[index];
@@ -271,7 +327,7 @@ static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
@@ -288,16 +344,18 @@ twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
/* use the first in-range value */
if (min_uV <= uV && uV <= max_uV)
- return twl4030reg_write(info, VREG_DEDICATED, vsel);
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE, vsel);
}
return -EDOM;
}
-static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
+static int twlldo_get_voltage(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = twl4030reg_read(info, VREG_DEDICATED);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE);
if (vsel < 0)
return vsel;
@@ -306,19 +364,19 @@ static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
return LDO_MV(info->table[vsel]) * 1000;
}
-static struct regulator_ops twl4030ldo_ops = {
- .list_voltage = twl4030ldo_list_voltage,
+static struct regulator_ops twlldo_ops = {
+ .list_voltage = twlldo_list_voltage,
- .set_voltage = twl4030ldo_set_voltage,
- .get_voltage = twl4030ldo_get_voltage,
+ .set_voltage = twlldo_set_voltage,
+ .get_voltage = twlldo_get_voltage,
- .enable = twl4030reg_enable,
- .disable = twl4030reg_disable,
- .is_enabled = twl4030reg_is_enabled,
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
- .set_mode = twl4030reg_set_mode,
+ .set_mode = twlreg_set_mode,
- .get_status = twl4030reg_get_status,
+ .get_status = twlreg_get_status,
};
/*----------------------------------------------------------------------*/
@@ -326,60 +384,69 @@ static struct regulator_ops twl4030ldo_ops = {
/*
* Fixed voltage LDOs don't have a VSEL field to update.
*/
-static int twl4030fixed_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twlfixed_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
return info->min_mV * 1000;
}
-static int twl4030fixed_get_voltage(struct regulator_dev *rdev)
+static int twlfixed_get_voltage(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
return info->min_mV * 1000;
}
-static struct regulator_ops twl4030fixed_ops = {
- .list_voltage = twl4030fixed_list_voltage,
+static struct regulator_ops twlfixed_ops = {
+ .list_voltage = twlfixed_list_voltage,
- .get_voltage = twl4030fixed_get_voltage,
+ .get_voltage = twlfixed_get_voltage,
- .enable = twl4030reg_enable,
- .disable = twl4030reg_disable,
- .is_enabled = twl4030reg_is_enabled,
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
- .set_mode = twl4030reg_set_mode,
+ .set_mode = twlreg_set_mode,
- .get_status = twl4030reg_get_status,
+ .get_status = twlreg_get_status,
};
/*----------------------------------------------------------------------*/
-#define TWL_ADJUSTABLE_LDO(label, offset, num) { \
+#define TWL4030_ADJUSTABLE_LDO(label, offset, num) \
+ TWL_ADJUSTABLE_LDO(label, offset, num, TWL4030)
+#define TWL4030_FIXED_LDO(label, offset, mVolts, num) \
+ TWL_FIXED_LDO(label, offset, mVolts, num, TWL4030)
+#define TWL6030_ADJUSTABLE_LDO(label, offset, num) \
+ TWL_ADJUSTABLE_LDO(label, offset, num, TWL6030)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num) \
+ TWL_FIXED_LDO(label, offset, mVolts, num, TWL6030)
+
+#define TWL_ADJUSTABLE_LDO(label, offset, num, family) { \
.base = offset, \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
.table = label##_VSEL_table, \
.desc = { \
.name = #label, \
- .id = TWL4030_REG_##label, \
+ .id = family##_REG_##label, \
.n_voltages = ARRAY_SIZE(label##_VSEL_table), \
- .ops = &twl4030ldo_ops, \
+ .ops = &twlldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL_FIXED_LDO(label, offset, mVolts, num) { \
+#define TWL_FIXED_LDO(label, offset, mVolts, num, family) { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
.desc = { \
.name = #label, \
- .id = TWL4030_REG_##label, \
+ .id = family##_REG_##label, \
.n_voltages = 1, \
- .ops = &twl4030fixed_ops, \
+ .ops = &twlfixed_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
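As a reading aid (not part of the patch), this is roughly what one of the new family-specific wrappers expands to once the family parameter has been pasted in; VAUX1_VSEL_table, TWL4030_REG_VAUX1 and twlldo_ops are the names the macro itself references:

    /* illustrative expansion of TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1) */
    {
        .base = 0x17,
        .id = 1,
        .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
        .table = VAUX1_VSEL_table,
        .desc = {
            .name = "VAUX1",
            .id = TWL4030_REG_VAUX1,
            .n_voltages = ARRAY_SIZE(VAUX1_VSEL_table),
            .ops = &twlldo_ops,
            .type = REGULATOR_VOLTAGE,
            .owner = THIS_MODULE,
        },
    }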
@@ -389,35 +456,47 @@ static struct regulator_ops twl4030fixed_ops = {
* We list regulators here if systems need some level of
* software control over them after boot.
*/
-static struct twlreg_info twl4030_regs[] = {
- TWL_ADJUSTABLE_LDO(VAUX1, 0x17, 1),
- TWL_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2),
- TWL_ADJUSTABLE_LDO(VAUX2, 0x1b, 2),
- TWL_ADJUSTABLE_LDO(VAUX3, 0x1f, 3),
- TWL_ADJUSTABLE_LDO(VAUX4, 0x23, 4),
- TWL_ADJUSTABLE_LDO(VMMC1, 0x27, 5),
- TWL_ADJUSTABLE_LDO(VMMC2, 0x2b, 6),
+static struct twlreg_info twl_regs[] = {
+ TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1),
+ TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2),
+ TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2),
+ TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3),
+ TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4),
+ TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5),
+ TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6),
/*
- TWL_ADJUSTABLE_LDO(VPLL1, 0x2f, 7),
+ TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7),
*/
- TWL_ADJUSTABLE_LDO(VPLL2, 0x33, 8),
- TWL_ADJUSTABLE_LDO(VSIM, 0x37, 9),
- TWL_ADJUSTABLE_LDO(VDAC, 0x3b, 10),
+ TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8),
+ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9),
+ TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10),
/*
- TWL_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11),
- TWL_ADJUSTABLE_LDO(VINTANA2, 0x43, 12),
- TWL_ADJUSTABLE_LDO(VINTDIG, 0x47, 13),
- TWL_SMPS(VIO, 0x4b, 14),
- TWL_SMPS(VDD1, 0x55, 15),
- TWL_SMPS(VDD2, 0x63, 16),
+ TWL4030_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11),
+ TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12),
+ TWL4030_ADJUSTABLE_LDO(VINTDIG, 0x47, 13),
+ TWL4030_SMPS(VIO, 0x4b, 14),
+ TWL4030_SMPS(VDD1, 0x55, 15),
+ TWL4030_SMPS(VDD2, 0x63, 16),
*/
- TWL_FIXED_LDO(VUSB1V5, 0x71, 1500, 17),
- TWL_FIXED_LDO(VUSB1V8, 0x74, 1800, 18),
- TWL_FIXED_LDO(VUSB3V1, 0x77, 3100, 19),
+ TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17),
+ TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18),
+ TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19),
/* VUSBCP is managed *only* by the USB subchip */
+
+ /* TWL6030 regulators; register base sits in the PMC Slave Misc module at 0x0030 */
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18)
};
-static int twl4030reg_probe(struct platform_device *pdev)
+static int twlreg_probe(struct platform_device *pdev)
{
int i;
struct twlreg_info *info;
@@ -425,10 +504,10 @@ static int twl4030reg_probe(struct platform_device *pdev)
struct regulation_constraints *c;
struct regulator_dev *rdev;
- for (i = 0, info = NULL; i < ARRAY_SIZE(twl4030_regs); i++) {
- if (twl4030_regs[i].desc.id != pdev->id)
+ for (i = 0, info = NULL; i < ARRAY_SIZE(twl_regs); i++) {
+ if (twl_regs[i].desc.id != pdev->id)
continue;
- info = twl4030_regs + i;
+ info = twl_regs + i;
break;
}
if (!info)
@@ -466,35 +545,35 @@ static int twl4030reg_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit twl4030reg_remove(struct platform_device *pdev)
+static int __devexit twlreg_remove(struct platform_device *pdev)
{
regulator_unregister(platform_get_drvdata(pdev));
return 0;
}
-MODULE_ALIAS("platform:twl4030_reg");
+MODULE_ALIAS("platform:twl_reg");
-static struct platform_driver twl4030reg_driver = {
- .probe = twl4030reg_probe,
- .remove = __devexit_p(twl4030reg_remove),
+static struct platform_driver twlreg_driver = {
+ .probe = twlreg_probe,
+ .remove = __devexit_p(twlreg_remove),
/* NOTE: short name, to work around driver model truncation of
- * "twl4030_regulator.12" (and friends) to "twl4030_regulator.1".
+ * "twl_regulator.12" (and friends) to "twl_regulator.1".
*/
- .driver.name = "twl4030_reg",
+ .driver.name = "twl_reg",
.driver.owner = THIS_MODULE,
};
-static int __init twl4030reg_init(void)
+static int __init twlreg_init(void)
{
- return platform_driver_register(&twl4030reg_driver);
+ return platform_driver_register(&twlreg_driver);
}
-subsys_initcall(twl4030reg_init);
+subsys_initcall(twlreg_init);
-static void __exit twl4030reg_exit(void)
+static void __exit twlreg_exit(void)
{
- platform_driver_unregister(&twl4030reg_driver);
+ platform_driver_unregister(&twlreg_driver);
}
-module_exit(twl4030reg_exit)
+module_exit(twlreg_exit)
-MODULE_DESCRIPTION("TWL4030 regulator driver");
+MODULE_DESCRIPTION("TWL regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 768bd0e5b48b..1bbff099a546 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1330,9 +1330,10 @@ static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
},
};
-static void pmic_uv_handler(struct wm8350 *wm8350, int irq, void *data)
+static irqreturn_t pmic_uv_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
+ struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
mutex_lock(&rdev->mutex);
if (irq == WM8350_IRQ_CS1 || irq == WM8350_IRQ_CS2)
@@ -1344,6 +1345,8 @@ static void pmic_uv_handler(struct wm8350 *wm8350, int irq, void *data)
REGULATOR_EVENT_UNDER_VOLTAGE,
wm8350);
mutex_unlock(&rdev->mutex);
+
+ return IRQ_HANDLED;
}
static int wm8350_regulator_probe(struct platform_device *pdev)
@@ -1388,7 +1391,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
/* register regulator IRQ */
ret = wm8350_register_irq(wm8350, wm8350_reg[pdev->id].irq,
- pmic_uv_handler, rdev);
+ pmic_uv_handler, 0, "UV", rdev);
if (ret < 0) {
regulator_unregister(rdev);
dev_err(&pdev->dev, "failed to register regulator %s IRQ\n",
@@ -1396,8 +1399,6 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
return ret;
}
- wm8350_unmask_irq(wm8350, wm8350_reg[pdev->id].irq);
-
return 0;
}
@@ -1406,7 +1407,6 @@ static int wm8350_regulator_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- wm8350_mask_irq(wm8350, wm8350_reg[pdev->id].irq);
wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq);
regulator_unregister(rdev);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f2e1004d12c7..71fbd6e8edf7 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -258,14 +258,14 @@ config RTC_DRV_TWL92330
the Menelaus driver; it's not a separate module.
config RTC_DRV_TWL4030
- tristate "TI TWL4030/TWL5030/TPS659x0"
+ tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0"
depends on RTC_CLASS && TWL4030_CORE
help
If you say yes here you get support for the RTC on the
- TWL4030 family chips, used mostly with OMAP3 platforms.
+ TWL4030/TWL5030/TWL6030 family chips, used mostly with OMAP3 and OMAP4 platforms.
This driver can also be built as a module. If so, the module
- will be called rtc-twl4030.
+ will be called rtc-twl.
config RTC_DRV_S35390A
tristate "Seiko Instruments S-35390A"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index af1ba7ae2857..7da6efb3e953 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -80,7 +80,7 @@ obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
-obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
+obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 4c5d5d0c4cfc..9b74e9c9151c 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -277,16 +277,13 @@ static void pcf50633_rtc_irq(int irq, void *data)
static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
{
- struct pcf50633_subdev_pdata *pdata;
struct pcf50633_rtc *rtc;
-
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- pdata = pdev->dev.platform_data;
- rtc->pcf = pdata->pcf;
+ rtc->pcf = dev_to_pcf50633(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = rtc_device_register("pcf50633-rtc", &pdev->dev,
&pcf50633_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 747ca194fad4..e6351b743da6 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -456,7 +456,7 @@ static int pxa_rtc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pxa_rtc_pm_ops = {
+static const struct dev_pm_ops pxa_rtc_pm_ops = {
.suspend = pxa_rtc_suspend,
.resume = pxa_rtc_resume,
};
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 29f98a70586e..e4a44b641702 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -407,7 +407,7 @@ static int sa1100_rtc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sa1100_rtc_pm_ops = {
+static const struct dev_pm_ops sa1100_rtc_pm_ops = {
.suspend = sa1100_rtc_suspend,
.resume = sa1100_rtc_resume,
};
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index e6ed5404bca0..e95cc6f8d61e 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -826,7 +826,7 @@ static int sh_rtc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_rtc_dev_pm_ops = {
+static const struct dev_pm_ops sh_rtc_dev_pm_ops = {
.suspend = sh_rtc_suspend,
.resume = sh_rtc_resume,
};
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl.c
index 9c8c70c497dc..c6a83a2a722c 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl.c
@@ -1,5 +1,5 @@
/*
- * rtc-twl4030.c -- TWL4030 Real Time Clock interface
+ * rtc-twl.c -- TWL Real Time Clock interface
*
* Copyright (C) 2007 MontaVista Software, Inc
* Author: Alexandre Rusev <source@mvista.com>
@@ -28,33 +28,81 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
* RTC block register offsets (use TWL_MODULE_RTC)
*/
-#define REG_SECONDS_REG 0x00
-#define REG_MINUTES_REG 0x01
-#define REG_HOURS_REG 0x02
-#define REG_DAYS_REG 0x03
-#define REG_MONTHS_REG 0x04
-#define REG_YEARS_REG 0x05
-#define REG_WEEKS_REG 0x06
-
-#define REG_ALARM_SECONDS_REG 0x07
-#define REG_ALARM_MINUTES_REG 0x08
-#define REG_ALARM_HOURS_REG 0x09
-#define REG_ALARM_DAYS_REG 0x0A
-#define REG_ALARM_MONTHS_REG 0x0B
-#define REG_ALARM_YEARS_REG 0x0C
-
-#define REG_RTC_CTRL_REG 0x0D
-#define REG_RTC_STATUS_REG 0x0E
-#define REG_RTC_INTERRUPTS_REG 0x0F
-
-#define REG_RTC_COMP_LSB_REG 0x10
-#define REG_RTC_COMP_MSB_REG 0x11
+enum {
+ REG_SECONDS_REG = 0,
+ REG_MINUTES_REG,
+ REG_HOURS_REG,
+ REG_DAYS_REG,
+ REG_MONTHS_REG,
+ REG_YEARS_REG,
+ REG_WEEKS_REG,
+
+ REG_ALARM_SECONDS_REG,
+ REG_ALARM_MINUTES_REG,
+ REG_ALARM_HOURS_REG,
+ REG_ALARM_DAYS_REG,
+ REG_ALARM_MONTHS_REG,
+ REG_ALARM_YEARS_REG,
+
+ REG_RTC_CTRL_REG,
+ REG_RTC_STATUS_REG,
+ REG_RTC_INTERRUPTS_REG,
+
+ REG_RTC_COMP_LSB_REG,
+ REG_RTC_COMP_MSB_REG,
+};
+static const u8 twl4030_rtc_reg_map[] = {
+ [REG_SECONDS_REG] = 0x00,
+ [REG_MINUTES_REG] = 0x01,
+ [REG_HOURS_REG] = 0x02,
+ [REG_DAYS_REG] = 0x03,
+ [REG_MONTHS_REG] = 0x04,
+ [REG_YEARS_REG] = 0x05,
+ [REG_WEEKS_REG] = 0x06,
+
+ [REG_ALARM_SECONDS_REG] = 0x07,
+ [REG_ALARM_MINUTES_REG] = 0x08,
+ [REG_ALARM_HOURS_REG] = 0x09,
+ [REG_ALARM_DAYS_REG] = 0x0A,
+ [REG_ALARM_MONTHS_REG] = 0x0B,
+ [REG_ALARM_YEARS_REG] = 0x0C,
+
+ [REG_RTC_CTRL_REG] = 0x0D,
+ [REG_RTC_STATUS_REG] = 0x0E,
+ [REG_RTC_INTERRUPTS_REG] = 0x0F,
+
+ [REG_RTC_COMP_LSB_REG] = 0x10,
+ [REG_RTC_COMP_MSB_REG] = 0x11,
+};
+static const u8 twl6030_rtc_reg_map[] = {
+ [REG_SECONDS_REG] = 0x00,
+ [REG_MINUTES_REG] = 0x01,
+ [REG_HOURS_REG] = 0x02,
+ [REG_DAYS_REG] = 0x03,
+ [REG_MONTHS_REG] = 0x04,
+ [REG_YEARS_REG] = 0x05,
+ [REG_WEEKS_REG] = 0x06,
+
+ [REG_ALARM_SECONDS_REG] = 0x08,
+ [REG_ALARM_MINUTES_REG] = 0x09,
+ [REG_ALARM_HOURS_REG] = 0x0A,
+ [REG_ALARM_DAYS_REG] = 0x0B,
+ [REG_ALARM_MONTHS_REG] = 0x0C,
+ [REG_ALARM_YEARS_REG] = 0x0D,
+
+ [REG_RTC_CTRL_REG] = 0x10,
+ [REG_RTC_STATUS_REG] = 0x11,
+ [REG_RTC_INTERRUPTS_REG] = 0x12,
+
+ [REG_RTC_COMP_LSB_REG] = 0x13,
+ [REG_RTC_COMP_MSB_REG] = 0x14,
+};
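The two tables above let the rest of the driver keep indexing registers by the logical enum values while only the chip-specific offsets differ (they diverge from the alarm registers onward). A one-line sketch of the lookup, assuming rtc_reg_map has already been pointed at one of the tables by twl_rtc_init() further down:

    /* sketch: same logical index, different hardware offset per family */
    u8 off = rtc_reg_map[REG_ALARM_SECONDS_REG];   /* 0x07 on TWL4030, 0x08 on TWL6030 */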
/* RTC_CTRL_REG bitfields */
#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01
@@ -84,31 +132,32 @@
#define ALL_TIME_REGS 6
/*----------------------------------------------------------------------*/
+static u8 *rtc_reg_map;
/*
- * Supports 1 byte read from TWL4030 RTC register.
+ * Supports 1 byte read from TWL RTC register.
*/
-static int twl4030_rtc_read_u8(u8 *data, u8 reg)
+static int twl_rtc_read_u8(u8 *data, u8 reg)
{
int ret;
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
+ ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl4030_rtc: Could not read TWL4030"
+ pr_err("twl_rtc: Could not read TWL"
"register %X - error %d\n", reg, ret);
return ret;
}
/*
- * Supports 1 byte write to TWL4030 RTC registers.
+ * Supports 1 byte write to TWL RTC registers.
*/
-static int twl4030_rtc_write_u8(u8 data, u8 reg)
+static int twl_rtc_write_u8(u8 data, u8 reg)
{
int ret;
- ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
+ ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl4030_rtc: Could not write TWL4030"
+ pr_err("twl_rtc: Could not write TWL"
"register %X - error %d\n", reg, ret);
return ret;
}
@@ -129,7 +178,7 @@ static int set_rtc_irq_bit(unsigned char bit)
val = rtc_irq_bits | bit;
val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
- ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
rtc_irq_bits = val;
@@ -145,14 +194,14 @@ static int mask_rtc_irq_bit(unsigned char bit)
int ret;
val = rtc_irq_bits & ~bit;
- ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
rtc_irq_bits = val;
return ret;
}
-static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
+static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
int ret;
@@ -164,7 +213,7 @@ static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
return ret;
}
-static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
+static int twl_rtc_update_irq_enable(struct device *dev, unsigned enabled)
{
int ret;
@@ -177,7 +226,7 @@ static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
}
/*
- * Gets current TWL4030 RTC time and date parameters.
+ * Gets current TWL RTC time and date parameters.
*
* The RTC's time/alarm representation is not what gmtime(3) requires
* Linux to use:
@@ -185,24 +234,24 @@ static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
* - Months are 1..12 vs Linux 0-11
* - Years are 0..99 vs Linux 1900..N (we assume 21st century)
*/
-static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned char rtc_data[ALL_TIME_REGS + 1];
int ret;
u8 save_control;
- ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
if (ret < 0)
return ret;
save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
- ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
if (ret < 0)
return ret;
- ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
- REG_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
+ (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_time error %d\n", ret);
@@ -219,7 +268,7 @@ static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
return ret;
}
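The conversion described by the comment above twl_rtc_read_time() is unchanged by this patch, so it does not appear in the hunk. For orientation only, a sketch of that register-to-rtc_time mapping, consistent with the bin2bcd() calls visible in twl_rtc_set_time() below (register order assumed from the SECONDS..YEARS enum):

    tm->tm_sec  = bcd2bin(rtc_data[0]);
    tm->tm_min  = bcd2bin(rtc_data[1]);
    tm->tm_hour = bcd2bin(rtc_data[2]);
    tm->tm_mday = bcd2bin(rtc_data[3]);
    tm->tm_mon  = bcd2bin(rtc_data[4]) - 1;   /* chip months 1..12, Linux 0..11 */
    tm->tm_year = bcd2bin(rtc_data[5]) + 100; /* chip years 0..99, Linux counts from 1900 */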
-static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char save_control;
unsigned char rtc_data[ALL_TIME_REGS + 1];
@@ -233,18 +282,18 @@ static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[6] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the TC registers */
- ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
- twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
/* update all the time registers in one shot */
- ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
- REG_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_write(TWL_MODULE_RTC, rtc_data,
+ (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_set_time error %d\n", ret);
goto out;
@@ -252,22 +301,22 @@ static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Start back RTC */
save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
out:
return ret;
}
/*
- * Gets current TWL4030 RTC alarm time.
+ * Gets current TWL RTC alarm time.
*/
-static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
unsigned char rtc_data[ALL_TIME_REGS + 1];
int ret;
- ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
- REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
+ (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_alarm error %d\n", ret);
return ret;
@@ -288,12 +337,12 @@ static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
return ret;
}
-static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
unsigned char alarm_data[ALL_TIME_REGS + 1];
int ret;
- ret = twl4030_rtc_alarm_irq_enable(dev, 0);
+ ret = twl_rtc_alarm_irq_enable(dev, 0);
if (ret)
goto out;
@@ -305,20 +354,20 @@ static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
/* update all the alarm registers in one shot */
- ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
- REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
+ (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
if (ret) {
dev_err(dev, "rtc_set_alarm error %d\n", ret);
goto out;
}
if (alm->enabled)
- ret = twl4030_rtc_alarm_irq_enable(dev, 1);
+ ret = twl_rtc_alarm_irq_enable(dev, 1);
out:
return ret;
}
-static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
+static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
{
unsigned long events = 0;
int ret = IRQ_NONE;
@@ -333,7 +382,7 @@ static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
local_irq_enable();
#endif
- res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
/*
@@ -347,26 +396,28 @@ static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
else
events |= RTC_IRQF | RTC_UF;
- res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
+ res = twl_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
REG_RTC_STATUS_REG);
if (res)
goto out;
- /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
- * needs 2 reads to clear the interrupt. One read is done in
- * do_twl4030_pwrirq(). Doing the second read, to clear
- * the bit.
- *
- * FIXME the reason PWR_ISR1 needs an extra read is that
- * RTC_IF retriggered until we cleared REG_ALARM_M above.
- * But re-reading like this is a bad hack; by doing so we
- * risk wrongly clearing status for some other IRQ (losing
- * the interrupt). Be smarter about handling RTC_UF ...
- */
- res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
+ if (twl_class_is_4030()) {
+ /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
+ * needs 2 reads to clear the interrupt. One read is done in
+ * do_twl_pwrirq(). Doing the second read, to clear
+ * the bit.
+ *
+ * FIXME the reason PWR_ISR1 needs an extra read is that
+ * RTC_IF retriggered until we cleared REG_ALARM_M above.
+ * But re-reading like this is a bad hack; by doing so we
+ * risk wrongly clearing status for some other IRQ (losing
+ * the interrupt). Be smarter about handling RTC_UF ...
+ */
+ res = twl_i2c_read_u8(TWL4030_MODULE_INT,
&rd_reg, TWL4030_INT_PWR_ISR1);
- if (res)
- goto out;
+ if (res)
+ goto out;
+ }
/* Notify RTC core on event */
rtc_update_irq(rtc, 1, events);
@@ -376,18 +427,18 @@ out:
return ret;
}
-static struct rtc_class_ops twl4030_rtc_ops = {
- .read_time = twl4030_rtc_read_time,
- .set_time = twl4030_rtc_set_time,
- .read_alarm = twl4030_rtc_read_alarm,
- .set_alarm = twl4030_rtc_set_alarm,
- .alarm_irq_enable = twl4030_rtc_alarm_irq_enable,
- .update_irq_enable = twl4030_rtc_update_irq_enable,
+static struct rtc_class_ops twl_rtc_ops = {
+ .read_time = twl_rtc_read_time,
+ .set_time = twl_rtc_set_time,
+ .read_alarm = twl_rtc_read_alarm,
+ .set_alarm = twl_rtc_set_alarm,
+ .alarm_irq_enable = twl_rtc_alarm_irq_enable,
+ .update_irq_enable = twl_rtc_update_irq_enable,
};
/*----------------------------------------------------------------------*/
-static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
+static int __devinit twl_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
int ret = 0;
@@ -398,7 +449,7 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
return -EINVAL;
rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
+ &pdev->dev, &twl_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
@@ -409,7 +460,7 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
goto out1;
@@ -420,11 +471,11 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
/* Clear RTC Power up reset and pending alarm interrupts */
- ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
+ ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
goto out1;
- ret = request_irq(irq, twl4030_rtc_interrupt,
+ ret = request_irq(irq, twl_rtc_interrupt,
IRQF_TRIGGER_RISING,
dev_name(&rtc->dev), rtc);
if (ret < 0) {
@@ -432,21 +483,28 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
goto out1;
}
+ if (twl_class_is_6030()) {
+ twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_LINE_A);
+ twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_STS_A);
+ }
+
/* Check RTC module status, Enable if it is off */
- ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
goto out2;
if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
- dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
+ dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
goto out2;
}
/* init cached IRQ enable bits */
- ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
goto out2;
@@ -461,10 +519,10 @@ out0:
}
/*
- * Disable all TWL4030 RTC module interrupts.
+ * Disable all TWL RTC module interrupts.
* Sets status flag to free.
*/
-static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
+static int __devexit twl_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
struct rtc_device *rtc = platform_get_drvdata(pdev);
@@ -472,6 +530,13 @@ static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ if (twl_class_is_6030()) {
+ twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_LINE_A);
+ twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_STS_A);
+ }
+
free_irq(irq, rtc);
@@ -480,7 +545,7 @@ static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
return 0;
}
-static void twl4030_rtc_shutdown(struct platform_device *pdev)
+static void twl_rtc_shutdown(struct platform_device *pdev)
{
/* mask timer interrupts, but leave alarm interrupts on to enable
power-on when alarm is triggered */
@@ -491,7 +556,7 @@ static void twl4030_rtc_shutdown(struct platform_device *pdev)
static unsigned char irqstat;
-static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int twl_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
irqstat = rtc_irq_bits;
@@ -499,42 +564,47 @@ static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int twl4030_rtc_resume(struct platform_device *pdev)
+static int twl_rtc_resume(struct platform_device *pdev)
{
set_rtc_irq_bit(irqstat);
return 0;
}
#else
-#define twl4030_rtc_suspend NULL
-#define twl4030_rtc_resume NULL
+#define twl_rtc_suspend NULL
+#define twl_rtc_resume NULL
#endif
-MODULE_ALIAS("platform:twl4030_rtc");
+MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
- .probe = twl4030_rtc_probe,
- .remove = __devexit_p(twl4030_rtc_remove),
- .shutdown = twl4030_rtc_shutdown,
- .suspend = twl4030_rtc_suspend,
- .resume = twl4030_rtc_resume,
+ .probe = twl_rtc_probe,
+ .remove = __devexit_p(twl_rtc_remove),
+ .shutdown = twl_rtc_shutdown,
+ .suspend = twl_rtc_suspend,
+ .resume = twl_rtc_resume,
.driver = {
.owner = THIS_MODULE,
- .name = "twl4030_rtc",
+ .name = "twl_rtc",
},
};
-static int __init twl4030_rtc_init(void)
+static int __init twl_rtc_init(void)
{
+ if (twl_class_is_4030())
+ rtc_reg_map = (u8 *) twl4030_rtc_reg_map;
+ else
+ rtc_reg_map = (u8 *) twl6030_rtc_reg_map;
+
return platform_driver_register(&twl4030rtc_driver);
}
-module_init(twl4030_rtc_init);
+module_init(twl_rtc_init);
-static void __exit twl4030_rtc_exit(void)
+static void __exit twl_rtc_exit(void)
{
platform_driver_unregister(&twl4030rtc_driver);
}
-module_exit(twl4030_rtc_exit);
+module_exit(twl_rtc_exit);
MODULE_AUTHOR("Texas Instruments, MontaVista Software");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 79795cdf6ed8..000c7e481e59 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -485,7 +485,7 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
return 0;
}
-static struct dev_pm_ops wm831x_rtc_pm_ops = {
+static const struct dev_pm_ops wm831x_rtc_pm_ops = {
.suspend = wm831x_rtc_suspend,
.resume = wm831x_rtc_resume,
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index c91edc572eb6..f16486635a8e 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -315,9 +315,9 @@ static int wm8350_rtc_update_irq_enable(struct device *dev,
return 0;
}
-static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq,
- void *data)
+static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data)
{
+ struct wm8350 *wm8350 = data;
struct rtc_device *rtc = wm8350->rtc.rtc;
int ret;
@@ -330,14 +330,18 @@ static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq,
dev_err(&(wm8350->rtc.pdev->dev),
"Failed to disable alarm: %d\n", ret);
}
+
+ return IRQ_HANDLED;
}
-static void wm8350_rtc_update_handler(struct wm8350 *wm8350, int irq,
- void *data)
+static irqreturn_t wm8350_rtc_update_handler(int irq, void *data)
{
+ struct wm8350 *wm8350 = data;
struct rtc_device *rtc = wm8350->rtc.rtc;
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF);
+
+ return IRQ_HANDLED;
}
static const struct rtc_class_ops wm8350_rtc_ops = {
@@ -455,15 +459,14 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
return ret;
}
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_PER);
-
wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
- wm8350_rtc_update_handler, NULL);
+ wm8350_rtc_update_handler, 0,
+ "RTC Seconds", wm8350);
+ wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
- wm8350_rtc_alarm_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_ALM);
+ wm8350_rtc_alarm_handler, 0,
+ "RTC Alarm", wm8350);
return 0;
}
@@ -473,8 +476,6 @@ static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
-
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 5f23eca82804..6315fbd8e68b 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -14,6 +14,7 @@
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
@@ -272,10 +273,10 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
DBF_EVENT(DBF_DEBUG, "/proc/dasd/statistics: '%s'\n", buffer);
/* check for valid verbs */
- for (str = buffer; isspace(*str); str++);
+ str = skip_spaces(buffer);
if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
/* 'set xxx' was given */
- for (str = str + 4; isspace(*str); str++);
+ str = skip_spaces(str + 4);
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
dasd_profile_level = DASD_PROFILE_ON;
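skip_spaces() comes from <linux/string.h> (hence the include added above) and returns a pointer to the first non-whitespace character, which is exactly what the removed isspace() loops open-coded. A minimal illustration, not part of the patch:

    char buf[] = "   set on";
    char *str = skip_spaces(buf);   /* str -> "set on" */
    str = skip_spaces(str + 4);     /* past "set" and its separator: str -> "on" */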
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index f76f4bd82b9f..9b43ae94beba 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops dcssblk_pm_ops = {
+static const struct dev_pm_ops dcssblk_pm_ops = {
.freeze = dcssblk_freeze,
.thaw = dcssblk_thaw,
.restore = dcssblk_restore,
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 116d1b3eeb15..118de392af63 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev)
return 0;
}
-static struct dev_pm_ops xpram_pm_ops = {
+static const struct dev_pm_ops xpram_pm_ops = {
.restore = xpram_restore,
};
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 60473f86e1f9..33e96484d54f 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -529,7 +529,7 @@ static int monreader_restore(struct device *dev)
return monreader_thaw(dev);
}
-static struct dev_pm_ops monreader_pm_ops = {
+static const struct dev_pm_ops monreader_pm_ops = {
.freeze = monreader_freeze,
.thaw = monreader_thaw,
.restore = monreader_restore,
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 6532ed8b4afa..668a0579b26b 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -323,7 +323,7 @@ static int monwriter_thaw(struct device *dev)
return monwriter_restore(dev);
}
-static struct dev_pm_ops monwriter_pm_ops = {
+static const struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a983f5086788..ec88c59842e3 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev)
return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}
-static struct dev_pm_ops sclp_pm_ops = {
+static const struct dev_pm_ops sclp_pm_ops = {
.freeze = sclp_freeze,
.thaw = sclp_thaw,
.restore = sclp_restore,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 28b5afc129c3..b3beab610da4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -547,7 +547,7 @@ struct read_storage_sccb {
u32 entries[0];
} __packed;
-static struct dev_pm_ops sclp_mem_pm_ops = {
+static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 899aa795bf38..7dfa5412d5a8 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -675,7 +675,7 @@ static int vmlogrdr_pm_prepare(struct device *dev)
}
-static struct dev_pm_ops vmlogrdr_pm_ops = {
+static const struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index a5a62f1f7747..5f97ea2ee6b1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev)
return gdrv->restore ? gdrv->restore(gdev) : 0;
}
-static struct dev_pm_ops ccwgroup_pm_ops = {
+static const struct dev_pm_ops ccwgroup_pm_ops = {
.prepare = ccwgroup_pm_prepare,
.complete = ccwgroup_pm_complete,
.freeze = ccwgroup_pm_freeze,
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92ff88ac1107..7679aee6fa14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1148,7 +1148,7 @@ static int css_pm_restore(struct device *dev)
return drv->restore ? drv->restore(sch) : 0;
}
-static struct dev_pm_ops css_pm_ops = {
+static const struct dev_pm_ops css_pm_ops = {
.prepare = css_pm_prepare,
.complete = css_pm_complete,
.freeze = css_pm_freeze,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9fecfb4223a8..73901c9e260f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1904,7 +1904,7 @@ out_unlock:
return ret;
}
-static struct dev_pm_ops ccw_pm_ops = {
+static const struct dev_pm_ops ccw_pm_ops = {
.prepare = ccw_device_pm_prepare,
.complete = ccw_device_pm_complete,
.freeze = ccw_device_pm_freeze,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 395c04c2b00f..65ebee0a3266 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
- char* iucv_dbf_txt_buf = \
- get_cpu_var(iucv_dbf_txt_buf); \
- sprintf(iucv_dbf_txt_buf, text); \
- debug_text_event(iucv_dbf_##name, level, \
- iucv_dbf_txt_buf); \
+ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+ sprintf(__buf, text); \
+ debug_text_event(iucv_dbf_##name, level, __buf); \
put_cpu_var(iucv_dbf_txt_buf); \
} \
} while (0)
@@ -161,7 +159,7 @@ static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);
-static struct dev_pm_ops netiucv_pm_ops = {
+static const struct dev_pm_ops netiucv_pm_ops = {
.prepare = netiucv_pm_prepare,
.complete = netiucv_pm_complete,
.freeze = netiucv_pm_freeze,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3012355f8304..67f2485d2372 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -168,7 +168,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops smsg_pm_ops = {
+static const struct dev_pm_ops smsg_pm_ops = {
.freeze = smsg_pm_freeze,
.thaw = smsg_pm_restore_thaw,
.restore = smsg_pm_restore_thaw,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 206c2fa8c1ba..8643f5089361 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1333,7 +1333,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
error = &hostrcb->hcam.u.error.u.type_17_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
- strstrip(error->failure_reason);
+ strim(error->failure_reason);
ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -1359,7 +1359,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
error = &hostrcb->hcam.u.error.u.type_07_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
- strstrip(error->failure_reason);
+ strim(error->failure_reason);
ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
be32_to_cpu(hostrcb->hcam.u.error.prc));
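strim() here is the renamed strstrip(): it writes a terminator after the last non-whitespace character and returns a pointer past any leading whitespace, so callers such as these can ignore the return value. A hedged illustration with invented buffer contents:

    char reason[] = " adapter fault  ";
    char *r;

    r = strim(reason);   /* trailing blanks removed in place; r -> "adapter fault" */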
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 2b38f6ad6e11..8b955b534a36 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -984,7 +984,7 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
}
}
-static int skip_spaces(char *ptr, int len)
+static int sym_skip_spaces(char *ptr, int len)
{
int cnt, c;
@@ -1012,7 +1012,7 @@ static int is_keyword(char *ptr, int len, char *verb)
}
#define SKIP_SPACES(ptr, len) \
- if ((arg_len = skip_spaces(ptr, len)) < 1) \
+ if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \
return -EINVAL; \
ptr += arg_len; len -= arg_len;
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index d8983dd5c4b2..85dc0410ac1a 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2162,7 +2162,7 @@ static struct ioc3_submodule ioc3uart_ops = {
/**
* ioc3uart_init - module init entry point
*/
-static int __devinit ioc3uart_init(void)
+static int __init ioc3uart_init(void)
{
int ret;
@@ -2179,7 +2179,7 @@ static int __devinit ioc3uart_init(void)
return ret;
}
-static void __devexit ioc3uart_exit(void)
+static void __exit ioc3uart_exit(void)
{
ioc3_unregister_submodule(&ioc3uart_ops);
uart_unregister_driver(&ioc3_uart);
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 2e02c3026d24..836d9ab4f729 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2904,7 +2904,7 @@ static struct ioc4_submodule ioc4_serial_submodule = {
/**
* ioc4_serial_init - module init
*/
-int ioc4_serial_init(void)
+static int __init ioc4_serial_init(void)
{
int ret;
@@ -2913,20 +2913,30 @@ int ioc4_serial_init(void)
printk(KERN_WARNING
"%s: Couldn't register rs232 IOC4 serial driver\n",
__func__);
- return ret;
+ goto out;
}
if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
printk(KERN_WARNING
"%s: Couldn't register rs422 IOC4 serial driver\n",
__func__);
- return ret;
+ goto out_uart_rs232;
}
/* register with IOC4 main module */
- return ioc4_register_submodule(&ioc4_serial_submodule);
+ ret = ioc4_register_submodule(&ioc4_serial_submodule);
+ if (ret)
+ goto out_uart_rs422;
+ return 0;
+
+out_uart_rs422:
+ uart_unregister_driver(&ioc4_uart_rs422);
+out_uart_rs232:
+ uart_unregister_driver(&ioc4_uart_rs232);
+out:
+ return ret;
}
-static void __devexit ioc4_serial_exit(void)
+static void __exit ioc4_serial_exit(void)
{
ioc4_unregister_submodule(&ioc4_serial_submodule);
uart_unregister_driver(&ioc4_uart_rs232);
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 4a821046baae..56ee082157aa 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -756,7 +756,7 @@ static int serial_pxa_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops serial_pxa_pm_ops = {
+static const struct dev_pm_ops serial_pxa_pm_ops = {
.suspend = serial_pxa_suspend,
.resume = serial_pxa_resume,
};
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index ff38dbdb5c6e..7e3f4ff58cfd 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1312,7 +1312,7 @@ static int sci_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sci_dev_pm_ops = {
+static const struct dev_pm_ops sci_dev_pm_ops = {
.suspend = sci_suspend,
.resume = sci_resume,
};
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 816d4c592a3c..66802a4390cc 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -574,11 +574,11 @@ void ioc3_unregister_submodule(struct ioc3_submodule *is)
* Device management *
*********************/
-static char *
+static char * __devinitdata
ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3",
"MENET 4", "CADduo", "Altix Serial"};
-static int ioc3_class(struct ioc3_driver_data *idd)
+static int __devinit ioc3_class(struct ioc3_driver_data *idd)
{
int res = IOC3_CLASS_NONE;
/* NIC-based logic */
@@ -601,7 +601,8 @@ static int ioc3_class(struct ioc3_driver_data *idd)
return res;
}
/* Adds a new instance of an IOC3 card */
-static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int __devinit
+ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc3_driver_data *idd;
uint32_t pcmd;
@@ -753,7 +754,7 @@ out:
}
/* Removes a particular instance of an IOC3 card. */
-static void ioc3_remove(struct pci_dev *pdev)
+static void __devexit ioc3_remove(struct pci_dev *pdev)
{
int id;
struct ioc3_driver_data *idd;
@@ -805,7 +806,7 @@ static struct pci_driver ioc3_driver = {
.name = "IOC3",
.id_table = ioc3_id_table,
.probe = ioc3_probe,
- .remove = ioc3_remove,
+ .remove = __devexit_p(ioc3_remove),
};
MODULE_DEVICE_TABLE(pci, ioc3_id_table);
@@ -815,15 +816,15 @@ MODULE_DEVICE_TABLE(pci, ioc3_id_table);
*********************/
/* Module load */
-static int __devinit ioc3_init(void)
+static int __init ioc3_init(void)
{
if (ia64_platform_is("sn2"))
return pci_register_driver(&ioc3_driver);
- return 0;
+ return -ENODEV;
}
/* Module unload */
-static void __devexit ioc3_exit(void)
+static void __exit ioc3_exit(void)
{
pci_unregister_driver(&ioc3_driver);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 28fce65b8594..2d9d70359360 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -169,6 +169,12 @@ config SPI_OMAP24XX
SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
(McSPI) modules.
+config SPI_OMAP_100K
+ tristate "OMAP SPI 100K"
+ depends on SPI_MASTER && (ARCH_OMAP850 || ARCH_OMAP730)
+ help
+ OMAP SPI 100K master controller for omap7xx boards.
+
config SPI_ORION
tristate "Orion SPI master (EXPERIMENTAL)"
depends on PLAT_ORION && EXPERIMENTAL
@@ -220,6 +226,13 @@ config SPI_S3C24XX_GPIO
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+config SPI_SH_MSIOF
+ tristate "SuperH MSIOF SPI controller"
+ depends on SUPERH && HAVE_CLK
+ select SPI_BITBANG
+ help
+ SPI driver for SuperH MSIOF blocks.
+
config SPI_SH_SCI
tristate "SuperH SCI SPI controller"
depends on SUPERH
@@ -240,15 +253,38 @@ config SPI_TXX9
SPI driver for Toshiba TXx9 MIPS SoCs
config SPI_XILINX
- tristate "Xilinx SPI controller"
- depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
+ tristate "Xilinx SPI controller common module"
+ depends on HAS_IOMEM && EXPERIMENTAL
select SPI_BITBANG
+ select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE)
help
This exposes the SPI controller IP from the Xilinx EDK.
See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
Product Specification document (DS464) for hardware details.
+ Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+
+config SPI_XILINX_OF
+ tristate "Xilinx SPI controller OF device"
+ depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE)
+ help
+ This is the OF driver for the SPI controller IP from the Xilinx EDK.
+
+config SPI_XILINX_PLTFM
+ tristate "Xilinx SPI controller platform device"
+ depends on SPI_XILINX
+ help
+ This is the platform driver for the SPI controller IP
+ from the Xilinx EDK.
+
+config SPI_NUC900
+ tristate "Nuvoton NUC900 series SPI"
+ depends on ARCH_W90X900 && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ SPI driver for Nuvoton NUC900 series ARM SoCs
+
#
# Add new SPI master controllers in alphabetical order above this line
#
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index e3f092a9afa5..ed8c1675b52f 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
+obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
obj-$(CONFIG_SPI_ORION) += orion_spi.o
obj-$(CONFIG_SPI_PL022) += amba-pl022.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
@@ -32,8 +33,12 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
+obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
+obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
+obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
+obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 76cbc1a66598..cfd5ff9508fa 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -237,8 +237,14 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
unsigned bpw, hz;
u32 cfg, stat;
- bpw = t ? t->bits_per_word : spi->bits_per_word;
- hz = t ? t->speed_hz : spi->max_speed_hz;
+ bpw = spi->bits_per_word;
+ hz = spi->max_speed_hz;
+ if (t) {
+ if (t->bits_per_word)
+ bpw = t->bits_per_word;
+ if (t->speed_hz)
+ hz = t->speed_hz;
+ }
if (bpw < 4 || bpw > 24) {
dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n",
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index ef8379b2c172..45bfe6458173 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -18,9 +18,9 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/spi/mpc52xx_spi.h>
#include <linux/of_spi.h>
#include <linux/io.h>
+#include <linux/of_gpio.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
/* FSM state return values */
#define FSM_STOP 0 /* Nothing more for the state machine to */
/* do. If something interesting happens */
- /* then and IRQ will be received */
+ /* then an IRQ will be received */
#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
/* not expected */
#define FSM_CONTINUE 2 /* Keep iterating the state machine */
@@ -61,13 +61,12 @@ MODULE_LICENSE("GPL");
/* Driver internal data */
struct mpc52xx_spi {
struct spi_master *master;
- u32 sysclk;
void __iomem *regs;
int irq0; /* MODF irq */
int irq1; /* SPIF irq */
- int ipb_freq;
+ unsigned int ipb_freq;
- /* Statistics */
+ /* Statistics; not used now, but will be reintroduced for debugfs */
int msg_count;
int wcol_count;
int wcol_ticks;
@@ -79,7 +78,6 @@ struct mpc52xx_spi {
spinlock_t lock;
struct work_struct work;
-
/* Details of current transfer (length, and buffer pointers) */
struct spi_message *message; /* current message */
struct spi_transfer *transfer; /* current transfer */
@@ -89,6 +87,8 @@ struct mpc52xx_spi {
u8 *rx_buf;
const u8 *tx_buf;
int cs_change;
+ int gpio_cs_count;
+ unsigned int *gpio_cs;
};
/*
@@ -96,7 +96,13 @@ struct mpc52xx_spi {
*/
static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
{
- out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
+ int cs;
+
+ if (ms->gpio_cs_count > 0) {
+ cs = ms->message->spi->chip_select;
+ gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
+ } else
+ out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
}
/*
@@ -221,7 +227,7 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
ms->wcol_tx_timestamp = get_tbl();
data = 0;
if (ms->tx_buf)
- data = *(ms->tx_buf-1);
+ data = *(ms->tx_buf - 1);
out_8(ms->regs + SPI_DATA, data); /* try again */
return FSM_CONTINUE;
} else if (status & SPI_STATUS_MODF) {
@@ -390,7 +396,9 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
struct spi_master *master;
struct mpc52xx_spi *ms;
void __iomem *regs;
- int rc;
+ u8 ctrl1;
+ int rc, i = 0;
+ int gpio_cs;
/* MMIO registers */
dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
@@ -399,7 +407,8 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
return -ENODEV;
/* initialize the device */
- out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR);
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ out_8(regs + SPI_CTRL1, ctrl1);
out_8(regs + SPI_CTRL2, 0x0);
out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
@@ -409,6 +418,8 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
* on the SPI bus. This fault will also occur if the SPI signals
* are not connected to any pins (port_config setting) */
in_8(regs + SPI_STATUS);
+ out_8(regs + SPI_CTRL1, ctrl1);
+
in_8(regs + SPI_DATA);
if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
dev_err(&op->dev, "mode fault; is port_config correct?\n");
@@ -422,10 +433,12 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
rc = -ENOMEM;
goto err_alloc;
}
+
master->bus_num = -1;
- master->num_chipselect = 1;
master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
dev_set_drvdata(&op->dev, master);
ms = spi_master_get_devdata(master);
@@ -435,16 +448,51 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
ms->irq1 = irq_of_parse_and_map(op->node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
+ ms->gpio_cs_count = of_gpio_count(op->node);
+ if (ms->gpio_cs_count > 0) {
+ master->num_chipselect = ms->gpio_cs_count;
+ ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!ms->gpio_cs) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (i = 0; i < ms->gpio_cs_count; i++) {
+ gpio_cs = of_get_gpio(op->node, i);
+ if (gpio_cs < 0) {
+ dev_err(&op->dev,
+ "could not parse the gpio field "
+ "in oftree\n");
+ rc = -ENODEV;
+ goto err_gpio;
+ }
+
+ rc = gpio_request(gpio_cs, dev_name(&op->dev));
+ if (rc) {
+ dev_err(&op->dev,
+ "can't request spi cs gpio #%d "
+ "on gpio line %d\n", i, gpio_cs);
+ goto err_gpio;
+ }
+
+ gpio_direction_output(gpio_cs, 1);
+ ms->gpio_cs[i] = gpio_cs;
+ }
+ } else {
+ master->num_chipselect = 1;
+ }
+
spin_lock_init(&ms->lock);
INIT_LIST_HEAD(&ms->queue);
INIT_WORK(&ms->work, mpc52xx_spi_wq);
/* Decide if interrupts can be used */
if (ms->irq0 && ms->irq1) {
- rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
+ rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
"mpc5200-spi-modf", ms);
- rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
- "mpc5200-spi-spiF", ms);
+ rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-spif", ms);
if (rc) {
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
@@ -471,6 +519,11 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
err_register:
dev_err(&ms->master->dev, "initialization failed\n");
spi_master_put(master);
+ err_gpio:
+ while (i-- > 0)
+ gpio_free(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
err_alloc:
err_init:
iounmap(regs);
@@ -481,10 +534,15 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op)
{
struct spi_master *master = dev_get_drvdata(&op->dev);
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
+ int i;
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
+ for (i = 0; i < ms->gpio_cs_count; i++)
+ gpio_free(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
spi_unregister_master(master);
spi_master_put(master);
iounmap(ms->regs);
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c
new file mode 100644
index 000000000000..5355d90d1bee
--- /dev/null
+++ b/drivers/spi/omap_spi_100k.c
@@ -0,0 +1,635 @@
+/*
+ * OMAP7xx SPI 100k controller driver
+ * Author: Fabrice Crohas <fcrohas@gmail.com>
+ * derived from the original omap1_mcspi driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <linux/spi/spi.h>
+
+#include <plat/clock.h>
+
+#define OMAP1_SPI100K_MAX_FREQ 48000000
+
+#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
+
+#define SPI_SETUP1 0x00
+#define SPI_SETUP2 0x02
+#define SPI_CTRL 0x04
+#define SPI_STATUS 0x06
+#define SPI_TX_LSB 0x08
+#define SPI_TX_MSB 0x0a
+#define SPI_RX_LSB 0x0c
+#define SPI_RX_MSB 0x0e
+
+#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
+#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
+#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
+#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
+
+#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
+#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
+#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
+#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
+#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
+#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
+
+#define SPI_CTRL_SEN(x) ((x) << 7)
+#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
+#define SPI_CTRL_WR (1UL << 1)
+#define SPI_CTRL_RD (1UL << 0)
+
+#define SPI_STATUS_WE (1UL << 1)
+#define SPI_STATUS_RD (1UL << 0)
+
+#define WRITE 0
+#define READ 1
+
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 8
+
+#define SPI_RUNNING 0
+#define SPI_SHUTDOWN 1
+
+struct omap1_spi100k {
+ struct work_struct work;
+
+ /* lock protects queue and registers */
+ spinlock_t lock;
+ struct list_head msg_queue;
+ struct spi_master *master;
+ struct clk *ick;
+ struct clk *fck;
+
+ /* Virtual base address of the controller */
+ void __iomem *base;
+
+ /* State of the SPI */
+ unsigned int state;
+};
+
+struct omap1_spi100k_cs {
+ void __iomem *base;
+ int word_len;
+};
+
+static struct workqueue_struct *omap1_spi100k_wq;
+
+#define MOD_REG_BIT(val, mask, set) do { \
+ if (set) \
+ val |= mask; \
+ else \
+ val &= ~mask; \
+} while (0)
+
+static void spi100k_enable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* enable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val |= SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_disable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* disable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val &= ~SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_write_data(struct spi_master *master, int len, int data)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* write 16-bit word */
+ spi100k_enable_clock(master);
+ writew(data, spi100k->base + SPI_TX_MSB);
+
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_WR,
+ spi100k->base + SPI_CTRL);
+
+ /* wait for the write to complete (WE status bit set) */
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
+ udelay(1000);
+
+ spi100k_disable_clock(master);
+}
+
+static int spi100k_read_data(struct spi_master *master, int len)
+{
+ int dataH, dataL;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ spi100k_enable_clock(master);
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_RD,
+ spi100k->base + SPI_CTRL);
+
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
+ udelay(1000);
+
+ dataL = readw(spi100k->base + SPI_RX_LSB);
+ dataH = readw(spi100k->base + SPI_RX_MSB);
+ spi100k_disable_clock(master);
+
+ return dataL;
+}
+
+static void spi100k_open(struct spi_master *master)
+{
+ /* get control of SPI */
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ writew(SPI_SETUP1_INT_READ_ENABLE |
+ SPI_SETUP1_INT_WRITE_ENABLE |
+ SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
+
+ /* configure clock and interrupts */
+ writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
+ SPI_SETUP2_NEGATIVE_LEVEL |
+ SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
+}
+
+static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
+{
+ if (enable)
+ writew(0x05fc, spi100k->base + SPI_CTRL);
+ else
+ writew(0x05fd, spi100k->base + SPI_CTRL);
+}
+
+static unsigned
+omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap1_spi100k *spi100k;
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ int word_len;
+
+ spi100k = spi_master_get_devdata(spi->master);
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (xfer->tx_buf == NULL)
+ spi100k_write_data(spi->master, word_len, 0);
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 1;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 2;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 4;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ }
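+ /* return the number of bytes actually transferred (count - c) */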
+ return count - c;
+}
+
+/* called only when no transfer is active to this device */
+static int omap1_spi100k_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ u8 word_len = spi->bits_per_word;
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+ if (!word_len)
+ word_len = 8;
+
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+ cs->word_len = word_len;
+
+ /* SPI init before transfer */
+ writew(0x3e, spi100k->base + SPI_SETUP1);
+ writew(0x00, spi100k->base + SPI_STATUS);
+ writew(0x3e, spi100k->base + SPI_CTRL);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap1_spi100k_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap1_spi100k *spi100k;
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+
+ if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+ dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ spi100k = spi_master_get_devdata(spi->master);
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = spi100k->base + spi->chip_select * 0x14;
+ spi->controller_state = cs;
+ }
+
+ spi100k_open(spi->master);
+
+ clk_enable(spi100k->ick);
+ clk_enable(spi100k->fck);
+
+ ret = omap1_spi100k_setup_transfer(spi, NULL);
+
+ clk_disable(spi100k->ick);
+ clk_disable(spi100k->fck);
+
+ return ret;
+}
+
+static void omap1_spi100k_work(struct work_struct *work)
+{
+ struct omap1_spi100k *spi100k;
+ int status = 0;
+
+ spi100k = container_of(work, struct omap1_spi100k, work);
+ spin_lock_irq(&spi100k->lock);
+
+ clk_enable(spi100k->ick);
+ clk_enable(spi100k->fck);
+
+ /* We only enable one channel at a time -- the one whose message is
+ * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+ * channel" master mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+ while (!list_empty(&spi100k->msg_queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ struct omap1_spi100k_cs *cs;
+ int par_override = 0;
+
+ m = container_of(spi100k->msg_queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&m->queue);
+ spin_unlock_irq(&spi100k->lock);
+
+ spi = m->spi;
+ cs = spi->controller_state;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
+
+ if (!cs_active) {
+ omap1_spi100k_force_cs(spi100k, 1);
+ cs_active = 1;
+ }
+
+ if (t->len) {
+ unsigned count;
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ spi100k_write_data(spi->master, 8, 0);
+
+ count = omap1_spi100k_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ /* ignore the "leave it on after last xfer" hint */
+
+ if (t->cs_change) {
+ omap1_spi100k_force_cs(spi100k, 0);
+ cs_active = 0;
+ }
+ }
+
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap1_spi100k_setup_transfer(spi, NULL);
+ }
+
+ if (cs_active)
+ omap1_spi100k_force_cs(spi100k, 0);
+
+ m->status = status;
+ m->complete(m->context);
+
+ spin_lock_irq(&spi100k->lock);
+ }
+
+ clk_disable(spi100k->ick);
+ clk_disable(spi100k->fck);
+ spin_unlock_irq(&spi100k->lock);
+
+ if (status < 0)
+ printk(KERN_WARNING "spi transfer failed with %d\n", status);
+}
+
+static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct omap1_spi100k *spi100k;
+ unsigned long flags;
+ struct spi_transfer *t;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spi100k = spi_master_get_devdata(spi->master);
+
+ /* Don't accept new work if we're shutting down */
+ if (spi100k->state == SPI_SHUTDOWN)
+ return -ESHUTDOWN;
+
+ /* reject invalid messages and transfers */
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned len = t->len;
+
+ if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ
+ || (len && !(rx_buf || tx_buf))
+ || (t->bits_per_word &&
+ (t->bits_per_word < 4
+ || t->bits_per_word > 32))) {
+ dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ t->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ t->bits_per_word);
+ return -EINVAL;
+ }
+
+ if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) {
+ dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
+ t->speed_hz,
+ OMAP1_SPI100K_MAX_FREQ/(1<<16));
+ return -EINVAL;
+ }
+
+ }
+
+ spin_lock_irqsave(&spi100k->lock, flags);
+ list_add_tail(&m->queue, &spi100k->msg_queue);
+ queue_work(omap1_spi100k_wq, &spi100k->work);
+ spin_unlock_irqrestore(&spi100k->lock, flags);
+
+ return 0;
+}
+
+static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
+{
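+ /* nothing to do: no reset sequence is implemented for this controller */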
+ return 0;
+}
+
+static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap1_spi100k *spi100k;
+ int status = 0;
+
+ if (!pdev->id)
+ return -EINVAL;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = omap1_spi100k_setup;
+ master->transfer = omap1_spi100k_transfer;
+ master->cleanup = NULL;
+ master->num_chipselect = 2;
+ master->mode_bits = MODEBITS;
+
+ dev_set_drvdata(&pdev->dev, master);
+
+ spi100k = spi_master_get_devdata(master);
+ spi100k->master = master;
+
+ /*
+ * The memory region base address is taken as the platform_data.
+ * You should allocate this with ioremap() before initializing
+ * the SPI.
+ */
+ spi100k->base = (void __iomem *) pdev->dev.platform_data;
+
+ INIT_WORK(&spi100k->work, omap1_spi100k_work);
+
+ spin_lock_init(&spi100k->lock);
+ INIT_LIST_HEAD(&spi100k->msg_queue);
+ spi100k->ick = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(spi100k->ick)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
+ status = PTR_ERR(spi100k->ick);
+ goto err1;
+ }
+
+ spi100k->fck = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(spi100k->fck)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
+ status = PTR_ERR(spi100k->fck);
+ goto err2;
+ }
+
+ if (omap1_spi100k_reset(spi100k) < 0)
+ goto err3;
+
+ status = spi_register_master(master);
+ if (status < 0)
+ goto err3;
+
+ spi100k->state = SPI_RUNNING;
+
+ return status;
+
+err3:
+ clk_put(spi100k->fck);
+err2:
+ clk_put(spi100k->ick);
+err1:
+ spi_master_put(master);
+ return status;
+}
+
+static int __exit omap1_spi100k_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap1_spi100k *spi100k;
+ struct resource *r;
+ unsigned limit = 500;
+ unsigned long flags;
+ int status = 0;
+
+ master = dev_get_drvdata(&pdev->dev);
+ spi100k = spi_master_get_devdata(master);
+
+ spin_lock_irqsave(&spi100k->lock, flags);
+
+ spi100k->state = SPI_SHUTDOWN;
+ while (!list_empty(&spi100k->msg_queue) && limit--) {
+ spin_unlock_irqrestore(&spi100k->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&spi100k->lock, flags);
+ }
+
+ if (!list_empty(&spi100k->msg_queue))
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&spi100k->lock, flags);
+
+ if (status != 0)
+ return status;
+
+ clk_put(spi100k->fck);
+ clk_put(spi100k->ick);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+static struct platform_driver omap1_spi100k_driver = {
+ .driver = {
+ .name = "omap1_spi100k",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(omap1_spi100k_remove),
+};
+
+
+static int __init omap1_spi100k_init(void)
+{
+ omap1_spi100k_wq = create_singlethread_workqueue(
+ omap1_spi100k_driver.driver.name);
+
+ if (omap1_spi100k_wq == NULL)
+ return -ENOMEM;
+
+ return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe);
+}
+
+static void __exit omap1_spi100k_exit(void)
+{
+ platform_driver_unregister(&omap1_spi100k_driver);
+
+ destroy_workqueue(omap1_spi100k_wq);
+}
+
+module_init(omap1_spi100k_init);
+module_exit(omap1_spi100k_exit);
+
+MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
+MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index c8c2b693ffac..c2f707e5ce74 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1709,7 +1709,7 @@ static int pxa2xx_spi_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pxa2xx_spi_pm_ops = {
+static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
.suspend = pxa2xx_spi_suspend,
.resume = pxa2xx_spi_resume,
};
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 89c22efedfb0..1893f1e96dc4 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -44,6 +44,9 @@
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c
+#define MX3_CSPISTAT 0x14
+#define MX3_CSPISTAT_RR (1 << 3)
+
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
@@ -205,7 +208,7 @@ static int mx31_config(struct spi_imx_data *spi_imx,
if (cpu_is_mx31())
reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
- else if (cpu_is_mx35()) {
+ else if (cpu_is_mx25() || cpu_is_mx35()) {
reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
reg |= MX31_CSPICTRL_SSCTL;
}
@@ -219,7 +222,7 @@ static int mx31_config(struct spi_imx_data *spi_imx,
if (config->cs < 0) {
if (cpu_is_mx31())
reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT;
- else if (cpu_is_mx35())
+ else if (cpu_is_mx25() || cpu_is_mx35())
reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
}
@@ -481,7 +484,7 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static int __init spi_imx_probe(struct platform_device *pdev)
+static int __devinit spi_imx_probe(struct platform_device *pdev)
{
struct spi_imx_master *mxc_platform_info;
struct spi_master *master;
@@ -489,7 +492,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
- mxc_platform_info = (struct spi_imx_master *)pdev->dev.platform_data;
+ mxc_platform_info = dev_get_platdata(&pdev->dev);
if (!mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
@@ -513,11 +516,12 @@ static int __init spi_imx_probe(struct platform_device *pdev)
continue;
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
- i--;
- while (i > 0)
+ while (i > 0) {
+ i--;
if (spi_imx->chipselect[i] >= 0)
- gpio_free(spi_imx->chipselect[i--]);
- dev_err(&pdev->dev, "can't get cs gpios");
+ gpio_free(spi_imx->chipselect[i]);
+ }
+ dev_err(&pdev->dev, "can't get cs gpios\n");
goto out_master_put;
}
}
@@ -551,7 +555,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
}
spi_imx->irq = platform_get_irq(pdev, 0);
- if (!spi_imx->irq) {
+ if (spi_imx->irq <= 0) {
ret = -EINVAL;
goto out_iounmap;
}
@@ -562,7 +566,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
goto out_iounmap;
}
- if (cpu_is_mx31() || cpu_is_mx35()) {
+ if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) {
spi_imx->intctrl = mx31_intctrl;
spi_imx->config = mx31_config;
spi_imx->trigger = mx31_trigger;
@@ -590,9 +594,14 @@ static int __init spi_imx_probe(struct platform_device *pdev)
clk_enable(spi_imx->clk);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
- if (!cpu_is_mx31() || !cpu_is_mx35())
+ if (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
writel(1, spi_imx->base + MXC_RESET);
+ /* drain receive buffer */
+ if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35())
+ while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR)
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+
spi_imx->intctrl(spi_imx, 0);
ret = spi_bitbang_start(&spi_imx->bitbang);
@@ -625,7 +634,7 @@ out_master_put:
return ret;
}
-static int __exit spi_imx_remove(struct platform_device *pdev)
+static int __devexit spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -659,7 +668,7 @@ static struct platform_driver spi_imx_driver = {
.owner = THIS_MODULE,
},
.probe = spi_imx_probe,
- .remove = __exit_p(spi_imx_remove),
+ .remove = __devexit_p(spi_imx_remove),
};
static int __init spi_imx_init(void)
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 930135dc73ba..e9390d747bfc 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -1356,7 +1356,7 @@ static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
- .remove = __exit_p(plat_mpc8xxx_spi_remove),
+ .remove = __devexit_p(plat_mpc8xxx_spi_remove),
.driver = {
.name = "mpc8xxx_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c
new file mode 100644
index 000000000000..b319f9bf9b9b
--- /dev/null
+++ b/drivers/spi/spi_nuc900.c
@@ -0,0 +1,504 @@
+/* linux/drivers/spi/spi_nuc900.c
+ *
+ * Copyright (c) 2009 Nuvoton technology.
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <mach/nuc900_spi.h>
+
+/* usi registers offset */
+#define USI_CNT 0x00
+#define USI_DIV 0x04
+#define USI_SSR 0x08
+#define USI_RX0 0x10
+#define USI_TX0 0x10
+
+/* usi register bit */
+#define ENINT (0x01 << 17)
+#define ENFLG (0x01 << 16)
+#define TXNUM (0x03 << 8)
+#define TXNEG (0x01 << 2)
+#define RXNEG (0x01 << 1)
+#define LSB (0x01 << 10)
+#define SELECTLEV (0x01 << 2)
+#define SELECTPOL (0x01 << 31)
+#define SELECTSLAVE 0x01
+#define GOBUSY 0x01
+
+struct nuc900_spi {
+ struct spi_bitbang bitbang;
+ struct completion done;
+ void __iomem *regs;
+ int irq;
+ int len;
+ int count;
+ const unsigned char *tx;
+ unsigned char *rx;
+ struct clk *clk;
+ struct resource *ioarea;
+ struct spi_master *master;
+ struct spi_device *curdev;
+ struct device *dev;
+ struct nuc900_spi_info *pdata;
+ spinlock_t lock;
+ struct resource *res;
+};
+
+static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr)
+{
+ struct nuc900_spi *hw = to_hw(spi);
+ unsigned int val;
+ unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_SSR);
+
+ if (!cs)
+ val &= ~SELECTLEV;
+ else
+ val |= SELECTLEV;
+
+ if (!ssr)
+ val &= ~SELECTSLAVE;
+ else
+ val |= SELECTSLAVE;
+
+ __raw_writel(val, hw->regs + USI_SSR);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (!cpol)
+ val &= ~SELECTPOL;
+ else
+ val |= SELECTPOL;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_spi_chipsel(struct spi_device *spi, int value)
+{
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ nuc900_slave_select(spi, 0);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ nuc900_slave_select(spi, 1);
+ break;
+ }
+}
+
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
+ unsigned int txnum)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (!txnum)
+ val &= ~TXNUM;
+ else
+ val |= txnum << 0x08;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+}
+
+static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
+ unsigned int txbitlen)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= (txbitlen << 0x03);
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_spi_gobusy(struct nuc900_spi *hw)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= GOBUSY;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static int nuc900_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ return 0;
+}
+
+static int nuc900_spi_setup(struct spi_device *spi)
+{
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
+{
+ return hw->tx ? hw->tx[count] : 0;
+}
+
+static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct nuc900_spi *hw = to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0);
+
+ nuc900_spi_gobusy(hw);
+
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
+
+static irqreturn_t nuc900_spi_irq(int irq, void *dev)
+{
+ struct nuc900_spi *hw = dev;
+ unsigned int status;
+ unsigned int count = hw->count;
+
+ status = __raw_readl(hw->regs + USI_CNT);
+ __raw_writel(status, hw->regs + USI_CNT);
+
+ if (status & ENFLG) {
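+ /* one unit finished: store the received byte and start the
+ * next one if more data remains */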
+ hw->count++;
+
+ if (hw->rx)
+ hw->rx[count] = __raw_readl(hw->regs + USI_RX0);
+ count++;
+
+ if (count < hw->len) {
+ __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0);
+ nuc900_spi_gobusy(hw);
+ } else {
+ complete(&hw->done);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&hw->done);
+ return IRQ_HANDLED;
+}
+
+static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (edge)
+ val |= TXNEG;
+ else
+ val &= ~TXNEG;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (edge)
+ val |= RXNEG;
+ else
+ val &= ~RXNEG;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (lsb)
+ val |= LSB;
+ else
+ val &= ~LSB;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (sleep)
+ val |= (sleep << 12);
+ else
+ val &= ~(0x0f << 12);
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_enable_int(struct nuc900_spi *hw)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= ENINT;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_set_divider(struct nuc900_spi *hw)
+{
+ __raw_writel(hw->pdata->divider, hw->regs + USI_DIV);
+}
+
+static void nuc900_init_spi(struct nuc900_spi *hw)
+{
+ clk_enable(hw->clk);
+ spin_lock_init(&hw->lock);
+
+ nuc900_tx_edge(hw, hw->pdata->txneg);
+ nuc900_rx_edge(hw, hw->pdata->rxneg);
+ nuc900_send_first(hw, hw->pdata->lsb);
+ nuc900_set_sleep(hw, hw->pdata->sleep);
+ nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen);
+ nuc900_spi_setup_txnum(hw, hw->pdata->txnum);
+ nuc900_set_divider(hw);
+ nuc900_enable_int(hw);
+}
+
+static int __devinit nuc900_spi_probe(struct platform_device *pdev)
+{
+ struct nuc900_spi *hw;
+ struct spi_master *master;
+ int err = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ err = -ENOMEM;
+ goto err_nomem;
+ }
+
+ hw = spi_master_get_devdata(master);
+ memset(hw, 0, sizeof(struct nuc900_spi));
+
+ hw->master = spi_master_get(master);
+ hw->pdata = pdev->dev.platform_data;
+ hw->dev = &pdev->dev;
+
+ if (hw->pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_pdata;
+ }
+
+ platform_set_drvdata(pdev, hw);
+ init_completion(&hw->done);
+
+ master->mode_bits = SPI_MODE_0;
+ master->num_chipselect = hw->pdata->num_cs;
+ master->bus_num = hw->pdata->bus_num;
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = nuc900_spi_setupxfer;
+ hw->bitbang.chipselect = nuc900_spi_chipsel;
+ hw->bitbang.txrx_bufs = nuc900_spi_txrx;
+ hw->bitbang.master->setup = nuc900_spi_setup;
+
+ hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (hw->res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ err = -ENOENT;
+ goto err_pdata;
+ }
+
+ hw->ioarea = request_mem_region(hw->res->start,
+ resource_size(hw->res), pdev->name);
+
+ if (hw->ioarea == NULL) {
+ dev_err(&pdev->dev, "Cannot reserve region\n");
+ err = -ENXIO;
+ goto err_pdata;
+ }
+
+ hw->regs = ioremap(hw->res->start, resource_size(hw->res));
+ if (hw->regs == NULL) {
+ dev_err(&pdev->dev, "Cannot map IO\n");
+ err = -ENXIO;
+ goto err_iomap;
+ }
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ specified\n");
+ err = -ENOENT;
+ goto err_irq;
+ }
+
+ err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_irq;
+ }
+
+ hw->clk = clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock for device\n");
+ err = PTR_ERR(hw->clk);
+ goto err_clk;
+ }
+
+ mfp_set_groupg(&pdev->dev);
+ nuc900_init_spi(hw);
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ return 0;
+
+err_register:
+ clk_disable(hw->clk);
+ clk_put(hw->clk);
+err_clk:
+ free_irq(hw->irq, hw);
+err_irq:
+ iounmap(hw->regs);
+err_iomap:
+ release_mem_region(hw->res->start, resource_size(hw->res));
+ kfree(hw->ioarea);
+err_pdata:
+ spi_master_put(hw->master);
+
+err_nomem:
+ return err;
+}
+
+static int __devexit nuc900_spi_remove(struct platform_device *dev)
+{
+ struct nuc900_spi *hw = platform_get_drvdata(dev);
+
+ free_irq(hw->irq, hw);
+
+ platform_set_drvdata(dev, NULL);
+
+ spi_unregister_master(hw->master);
+
+ clk_disable(hw->clk);
+ clk_put(hw->clk);
+
+ iounmap(hw->regs);
+
+ release_mem_region(hw->res->start, resource_size(hw->res));
+ kfree(hw->ioarea);
+
+ spi_master_put(hw->master);
+ return 0;
+}
+
+static struct platform_driver nuc900_spi_driver = {
+ .probe = nuc900_spi_probe,
+ .remove = __devexit_p(nuc900_spi_remove),
+ .driver = {
+ .name = "nuc900-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nuc900_spi_init(void)
+{
+ return platform_driver_register(&nuc900_spi_driver);
+}
+
+static void __exit nuc900_spi_exit(void)
+{
+ platform_driver_unregister(&nuc900_spi_driver);
+}
+
+module_init(nuc900_spi_init);
+module_exit(nuc900_spi_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("nuc900 spi driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-spi");
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 33d94f76b9ef..276591569c8b 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -489,7 +489,7 @@ static int s3c24xx_spi_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops s3c24xx_spi_pmops = {
+static const struct dev_pm_ops s3c24xx_spi_pmops = {
.suspend = s3c24xx_spi_suspend,
.resume = s3c24xx_spi_resume,
};
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
new file mode 100644
index 000000000000..51e5e1dfa6e5
--- /dev/null
+++ b/drivers/spi/spi_sh_msiof.c
@@ -0,0 +1,691 @@
+/*
+ * SuperH MSIOF SPI Master Interface
+ *
+ * Copyright (c) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/sh_msiof.h>
+
+#include <asm/spi.h>
+#include <asm/unaligned.h>
+
+struct sh_msiof_spi_priv {
+ struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct platform_device *pdev;
+ struct sh_msiof_spi_info *info;
+ struct completion done;
+ unsigned long flags;
+ int tx_fifo_size;
+ int rx_fifo_size;
+};
+
+#define TMDR1 0x00
+#define TMDR2 0x04
+#define TMDR3 0x08
+#define RMDR1 0x10
+#define RMDR2 0x14
+#define RMDR3 0x18
+#define TSCR 0x20
+#define RSCR 0x22
+#define CTR 0x28
+#define FCTR 0x30
+#define STR 0x40
+#define IER 0x44
+#define TDR1 0x48
+#define TDR2 0x4c
+#define TFDR 0x50
+#define RDR1 0x58
+#define RDR2 0x5c
+#define RFDR 0x60
+
+#define CTR_TSCKE (1 << 15)
+#define CTR_TFSE (1 << 14)
+#define CTR_TXE (1 << 9)
+#define CTR_RXE (1 << 8)
+
+#define STR_TEOF (1 << 23)
+#define STR_REOF (1 << 7)
+
+static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
+{
+ switch (reg_offs) {
+ case TSCR:
+ case RSCR:
+ return ioread16(p->mapbase + reg_offs);
+ default:
+ return ioread32(p->mapbase + reg_offs);
+ }
+}
+
+static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
+ unsigned long value)
+{
+ switch (reg_offs) {
+ case TSCR:
+ case RSCR:
+ iowrite16(value, p->mapbase + reg_offs);
+ break;
+ default:
+ iowrite32(value, p->mapbase + reg_offs);
+ break;
+ }
+}
+
+static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
+ unsigned long clr, unsigned long set)
+{
+ unsigned long mask = clr | set;
+ unsigned long data;
+ int k;
+
+ data = sh_msiof_read(p, CTR);
+ data &= ~clr;
+ data |= set;
+ sh_msiof_write(p, CTR, data);
+
+ for (k = 100; k > 0; k--) {
+ if ((sh_msiof_read(p, CTR) & mask) == set)
+ break;
+
+ udelay(10);
+ }
+
+ return k > 0 ? 0 : -ETIMEDOUT;
+}
+
+static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
+{
+ struct sh_msiof_spi_priv *p = data;
+
+ /* just disable the interrupt and wake up */
+ sh_msiof_write(p, IER, 0);
+ complete(&p->done);
+
+ return IRQ_HANDLED;
+}
+
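+/* lookup table mapping the requested clock divisor to a TSCR/RSCR register value */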
+static struct {
+ unsigned short div;
+ unsigned short scr;
+} const sh_msiof_spi_clk_table[] = {
+ { 1, 0x0007 },
+ { 2, 0x0000 },
+ { 4, 0x0001 },
+ { 8, 0x0002 },
+ { 16, 0x0003 },
+ { 32, 0x0004 },
+ { 64, 0x1f00 },
+ { 128, 0x1f01 },
+ { 256, 0x1f02 },
+ { 512, 0x1f03 },
+ { 1024, 0x1f04 },
+};
+
+static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
+ unsigned long parent_rate,
+ unsigned long spi_hz)
+{
+ unsigned long div = 1024;
+ size_t k;
+
+ if (!WARN_ON(!spi_hz || !parent_rate))
+ div = parent_rate / spi_hz;
+
+ /* TODO: make more fine grained */
+
+ for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) {
+ if (sh_msiof_spi_clk_table[k].div >= div)
+ break;
+ }
+
+ k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
+
+ sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
+ sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+}
+
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+ int cpol, int cpha,
+ int tx_hi_z, int lsb_first)
+{
+ unsigned long tmp;
+ int edge;
+
+ /*
+ * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!)
+ * 0 0 10 10 1 0
+ * 0 1 10 10 0 1
+ * 1 0 11 11 0 1
+ * 1 1 11 11 1 0
+ *
+ * (!) Note: REDG is inverted relative to the recommended data sheet setting
+ */
+
+ sh_msiof_write(p, FCTR, 0);
+ sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
+ sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
+
+ tmp = 0xa0000000;
+ tmp |= cpol << 30; /* TSCKIZ */
+ tmp |= cpol << 28; /* RSCKIZ */
+
+ edge = cpol ? cpha : !cpha;
+
+ tmp |= edge << 27; /* TEDG */
+ tmp |= !edge << 26; /* REDG */
+ tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
+ sh_msiof_write(p, CTR, tmp);
+}
+
+static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, void *rx_buf,
+ int bits, int words)
+{
+ unsigned long dr2;
+
+ dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+
+ if (tx_buf)
+ sh_msiof_write(p, TMDR2, dr2);
+ else
+ sh_msiof_write(p, TMDR2, dr2 | 1);
+
+ if (rx_buf)
+ sh_msiof_write(p, RMDR2, dr2);
+
+ sh_msiof_write(p, IER, STR_TEOF | STR_REOF);
+}
+
+static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
+{
+ sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+}
+
+static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned char *buf_8 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_8[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned short *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_16[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned short *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned int *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_32[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned int *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+}
+
+static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned char *buf_8 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned short *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned short *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+}
+
+static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned int *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned int *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+}
+
+static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
+{
+ int bits;
+
+ bits = t ? t->bits_per_word : 0;
+ bits = bits ? bits : spi->bits_per_word;
+ return bits;
+}
+
+static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ unsigned long hz;
+
+ hz = t ? t->speed_hz : 0;
+ hz = hz ? hz : spi->max_speed_hz;
+ return hz;
+}
+
+static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int bits;
+
+ /* nothing to check hz values against since parent clock is disabled */
+
+ bits = sh_msiof_spi_bits(spi, t);
+ if (bits < 8)
+ return -EINVAL;
+ if (bits > 32)
+ return -EINVAL;
+
+ return spi_bitbang_setup_transfer(spi, t);
+}
+
+static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ int value;
+
+ /* chip select is active low unless SPI_CS_HIGH is set */
+ if (spi->mode & SPI_CS_HIGH)
+ value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
+ else
+ value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
+
+ if (is_on == BITBANG_CS_ACTIVE) {
+ if (!test_and_set_bit(0, &p->flags)) {
+ pm_runtime_get_sync(&p->pdev->dev);
+ clk_enable(p->clk);
+ }
+
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST));
+ }
+
+ /* use spi->controller data for CS (same strategy as spi_gpio) */
+ gpio_set_value((unsigned)spi->controller_data, value);
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ if (test_and_clear_bit(0, &p->flags)) {
+ clk_disable(p->clk);
+ pm_runtime_put(&p->pdev->dev);
+ }
+ }
+}
+
+static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
+ void (*tx_fifo)(struct sh_msiof_spi_priv *,
+ const void *, int, int),
+ void (*rx_fifo)(struct sh_msiof_spi_priv *,
+ void *, int, int),
+ const void *tx_buf, void *rx_buf,
+ int words, int bits)
+{
+ int fifo_shift;
+ int ret;
+
+ /* limit maximum word transfer to rx/tx fifo size */
+ if (tx_buf)
+ words = min_t(int, words, p->tx_fifo_size);
+ if (rx_buf)
+ words = min_t(int, words, p->rx_fifo_size);
+
+ /* the fifo contents need shifting */
+ fifo_shift = 32 - bits;
+
+ /* setup msiof transfer mode registers */
+ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
+
+ /* write tx fifo */
+ if (tx_buf)
+ tx_fifo(p, tx_buf, words, fifo_shift);
+
+ /* setup clock and rx/tx signals */
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ if (rx_buf)
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+
+ /* start by setting frame bit */
+ INIT_COMPLETION(p->done);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to start hardware\n");
+ goto err;
+ }
+
+ /* wait for tx fifo to be emptied / rx fifo to be filled */
+ wait_for_completion(&p->done);
+
+ /* read rx fifo */
+ if (rx_buf)
+ rx_fifo(p, rx_buf, words, fifo_shift);
+
+ /* clear status bits */
+ sh_msiof_reset_str(p);
+
+ /* shut down frame, tx/rx and clock signals */
+ ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ if (rx_buf)
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to shut down hardware\n");
+ goto err;
+ }
+
+ return words;
+
+ err:
+ sh_msiof_write(p, IER, 0);
+ return ret;
+}
+
+static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
+ void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
+ int bits;
+ int bytes_per_word;
+ int bytes_done;
+ int words;
+ int n;
+
+ bits = sh_msiof_spi_bits(spi, t);
+
+ /* setup bytes per word and fifo read/write functions */
+ if (bits <= 8) {
+ bytes_per_word = 1;
+ tx_fifo = sh_msiof_spi_write_fifo_8;
+ rx_fifo = sh_msiof_spi_read_fifo_8;
+ } else if (bits <= 16) {
+ bytes_per_word = 2;
+ if ((unsigned long)t->tx_buf & 0x01)
+ tx_fifo = sh_msiof_spi_write_fifo_16u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_16;
+
+ if ((unsigned long)t->rx_buf & 0x01)
+ rx_fifo = sh_msiof_spi_read_fifo_16u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_16;
+ } else {
+ bytes_per_word = 4;
+ if ((unsigned long)t->tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_32;
+
+ if ((unsigned long)t->rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_32;
+ }
+
+ /* setup clocks (clock already enabled in chipselect()) */
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
+ sh_msiof_spi_hz(spi, t));
+
+ /* transfer in fifo sized chunks */
+ words = t->len / bytes_per_word;
+ bytes_done = 0;
+
+ while (bytes_done < t->len) {
+ n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
+ t->tx_buf + bytes_done,
+ t->rx_buf + bytes_done,
+ words, bits);
+ if (n < 0)
+ break;
+
+ bytes_done += n * bytes_per_word;
+ words -= n;
+ }
+
+ return bytes_done;
+}
+
+static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 word, u8 bits)
+{
+ BUG(); /* unused but needed by bitbang code */
+ return 0;
+}
+
+static int sh_msiof_spi_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ struct sh_msiof_spi_priv *p;
+ char clk_name[16];
+ int i;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ p = spi_master_get_devdata(master);
+
+ platform_set_drvdata(pdev, p);
+ p->info = pdev->dev.platform_data;
+ init_completion(&p->done);
+
+ snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id);
+ p->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i = platform_get_irq(pdev, 0);
+ if (!r || i < 0) {
+ dev_err(&pdev->dev, "cannot get platform resources\n");
+ ret = -ENOENT;
+ goto err2;
+ }
+ p->mapbase = ioremap_nocache(r->start, resource_size(r));
+ if (!p->mapbase) {
+ dev_err(&pdev->dev, "unable to ioremap\n");
+ ret = -ENXIO;
+ goto err2;
+ }
+
+ ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED,
+ dev_name(&pdev->dev), p);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request irq\n");
+ goto err3;
+ }
+
+ p->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+
+ /* The standard version of MSIOF uses 64-word FIFOs */
+ p->tx_fifo_size = 64;
+ p->rx_fifo_size = 64;
+
+ /* Platform data may override FIFO sizes */
+ if (p->info->tx_fifo_override)
+ p->tx_fifo_size = p->info->tx_fifo_override;
+ if (p->info->rx_fifo_override)
+ p->rx_fifo_size = p->info->rx_fifo_override;
+
+ /* init master and bitbang code */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ master->flags = 0;
+ master->bus_num = pdev->id;
+ master->num_chipselect = p->info->num_chipselect;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
+
+ p->bitbang.master = master;
+ p->bitbang.chipselect = sh_msiof_spi_chipselect;
+ p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
+ p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
+ p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
+ p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
+ p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
+ p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
+
+ ret = spi_bitbang_start(&p->bitbang);
+ if (ret == 0)
+ return 0;
+
+ pm_runtime_disable(&pdev->dev);
+ err3:
+ iounmap(p->mapbase);
+ err2:
+ clk_put(p->clk);
+ err1:
+ spi_master_put(master);
+ err0:
+ return ret;
+}
+
+static int sh_msiof_spi_remove(struct platform_device *pdev)
+{
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = spi_bitbang_stop(&p->bitbang);
+ if (!ret) {
+ pm_runtime_disable(&pdev->dev);
+ free_irq(platform_get_irq(pdev, 0), p);
+ iounmap(p->mapbase);
+ clk_put(p->clk);
+ spi_master_put(p->bitbang.master);
+ }
+ return ret;
+}
+
+static int sh_msiof_spi_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
+ .runtime_suspend = sh_msiof_spi_runtime_nop,
+ .runtime_resume = sh_msiof_spi_runtime_nop,
+};
+
+static struct platform_driver sh_msiof_spi_drv = {
+ .probe = sh_msiof_spi_probe,
+ .remove = sh_msiof_spi_remove,
+ .driver = {
+ .name = "spi_sh_msiof",
+ .owner = THIS_MODULE,
+ .pm = &sh_msiof_spi_dev_pm_ops,
+ },
+};
+
+static int __init sh_msiof_spi_init(void)
+{
+ return platform_driver_register(&sh_msiof_spi_drv);
+}
+module_init(sh_msiof_spi_init);
+
+static void __exit sh_msiof_spi_exit(void)
+{
+ platform_driver_unregister(&sh_msiof_spi_drv);
+}
+module_exit(sh_msiof_spi_exit);
+
+MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 20d7322e2f71..9c446e6003d5 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -266,15 +266,15 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
- dev_dbg(&spi->dev,
+ dev_dbg(&spidev->spi->dev,
" xfer len %zd %s%s%s%dbits %u usec %uHz\n",
u_tmp->len,
u_tmp->rx_buf ? "rx " : "",
u_tmp->tx_buf ? "tx " : "",
u_tmp->cs_change ? "cs " : "",
- u_tmp->bits_per_word ? : spi->bits_per_word,
+ u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
u_tmp->delay_usecs,
- u_tmp->speed_hz ? : spi->max_speed_hz);
+ u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
}
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 5a143b9f6361..9f386379c169 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -14,22 +14,20 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/of_spi.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/io.h>
+#include "xilinx_spi.h"
+#include <linux/spi/xilinx_spi.h>
+
#define XILINX_SPI_NAME "xilinx_spi"
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
* Product Specification", DS464
*/
-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
+#define XSPI_CR_OFFSET 0x60 /* Control Register */
#define XSPI_CR_ENABLE 0x02
#define XSPI_CR_MASTER_MODE 0x04
@@ -40,8 +38,9 @@
#define XSPI_CR_RXFIFO_RESET 0x40
#define XSPI_CR_MANUAL_SSELECT 0x80
#define XSPI_CR_TRANS_INHIBIT 0x100
+#define XSPI_CR_LSB_FIRST 0x200
-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
+#define XSPI_SR_OFFSET 0x64 /* Status Register */
#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
@@ -49,8 +48,8 @@
#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
+#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
+#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
@@ -70,6 +69,7 @@
#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
+#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
@@ -78,35 +78,85 @@ struct xilinx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
-
+ struct resource mem; /* phys mem */
void __iomem *regs; /* virt. address of the control registers */
u32 irq;
- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
-
u8 *rx_ptr; /* pointer in the Rx buffer */
const u8 *tx_ptr; /* pointer in the Tx buffer */
int remaining_bytes; /* the number of bytes left to transfer */
+ u8 bits_per_word;
+ unsigned int (*read_fn) (void __iomem *);
+ void (*write_fn) (u32, void __iomem *);
+ void (*tx_fn) (struct xilinx_spi *);
+ void (*rx_fn) (struct xilinx_spi *);
};
-static void xspi_init_hw(void __iomem *regs_base)
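+/* FIFO access helpers, one pair per supported word width (8/16/32 bits) */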
+static void xspi_tx8(struct xilinx_spi *xspi)
+{
+ xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr++;
+}
+
+static void xspi_tx16(struct xilinx_spi *xspi)
+{
+ xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += 2;
+}
+
+static void xspi_tx32(struct xilinx_spi *xspi)
+{
+ xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += 4;
+}
+
+static void xspi_rx8(struct xilinx_spi *xspi)
+{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *xspi->rx_ptr = data & 0xff;
+ xspi->rx_ptr++;
+ }
+}
+
+static void xspi_rx16(struct xilinx_spi *xspi)
{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *(u16 *)(xspi->rx_ptr) = data & 0xffff;
+ xspi->rx_ptr += 2;
+ }
+}
+
+static void xspi_rx32(struct xilinx_spi *xspi)
+{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *(u32 *)(xspi->rx_ptr) = data;
+ xspi->rx_ptr += 4;
+ }
+}
+
+static void xspi_init_hw(struct xilinx_spi *xspi)
+{
+ void __iomem *regs_base = xspi->regs;
+
/* Reset the SPI device */
- out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
- XIPIF_V123B_RESET_MASK);
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ regs_base + XIPIF_V123B_RESETR_OFFSET);
/* Disable all the interrupts just in case */
- out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
+ xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
/* Enable the global IPIF interrupt */
- out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
- XIPIF_V123B_GINTR_ENABLE);
+ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ regs_base + XIPIF_V123B_DGIER_OFFSET);
/* Deselect the slave on the SPI bus */
- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
+ xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
/* Disable the transmitter, enable Manual Slave Select Assertion,
* put SPI controller into master mode, and enable it */
- out_be16(regs_base + XSPI_CR_OFFSET,
- XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
- | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
+ xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
+ XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
+ XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
}
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
@@ -115,16 +165,16 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
if (is_on == BITBANG_CS_INACTIVE) {
/* Deselect the slave on the SPI bus */
- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
+ xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
} else if (is_on == BITBANG_CS_ACTIVE) {
/* Set the SPI clock phase and polarity */
- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
+ u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
& ~XSPI_CR_MODE_MASK;
if (spi->mode & SPI_CPHA)
cr |= XSPI_CR_CPHA;
if (spi->mode & SPI_CPOL)
cr |= XSPI_CR_CPOL;
- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
/* We do not check spi->max_speed_hz here as the SPI clock
* frequency is not software programmable (the IP block design
@@ -132,25 +182,27 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
*/
/* Activate the chip select */
- out_be32(xspi->regs + XSPI_SSR_OFFSET,
- ~(0x0001 << spi->chip_select));
+ xspi->write_fn(~(0x0001 << spi->chip_select),
+ xspi->regs + XSPI_SSR_OFFSET);
}
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
* custom txrx_bufs(). We have nothing to setup here as the SPI IP block
- * supports just 8 bits per word, and SPI clock can't be changed in software.
- * Check for 8 bits per word. Chip select delay calculations could be
+ * supports 8, 16 or 32 bits per word, which cannot be changed in software.
+ * SPI clock can't be changed in software either.
+ * Check for correct bits per word. Chip select delay calculations could be
* added here as soon as bitbang_work() can be made aware of the delay value.
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
u8 bits_per_word;
bits_per_word = (t && t->bits_per_word)
? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != 8) {
+ if (bits_per_word != xspi->bits_per_word) {
dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
__func__, bits_per_word);
return -EINVAL;
@@ -161,17 +213,16 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
static int xilinx_spi_setup(struct spi_device *spi)
{
- struct spi_bitbang *bitbang;
- struct xilinx_spi *xspi;
- int retval;
-
- xspi = spi_master_get_devdata(spi->master);
- bitbang = &xspi->bitbang;
-
- retval = xilinx_spi_setup_transfer(spi, NULL);
- if (retval < 0)
- return retval;
-
+ /* Always return 0; we cannot check the number of bits here.
+ * SPI setup may be called before any slave driver is bound, in
+ * which case the SPI core defaults to 8 bits per word, which we
+ * do not support in some configurations. If we returned an error,
+ * the SPI device would not be registered and no driver could get
+ * hold of it. Once a driver is bound, it calls SPI setup again
+ * with the correct number of bits per transfer.
+ * If a driver sets up with an unsupported bit width, it will fail
+ * when it tries to do a transfer.
+ */
return 0;
}
@@ -180,15 +231,14 @@ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
u8 sr;
/* Fill the Tx FIFO with as many bytes as possible */
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
- if (xspi->tx_ptr) {
- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
- } else {
- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
- }
- xspi->remaining_bytes--;
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ if (xspi->tx_ptr)
+ xspi->tx_fn(xspi);
+ else
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->remaining_bytes -= xspi->bits_per_word / 8;
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
}
}
@@ -210,18 +260,19 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
*/
- ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
- ipif_ier | XSPI_INTR_TX_EMPTY);
+ ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
+ xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
+ xspi->regs + XIPIF_V123B_IIER_OFFSET);
/* Start the transfer by not inhibiting the transmitter any longer */
- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
+ ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
wait_for_completion(&xspi->done);
/* Disable the transmit empty interrupt */
- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
+ xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
return t->len - xspi->remaining_bytes;
}
@@ -238,8 +289,8 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
u32 ipif_isr;
/* Get the IPIF interrupts, and clear them immediately */
- ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
+ ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
u16 cr;
@@ -250,20 +301,15 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
* transmitter while the Isr refills the transmit register/FIFO,
* or make sure it is stopped if we're done.
*/
- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
- out_be16(xspi->regs + XSPI_CR_OFFSET,
- cr | XSPI_CR_TRANS_INHIBIT);
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
/* Read out all the data from the Rx FIFO */
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
- u8 data;
-
- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
- if (xspi->rx_ptr) {
- *xspi->rx_ptr++ = data;
- }
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ xspi->rx_fn(xspi);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
}
/* See if there is more data to send */
@@ -272,7 +318,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
/* Start the transfer by not inhibiting the
* transmitter any longer
*/
- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
} else {
/* No more data to send.
* Indicate the transfer is completed.
@@ -284,40 +330,22 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
- const struct of_device_id *match)
+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
+ u32 irq, s16 bus_num)
{
struct spi_master *master;
struct xilinx_spi *xspi;
- struct resource r_irq_struct;
- struct resource r_mem_struct;
-
- struct resource *r_irq = &r_irq_struct;
- struct resource *r_mem = &r_mem_struct;
- int rc = 0;
- const u32 *prop;
- int len;
-
- /* Get resources(memory, IRQ) associated with the device */
- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
+ struct xspi_platform_data *pdata = dev->platform_data;
+ int ret;
- if (master == NULL) {
- return -ENOMEM;
+ if (!pdata) {
+ dev_err(dev, "No platform data attached\n");
+ return NULL;
}
- dev_set_drvdata(&ofdev->dev, master);
-
- rc = of_address_to_resource(ofdev->node, 0, r_mem);
- if (rc) {
- dev_warn(&ofdev->dev, "invalid address\n");
- goto put_master;
- }
-
- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
- if (rc == NO_IRQ) {
- dev_warn(&ofdev->dev, "no IRQ found\n");
- goto put_master;
- }
+ master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
+ if (!master)
+ return NULL;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
@@ -330,128 +358,87 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev,
xspi->bitbang.master->setup = xilinx_spi_setup;
init_completion(&xspi->done);
- xspi->irq = r_irq->start;
-
- if (!request_mem_region(r_mem->start,
- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
- rc = -ENXIO;
- dev_warn(&ofdev->dev, "memory request failure\n");
+ if (!request_mem_region(mem->start, resource_size(mem),
+ XILINX_SPI_NAME))
goto put_master;
- }
- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+ xspi->regs = ioremap(mem->start, resource_size(mem));
if (xspi->regs == NULL) {
- rc = -ENOMEM;
- dev_warn(&ofdev->dev, "ioremap failure\n");
- goto release_mem;
+ dev_warn(dev, "ioremap failure\n");
+ goto map_failed;
}
- xspi->irq = r_irq->start;
-
- /* dynamic bus assignment */
- master->bus_num = -1;
- /* number of slave select bits is required */
- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
- goto unmap_io;
+ master->bus_num = bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+
+ xspi->mem = *mem;
+ xspi->irq = irq;
+ if (pdata->little_endian) {
+ xspi->read_fn = ioread32;
+ xspi->write_fn = iowrite32;
+ } else {
+ xspi->read_fn = ioread32be;
+ xspi->write_fn = iowrite32be;
}
- master->num_chipselect = *prop;
+ xspi->bits_per_word = pdata->bits_per_word;
+ if (xspi->bits_per_word == 8) {
+ xspi->tx_fn = xspi_tx8;
+ xspi->rx_fn = xspi_rx8;
+ } else if (xspi->bits_per_word == 16) {
+ xspi->tx_fn = xspi_tx16;
+ xspi->rx_fn = xspi_rx16;
+ } else if (xspi->bits_per_word == 32) {
+ xspi->tx_fn = xspi_tx32;
+ xspi->rx_fn = xspi_rx32;
+ } else
+ goto unmap_io;
+
/* SPI controller initializations */
- xspi_init_hw(xspi->regs);
+ xspi_init_hw(xspi);
/* Register for SPI Interrupt */
- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
- if (rc != 0) {
- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+ if (ret)
goto unmap_io;
- }
- rc = spi_bitbang_start(&xspi->bitbang);
- if (rc != 0) {
- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret) {
+ dev_err(dev, "spi_bitbang_start FAILED\n");
goto free_irq;
}
- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
-
- /* Add any subnodes on the SPI bus */
- of_register_spi_devices(master, ofdev->node);
-
- return rc;
+ dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
+ (unsigned long long)mem->start, xspi->regs, xspi->irq);
+ return master;
free_irq:
free_irq(xspi->irq, xspi);
unmap_io:
iounmap(xspi->regs);
-release_mem:
- release_mem_region(r_mem->start, resource_size(r_mem));
+map_failed:
+ release_mem_region(mem->start, resource_size(mem));
put_master:
spi_master_put(master);
- return rc;
+ return NULL;
}
+EXPORT_SYMBOL(xilinx_spi_init);
-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
+void xilinx_spi_deinit(struct spi_master *master)
{
struct xilinx_spi *xspi;
- struct spi_master *master;
- struct resource r_mem;
- master = platform_get_drvdata(ofdev);
xspi = spi_master_get_devdata(master);
spi_bitbang_stop(&xspi->bitbang);
free_irq(xspi->irq, xspi);
iounmap(xspi->regs);
- if (!of_address_to_resource(ofdev->node, 0, &r_mem))
- release_mem_region(r_mem.start, resource_size(&r_mem));
- dev_set_drvdata(&ofdev->dev, 0);
- spi_master_put(xspi->bitbang.master);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
-
-static int __exit xilinx_spi_of_remove(struct of_device *op)
-{
- return xilinx_spi_remove(op);
-}
-static struct of_device_id xilinx_spi_of_match[] = {
- { .compatible = "xlnx,xps-spi-2.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.b", },
- {}
-};
-
-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-
-static struct of_platform_driver xilinx_spi_of_driver = {
- .owner = THIS_MODULE,
- .name = "xilinx-xps-spi",
- .match_table = xilinx_spi_of_match,
- .probe = xilinx_spi_of_probe,
- .remove = __exit_p(xilinx_spi_of_remove),
- .driver = {
- .name = "xilinx-xps-spi",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init xilinx_spi_init(void)
-{
- return of_register_platform_driver(&xilinx_spi_of_driver);
+ release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
+ spi_master_put(xspi->bitbang.master);
}
-module_init(xilinx_spi_init);
+EXPORT_SYMBOL(xilinx_spi_deinit);
-static void __exit xilinx_spi_exit(void)
-{
- of_unregister_platform_driver(&xilinx_spi_of_driver);
-}
-module_exit(xilinx_spi_exit);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx SPI driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h
new file mode 100644
index 000000000000..d211accf68d2
--- /dev/null
+++ b/drivers/spi/xilinx_spi.h
@@ -0,0 +1,32 @@
+/*
+ * Xilinx SPI device driver API and platform data header file
+ *
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _XILINX_SPI_H_
+#define _XILINX_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XILINX_SPI_NAME "xilinx_spi"
+
+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
+ u32 irq, s16 bus_num);
+
+void xilinx_spi_deinit(struct spi_master *master);
+#endif
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
new file mode 100644
index 000000000000..71dc3adc0495
--- /dev/null
+++ b/drivers/spi/xilinx_spi_of.c
@@ -0,0 +1,134 @@
+/*
+ * Xilinx SPI OF device driver
+ *
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Xilinx SPI devices as OF devices
+ *
+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_spi.h>
+
+#include <linux/spi/xilinx_spi.h>
+#include "xilinx_spi.h"
+
+
+static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct spi_master *master;
+ struct xspi_platform_data *pdata;
+ struct resource r_mem;
+ struct resource r_irq;
+ int rc = 0;
+ const u32 *prop;
+ int len;
+
+ rc = of_address_to_resource(ofdev->node, 0, &r_mem);
+ if (rc) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ return rc;
+ }
+
+ rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
+ if (rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "no IRQ found\n");
+ return -ENODEV;
+ }
+
+ ofdev->dev.platform_data =
+ kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL);
+ pdata = ofdev->dev.platform_data;
+ if (!pdata)
+ return -ENOMEM;
+
+ /* number of slave select bits is required */
+ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
+ return -EINVAL;
+ }
+ pdata->num_chipselect = *prop;
+ pdata->bits_per_word = 8;
+ master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1);
+ if (!master)
+ return -ENODEV;
+
+ dev_set_drvdata(&ofdev->dev, master);
+
+ /* Add any subnodes on the SPI bus */
+ of_register_spi_devices(master, ofdev->node);
+
+ return 0;
+}
+
+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
+{
+ xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
+ dev_set_drvdata(&ofdev->dev, 0);
+ kfree(ofdev->dev.platform_data);
+ ofdev->dev.platform_data = NULL;
+ return 0;
+}
+
+static int __exit xilinx_spi_of_remove(struct of_device *op)
+{
+ return xilinx_spi_remove(op);
+}
+
+static struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
+static struct of_platform_driver xilinx_spi_of_driver = {
+ .match_table = xilinx_spi_of_match,
+ .probe = xilinx_spi_of_probe,
+ .remove = __exit_p(xilinx_spi_of_remove),
+ .driver = {
+ .name = "xilinx-xps-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init xilinx_spi_of_init(void)
+{
+ return of_register_platform_driver(&xilinx_spi_of_driver);
+}
+module_init(xilinx_spi_of_init);
+
+static void __exit xilinx_spi_of_exit(void)
+{
+ of_unregister_platform_driver(&xilinx_spi_of_driver);
+}
+module_exit(xilinx_spi_of_exit);
+
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("Xilinx SPI platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi_pltfm.c b/drivers/spi/xilinx_spi_pltfm.c
new file mode 100644
index 000000000000..24debac646a9
--- /dev/null
+++ b/drivers/spi/xilinx_spi_pltfm.c
@@ -0,0 +1,102 @@
+/*
+ * Support for Xilinx SPI platform devices
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Xilinx SPI devices as platform devices
+ *
+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/xilinx_spi.h>
+
+#include "xilinx_spi.h"
+
+static int __devinit xilinx_spi_probe(struct platform_device *dev)
+{
+ struct xspi_platform_data *pdata;
+ struct resource *r;
+ int irq;
+ struct spi_master *master;
+ u8 i;
+
+ pdata = dev->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ master = xilinx_spi_init(&dev->dev, r, irq, dev->id);
+ if (!master)
+ return -ENODEV;
+
+ for (i = 0; i < pdata->num_devices; i++)
+ spi_new_device(master, pdata->devices + i);
+
+ platform_set_drvdata(dev, master);
+ return 0;
+}
+
+static int __devexit xilinx_spi_remove(struct platform_device *dev)
+{
+ xilinx_spi_deinit(platform_get_drvdata(dev));
+ platform_set_drvdata(dev, 0);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+
+static struct platform_driver xilinx_spi_driver = {
+ .probe = xilinx_spi_probe,
+ .remove = __devexit_p(xilinx_spi_remove),
+ .driver = {
+ .name = XILINX_SPI_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init xilinx_spi_pltfm_init(void)
+{
+ return platform_driver_register(&xilinx_spi_driver);
+}
+module_init(xilinx_spi_pltfm_init);
+
+static void __exit xilinx_spi_pltfm_exit(void)
+{
+ platform_driver_unregister(&xilinx_spi_driver);
+}
+module_exit(xilinx_spi_pltfm_exit);
+
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("Xilinx SPI platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index aa53db9f2e88..1ef3b8fc50b3 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -210,7 +210,7 @@ static int uio_pdrv_genirq_runtime_nop(struct device *dev)
return 0;
}
-static struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
+static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
.runtime_suspend = uio_pdrv_genirq_runtime_nop,
.runtime_resume = uio_pdrv_genirq_runtime_nop,
};
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 91f2885b6ee1..2dcf906df569 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -363,7 +363,7 @@ static int hcd_pci_restore(struct device *dev)
return resume_common(dev, true);
}
-struct dev_pm_ops usb_hcd_pci_pm_ops = {
+const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend = hcd_pci_suspend,
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index d8b43aee581e..bbe2b924aae8 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -330,7 +330,7 @@ extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
#ifdef CONFIG_PM_SLEEP
-extern struct dev_pm_ops usb_hcd_pci_pm_ops;
+extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
#endif /* CONFIG_PCI */
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 4e2c6df8d3cc..2fb42043b305 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -167,18 +167,23 @@ struct usb_host_interface *usb_altnum_to_altsetting(
}
EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
+struct find_interface_arg {
+ int minor;
+ struct device_driver *drv;
+};
+
static int __find_interface(struct device *dev, void *data)
{
- int *minor = data;
+ struct find_interface_arg *arg = data;
struct usb_interface *intf;
if (!is_usb_interface(dev))
return 0;
+ if (dev->driver != arg->drv)
+ return 0;
intf = to_usb_interface(dev);
- if (intf->minor != -1 && intf->minor == *minor)
- return 1;
- return 0;
+ return intf->minor == arg->minor;
}
/**
@@ -187,14 +192,18 @@ static int __find_interface(struct device *dev, void *data)
* @minor: the minor number of the desired device
*
* This walks the bus device list and returns a pointer to the interface
- * with the matching minor. Note, this only works for devices that share the
- * USB major number.
+ * with the matching minor and driver. Note, this only works for devices
+ * that share the USB major number.
*/
struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
{
+ struct find_interface_arg argb;
struct device *dev;
- dev = bus_find_device(&usb_bus_type, NULL, &minor, __find_interface);
+ argb.minor = minor;
+ argb.drv = &drv->drvwrap.driver;
+
+ dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
/* Drop reference count from bus_find_device */
put_device(dev);
@@ -320,7 +329,7 @@ static int usb_dev_restore(struct device *dev)
return usb_resume(dev, PMSG_RESTORE);
}
-static struct dev_pm_ops usb_device_pm_ops = {
+static const struct dev_pm_ops usb_device_pm_ops = {
.prepare = usb_dev_prepare,
.complete = usb_dev_complete,
.suspend = usb_dev_suspend,
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index ed77be76d6bb..dbfb482a94e3 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -297,7 +297,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops au1xxx_ehci_pmops = {
+static const struct dev_pm_ops au1xxx_ehci_pmops = {
.suspend = ehci_hcd_au1xxx_drv_suspend,
.resume = ehci_hcd_au1xxx_drv_resume,
};
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index e4380082ebb1..17a6043c1fa0 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops au1xxx_ohci_pmops = {
+static const struct dev_pm_ops au1xxx_ohci_pmops = {
.suspend = ohci_hcd_au1xxx_drv_suspend,
.resume = ohci_hcd_au1xxx_drv_resume,
};
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index f1c06202fdf2..a18debdd79b8 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
+static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
.suspend = ohci_hcd_pxa27x_drv_suspend,
.resume = ohci_hcd_pxa27x_drv_resume,
};
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 41dbc70ae752..b7a661c02bcd 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2353,7 +2353,7 @@ static int r8a66597_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops r8a66597_dev_pm_ops = {
+static const struct dev_pm_ops r8a66597_dev_pm_ops = {
.suspend = r8a66597_suspend,
.resume = r8a66597_resume,
.poweroff = r8a66597_suspend,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 49f2346afad3..bfe08f4975a3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2214,7 +2214,7 @@ static int musb_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops musb_dev_pm_ops = {
+static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
.resume_noirq = musb_resume_noirq,
};
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index bd9883f41e63..2be9f2fa41f9 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -33,7 +33,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
@@ -276,16 +276,16 @@ static int twl4030_i2c_write_u8_verify(struct twl4030_usb *twl,
{
u8 check;
- if ((twl4030_i2c_write_u8(module, data, address) >= 0) &&
- (twl4030_i2c_read_u8(module, &check, address) >= 0) &&
+ if ((twl_i2c_write_u8(module, data, address) >= 0) &&
+ (twl_i2c_read_u8(module, &check, address) >= 0) &&
(check == data))
return 0;
dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
1, module, address, check, data);
/* Failed once: Try again */
- if ((twl4030_i2c_write_u8(module, data, address) >= 0) &&
- (twl4030_i2c_read_u8(module, &check, address) >= 0) &&
+ if ((twl_i2c_write_u8(module, data, address) >= 0) &&
+ (twl_i2c_read_u8(module, &check, address) >= 0) &&
(check == data))
return 0;
dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
@@ -303,7 +303,7 @@ static inline int twl4030_usb_write(struct twl4030_usb *twl,
{
int ret = 0;
- ret = twl4030_i2c_write_u8(TWL4030_MODULE_USB, data, address);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_USB, data, address);
if (ret < 0)
dev_dbg(twl->dev,
"TWL4030:USB:Write[0x%x] Error %d\n", address, ret);
@@ -315,7 +315,7 @@ static inline int twl4030_readb(struct twl4030_usb *twl, u8 module, u8 address)
u8 data;
int ret = 0;
- ret = twl4030_i2c_read_u8(module, &data, address);
+ ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
ret = data;
else
@@ -462,7 +462,7 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
* SLEEP. We work around this by clearing the bit after usv3v1
* is re-activated. This ensures that VUSB3V1 is really active.
*/
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
VUSB_DEDICATED2);
regulator_enable(twl->usb1v5);
pwr &= ~PHY_PWR_PHYPWD;
@@ -505,44 +505,44 @@ static void twl4030_phy_resume(struct twl4030_usb *twl)
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
{
/* Enable writing to power configuration registers */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
/* put VUSB3V1 LDO in active state */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
/* input to VUSB3V1 LDO is from VBAT, not VBUS */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
/* Initialize 3.1V regulator */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP);
twl->usb3v1 = regulator_get(twl->dev, "usb3v1");
if (IS_ERR(twl->usb3v1))
return -ENODEV;
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);
/* Initialize 1.5V regulator */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP);
twl->usb1v5 = regulator_get(twl->dev, "usb1v5");
if (IS_ERR(twl->usb1v5))
goto fail1;
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);
/* Initialize 1.8V regulator */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP);
twl->usb1v8 = regulator_get(twl->dev, "usb1v8");
if (IS_ERR(twl->usb1v8))
goto fail2;
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);
/* disable access to power configuration registers */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY);
return 0;
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index ad05da5ba3c7..4c10edecfb66 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -15,7 +15,7 @@
struct adp5520_bl {
struct device *master;
- struct adp5520_backlight_platfrom_data *pdata;
+ struct adp5520_backlight_platform_data *pdata;
struct mutex lock;
unsigned long cached_daylight_max;
int id;
@@ -31,29 +31,30 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness)
if (data->pdata->en_ambl_sens) {
if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) {
/* Disable Ambient Light auto adjust */
- ret |= adp5520_clr_bits(master, BL_CONTROL,
- BL_AUTO_ADJ);
- ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
+ ret |= adp5520_clr_bits(master, ADP5520_BL_CONTROL,
+ ADP5520_BL_AUTO_ADJ);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX,
+ brightness);
} else {
/*
* MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
* restore daylight l3 sysfs brightness
*/
- ret |= adp5520_write(master, DAYLIGHT_MAX,
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX,
data->cached_daylight_max);
- ret |= adp5520_set_bits(master, BL_CONTROL,
- BL_AUTO_ADJ);
+ ret |= adp5520_set_bits(master, ADP5520_BL_CONTROL,
+ ADP5520_BL_AUTO_ADJ);
}
} else {
- ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, brightness);
}
if (data->current_brightness && brightness == 0)
ret |= adp5520_set_bits(master,
- MODE_STATUS, DIM_EN);
+ ADP5520_MODE_STATUS, ADP5520_DIM_EN);
else if (data->current_brightness == 0 && brightness)
ret |= adp5520_clr_bits(master,
- MODE_STATUS, DIM_EN);
+ ADP5520_MODE_STATUS, ADP5520_DIM_EN);
if (!ret)
data->current_brightness = brightness;
@@ -79,7 +80,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
int error;
uint8_t reg_val;
- error = adp5520_read(data->master, BL_VALUE, &reg_val);
+ error = adp5520_read(data->master, ADP5520_BL_VALUE, &reg_val);
return error ? data->current_brightness : reg_val;
}
@@ -93,33 +94,46 @@ static int adp5520_bl_setup(struct backlight_device *bl)
{
struct adp5520_bl *data = bl_get_data(bl);
struct device *master = data->master;
- struct adp5520_backlight_platfrom_data *pdata = data->pdata;
+ struct adp5520_backlight_platform_data *pdata = data->pdata;
int ret = 0;
- ret |= adp5520_write(master, DAYLIGHT_MAX, pdata->l1_daylight_max);
- ret |= adp5520_write(master, DAYLIGHT_DIM, pdata->l1_daylight_dim);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX,
+ pdata->l1_daylight_max);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_DIM,
+ pdata->l1_daylight_dim);
if (pdata->en_ambl_sens) {
data->cached_daylight_max = pdata->l1_daylight_max;
- ret |= adp5520_write(master, OFFICE_MAX, pdata->l2_office_max);
- ret |= adp5520_write(master, OFFICE_DIM, pdata->l2_office_dim);
- ret |= adp5520_write(master, DARK_MAX, pdata->l3_dark_max);
- ret |= adp5520_write(master, DARK_DIM, pdata->l3_dark_dim);
- ret |= adp5520_write(master, L2_TRIP, pdata->l2_trip);
- ret |= adp5520_write(master, L2_HYS, pdata->l2_hyst);
- ret |= adp5520_write(master, L3_TRIP, pdata->l3_trip);
- ret |= adp5520_write(master, L3_HYS, pdata->l3_hyst);
- ret |= adp5520_write(master, ALS_CMPR_CFG,
- ALS_CMPR_CFG_VAL(pdata->abml_filt, L3_EN));
+ ret |= adp5520_write(master, ADP5520_OFFICE_MAX,
+ pdata->l2_office_max);
+ ret |= adp5520_write(master, ADP5520_OFFICE_DIM,
+ pdata->l2_office_dim);
+ ret |= adp5520_write(master, ADP5520_DARK_MAX,
+ pdata->l3_dark_max);
+ ret |= adp5520_write(master, ADP5520_DARK_DIM,
+ pdata->l3_dark_dim);
+ ret |= adp5520_write(master, ADP5520_L2_TRIP,
+ pdata->l2_trip);
+ ret |= adp5520_write(master, ADP5520_L2_HYS,
+ pdata->l2_hyst);
+ ret |= adp5520_write(master, ADP5520_L3_TRIP,
+ pdata->l3_trip);
+ ret |= adp5520_write(master, ADP5520_L3_HYS,
+ pdata->l3_hyst);
+ ret |= adp5520_write(master, ADP5520_ALS_CMPR_CFG,
+ ALS_CMPR_CFG_VAL(pdata->abml_filt,
+ ADP5520_L3_EN));
}
- ret |= adp5520_write(master, BL_CONTROL,
- BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens));
+ ret |= adp5520_write(master, ADP5520_BL_CONTROL,
+ BL_CTRL_VAL(pdata->fade_led_law,
+ pdata->en_ambl_sens));
- ret |= adp5520_write(master, BL_FADE, FADE_VAL(pdata->fade_in,
+ ret |= adp5520_write(master, ADP5520_BL_FADE, FADE_VAL(pdata->fade_in,
pdata->fade_out));
- ret |= adp5520_set_bits(master, MODE_STATUS, BL_EN | DIM_EN);
+ ret |= adp5520_set_bits(master, ADP5520_MODE_STATUS,
+ ADP5520_BL_EN | ADP5520_DIM_EN);
return ret;
}
@@ -156,29 +170,31 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
}
static ssize_t adp5520_bl_dark_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DARK_MAX);
+ return adp5520_show(dev, buf, ADP5520_DARK_MAX);
}
static ssize_t adp5520_bl_dark_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, DARK_MAX);
+ return adp5520_store(dev, buf, count, ADP5520_DARK_MAX);
}
static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show,
adp5520_bl_dark_max_store);
static ssize_t adp5520_bl_office_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, OFFICE_MAX);
+ return adp5520_show(dev, buf, ADP5520_OFFICE_MAX);
}
static ssize_t adp5520_bl_office_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, OFFICE_MAX);
+ return adp5520_store(dev, buf, count, ADP5520_OFFICE_MAX);
}
static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
adp5520_bl_office_max_store);
@@ -186,16 +202,17 @@ static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
static ssize_t adp5520_bl_daylight_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DAYLIGHT_MAX);
+ return adp5520_show(dev, buf, ADP5520_DAYLIGHT_MAX);
}
static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adp5520_bl *data = dev_get_drvdata(dev);
strict_strtoul(buf, 10, &data->cached_daylight_max);
- return adp5520_store(dev, buf, count, DAYLIGHT_MAX);
+ return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX);
}
static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
adp5520_bl_daylight_max_store);
@@ -203,14 +220,14 @@ static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
static ssize_t adp5520_bl_dark_dim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DARK_DIM);
+ return adp5520_show(dev, buf, ADP5520_DARK_DIM);
}
static ssize_t adp5520_bl_dark_dim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, DARK_DIM);
+ return adp5520_store(dev, buf, count, ADP5520_DARK_DIM);
}
static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
adp5520_bl_dark_dim_store);
@@ -218,29 +235,29 @@ static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
static ssize_t adp5520_bl_office_dim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, OFFICE_DIM);
+ return adp5520_show(dev, buf, ADP5520_OFFICE_DIM);
}
static ssize_t adp5520_bl_office_dim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, OFFICE_DIM);
+ return adp5520_store(dev, buf, count, ADP5520_OFFICE_DIM);
}
static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show,
adp5520_bl_office_dim_store);
static ssize_t adp5520_bl_daylight_dim_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DAYLIGHT_DIM);
+ return adp5520_show(dev, buf, ADP5520_DAYLIGHT_DIM);
}
static ssize_t adp5520_bl_daylight_dim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, DAYLIGHT_DIM);
+ return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_DIM);
}
static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show,
adp5520_bl_daylight_dim_store);
@@ -316,7 +333,7 @@ static int __devexit adp5520_bl_remove(struct platform_device *pdev)
struct backlight_device *bl = platform_get_drvdata(pdev);
struct adp5520_bl *data = bl_get_data(bl);
- adp5520_clr_bits(data->master, MODE_STATUS, BL_EN);
+ adp5520_clr_bits(data->master, ADP5520_MODE_STATUS, ADP5520_BL_EN);
if (data->pdata->en_ambl_sens)
sysfs_remove_group(&bl->dev.kobj,
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 7fcb0eb54c60..f2d76dae1eb3 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -177,7 +177,7 @@ static int da903x_backlight_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops da903x_backlight_pm_ops = {
+static const struct dev_pm_ops da903x_backlight_pm_ops = {
.suspend = da903x_backlight_suspend,
.resume = da903x_backlight_resume,
};
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index a482dd7b0311..9b3be74cee5a 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -101,7 +101,7 @@ static ssize_t lcd_store_power(struct device *dev,
int power = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
return -EINVAL;
@@ -140,7 +140,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
int contrast = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
return -EINVAL;
diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c
index 4830b1bf51e5..80abbf323b99 100644
--- a/drivers/video/display/display-sysfs.c
+++ b/drivers/video/display/display-sysfs.c
@@ -67,7 +67,7 @@ static ssize_t display_store_contrast(struct device *dev,
contrast = simple_strtoul(buf, &endp, 0);
size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c
index e759895bf3d3..f0af911a096d 100644
--- a/drivers/video/geode/display_gx.c
+++ b/drivers/video/geode/display_gx.c
@@ -17,7 +17,7 @@
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/delay.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "gxfb.h"
@@ -25,7 +25,7 @@ unsigned int gx_frame_buffer_size(void)
{
unsigned int val;
- if (!geode_has_vsa2()) {
+ if (!cs5535_has_vsa2()) {
uint32_t hi, lo;
/* The number of pages is (PMAX - PMIN)+1 */
diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h
index 16a96f8fd8c5..d19e9378b0c0 100644
--- a/drivers/video/geode/gxfb.h
+++ b/drivers/video/geode/gxfb.h
@@ -340,7 +340,7 @@ static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val)
}
-/* MSRs are defined in asm/geode.h; their bitfields are here */
+/* MSRs are defined in linux/cs5535.h; their bitfields are here */
#define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3)
#define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2)
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 2552cac39e1c..b3e639d1e12c 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -32,7 +32,7 @@
#include <linux/suspend.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "gxfb.h"
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 6a51448fd3f7..fc68a8b0a144 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -409,7 +409,7 @@ static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val)
}
-/* MSRs are defined in asm/geode.h; their bitfields are here */
+/* MSRs are defined in linux/cs5535.h; their bitfields are here */
#define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */
#define MSR_GLCP_DOTPLL_HALFPIX (1 << 24)
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index b1cd49c99356..0e5d8c7c3eba 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -13,7 +13,7 @@
#include <linux/fb.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "lxfb.h"
@@ -307,7 +307,7 @@ unsigned int lx_framebuffer_size(void)
{
unsigned int val;
- if (!geode_has_vsa2()) {
+ if (!cs5535_has_vsa2()) {
uint32_t hi, lo;
/* The number of pages is (PMAX - PMIN)+1 */
diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c
index 9aff32ef8bb6..1bb043d70c64 100644
--- a/drivers/video/geode/suspend_gx.c
+++ b/drivers/video/geode/suspend_gx.c
@@ -10,7 +10,7 @@
#include <linux/fb.h>
#include <asm/io.h>
#include <asm/msr.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include <asm/delay.h>
#include "gxfb.h"
diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c
index b8d52a8360db..6082f653c68a 100644
--- a/drivers/video/geode/video_gx.c
+++ b/drivers/video/geode/video_gx.c
@@ -16,7 +16,7 @@
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/msr.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "gxfb.h"
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e7116a6d82d3..73c83a8de2d3 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -456,7 +456,7 @@ static int hitfb_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops hitfb_dev_pm_ops = {
+static const struct dev_pm_ops hitfb_dev_pm_ops = {
.suspend = hitfb_suspend,
.resume = hitfb_resume,
};
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c
index 760645d9dbb6..e3eccc9af78e 100644
--- a/drivers/video/omap/lcd_2430sdp.c
+++ b/drivers/video/omap/lcd_2430sdp.c
@@ -25,7 +25,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
#include <asm/mach-types.h>
@@ -52,7 +52,7 @@ static unsigned enable_gpio;
#define TWL4030_VPLL2_DEV_GRP 0x33
#define TWL4030_VPLL2_DEDICATED 0x36
-#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
static int sdp2430_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/output.c b/drivers/video/output.c
index 5e6439ae7394..5137aa016b83 100644
--- a/drivers/video/output.c
+++ b/drivers/video/output.c
@@ -50,7 +50,7 @@ static ssize_t video_output_store_state(struct device *dev,
int request_state = simple_strtoul(buf,&endp,0);
size_t size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
return -EINVAL;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index f58a3aae6ea6..b7e58059b592 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1667,7 +1667,7 @@ static int pxafb_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pxafb_pm_ops = {
+static const struct dev_pm_ops pxafb_pm_ops = {
.suspend = pxafb_suspend,
.resume = pxafb_resume,
};
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index b4b5de930cf5..8a65fb6648a6 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -890,7 +890,7 @@ static int sh_mobile_lcdc_runtime_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
+static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
.suspend = sh_mobile_lcdc_suspend,
.resume = sh_mobile_lcdc_resume,
.runtime_suspend = sh_mobile_lcdc_runtime_suspend,
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
index 77afb0acc500..9c6594473d3b 100644
--- a/drivers/watchdog/adx_wdt.c
+++ b/drivers/watchdog/adx_wdt.c
@@ -314,7 +314,7 @@ static int adx_wdt_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops adx_wdt_pm_ops = {
+static const struct dev_pm_ops adx_wdt_pm_ops = {
.suspend = adx_wdt_suspend,
.resume = adx_wdt_resume,
};
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index cb46556f2973..8162a40d1522 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3
@@ -48,7 +48,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static int twl4030_wdt_write(unsigned char val)
{
- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
+ return twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
TWL4030_WATCHDOG_CFG_REG_OFFS);
}
diff --git a/fs/Kconfig b/fs/Kconfig
index 64d44efad7a5..f8fccaaad628 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -6,6 +6,10 @@ menu "File systems"
if BLOCK
+config FS_JOURNAL_INFO
+ bool
+ default n
+
source "fs/ext2/Kconfig"
source "fs/ext3/Kconfig"
source "fs/ext4/Kconfig"
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 38502c67987c..79d2b1aa389f 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -380,7 +380,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
down_write(&current->mm->mmap_sem);
current->mm->start_brk = do_mmap(NULL, 0, stack_size,
PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
+ MAP_PRIVATE | MAP_ANONYMOUS |
+ MAP_UNINITIALIZED | MAP_GROWSDOWN,
0);
if (IS_ERR_VALUE(current->mm->start_brk)) {
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 7bb3c020e570..402afe0a0bfb 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -4,6 +4,7 @@ config BTRFS_FS
select LIBCRC32C
select ZLIB_INFLATE
select ZLIB_DEFLATE
+ select FS_JOURNAL_INFO
help
Btrfs is a new filesystem with extents, writable snapshotting,
support for multiple devices and many more features.
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 4618516dd994..c2413561ea75 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -21,6 +21,7 @@
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"
@@ -257,8 +258,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
if (args == data)
goto error;
*args = '\0';
- for (args++; isspace(*args); args++)
- continue;
+ args = skip_spaces(++args);
}
/* run the appropriate command handler */
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 278020d2449c..14cbc831422a 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -979,24 +979,6 @@ COMPATIBLE_IOCTL(FIGETBSZ)
/* 'X' - originally XFS but some now in the VFS */
COMPATIBLE_IOCTL(FIFREEZE)
COMPATIBLE_IOCTL(FITHAW)
-/* RAID */
-COMPATIBLE_IOCTL(RAID_VERSION)
-COMPATIBLE_IOCTL(GET_ARRAY_INFO)
-COMPATIBLE_IOCTL(GET_DISK_INFO)
-COMPATIBLE_IOCTL(PRINT_RAID_DEBUG)
-COMPATIBLE_IOCTL(RAID_AUTORUN)
-COMPATIBLE_IOCTL(CLEAR_ARRAY)
-COMPATIBLE_IOCTL(ADD_NEW_DISK)
-COMPATIBLE_IOCTL(SET_ARRAY_INFO)
-COMPATIBLE_IOCTL(SET_DISK_INFO)
-COMPATIBLE_IOCTL(WRITE_RAID_INFO)
-COMPATIBLE_IOCTL(UNPROTECT_ARRAY)
-COMPATIBLE_IOCTL(PROTECT_ARRAY)
-COMPATIBLE_IOCTL(RUN_ARRAY)
-COMPATIBLE_IOCTL(STOP_ARRAY)
-COMPATIBLE_IOCTL(STOP_ARRAY_RO)
-COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
-COMPATIBLE_IOCTL(GET_BITMAP_FILE)
COMPATIBLE_IOCTL(KDGETKEYCODE)
COMPATIBLE_IOCTL(KDSETKEYCODE)
COMPATIBLE_IOCTL(KDGKBTYPE)
diff --git a/fs/exec.c b/fs/exec.c
index c0c636e34f60..623a5cc3076a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -923,6 +923,15 @@ char *get_task_comm(char *buf, struct task_struct *tsk)
void set_task_comm(struct task_struct *tsk, char *buf)
{
task_lock(tsk);
+
+ /*
+ * Threads may access current->comm without holding
+ * the task lock, so write the string carefully.
+ * Readers without a lock may see incomplete new
+ * names but are safe from non-terminating string reads.
+ */
+ memset(tsk->comm, 0, TASK_COMM_LEN);
+ wmb();
strlcpy(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
perf_event_comm(tsk);
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 9acf7e808139..e5f6774846e4 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -2,6 +2,7 @@ config EXT4_FS
tristate "The Extended 4 (ext4) filesystem"
select JBD2
select CRC16
+ select FS_JOURNAL_INFO
help
This is the next generation of the ext3 filesystem.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c1e19d5b5985..b1fd3daadc9c 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3955,7 +3955,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
* per cpu locality group is to reduce the contention between block
* request from multiple CPUs.
*/
- ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
+ ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
/* we're going to use group allocation */
ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 768c111a77ec..827bde1f2594 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2137,11 +2137,8 @@ static int parse_strtoul(const char *buf,
{
char *endp;
- while (*buf && isspace(*buf))
- buf++;
- *value = simple_strtoul(buf, &endp, 0);
- while (*endp && isspace(*endp))
- endp++;
+ *value = simple_strtoul(skip_spaces(buf), &endp, 0);
+ endp = skip_spaces(endp);
if (*endp || *value > max)
return -EINVAL;
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 4dcddf83326f..b192c661caa6 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -10,6 +10,7 @@ config GFS2_FS
select SLOW_WORK
select QUOTA
select QUOTACTL
+ select FS_JOURNAL_INFO
help
A cluster filesystem.
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c5dad1eb7b91..0dc34621f6a6 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -85,11 +85,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
buf[0] = '\0';
if (!gfs2_uuid_valid(uuid))
return 0;
- return snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X-%02X%02X-"
- "%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
- uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
- uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15]);
+ return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
}
static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -575,14 +571,8 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
if (!sdp->sd_args.ar_spectator)
add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
- if (gfs2_uuid_valid(uuid)) {
- add_uevent_var(env, "UUID=%02X%02X%02X%02X-%02X%02X-%02X%02X-"
- "%02X%02X-%02X%02X%02X%02X%02X%02X",
- uuid[0], uuid[1], uuid[2], uuid[3], uuid[4],
- uuid[5], uuid[6], uuid[7], uuid[8], uuid[9],
- uuid[10], uuid[11], uuid[12], uuid[13],
- uuid[14], uuid[15]);
- }
+ if (gfs2_uuid_valid(uuid))
+ add_uevent_var(env, "UUID=%pUB", uuid);
return 0;
}
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 6d98f116ca03..424b0337f524 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -289,6 +289,10 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
err = hfs_brec_find(&src_fd);
if (err)
goto out;
+ if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
+ err = -EIO;
+ goto out;
+ }
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 7c69b98a2e45..2b3b8611b41b 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -79,6 +79,11 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
filp->f_pos++;
/* fall through */
case 1:
+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+ err = -EIO;
+ goto out;
+ }
+
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
if (entry.type != HFS_CDR_THD) {
printk(KERN_ERR "hfs: bad catalog folder thread\n");
@@ -109,6 +114,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
err = -EIO;
goto out;
}
+
+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+ err = -EIO;
+ goto out;
+ }
+
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
type = entry.type;
len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index f7fcbe49da72..5ed7252b7b23 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -409,8 +409,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
/* try to get the root inode */
hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
- if (!res)
+ if (!res) {
+ if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ res = -EIO;
+ goto bail;
+ }
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ }
if (res) {
hfs_find_exit(&fd);
goto bail_no_root;
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig
index 4e28beeed157..a8408983abd4 100644
--- a/fs/jbd/Kconfig
+++ b/fs/jbd/Kconfig
@@ -1,5 +1,6 @@
config JBD
tristate
+ select FS_JOURNAL_INFO
help
This is a generic journalling layer for block devices. It is
currently used by the ext3 file system, but it could also be
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index f32f346f4b0a..0f7d1ceafdfd 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -1,6 +1,7 @@
config JBD2
tristate
select CRC32
+ select FS_JOURNAL_INFO
help
This is a generic journaling layer for block devices that support
both 32-bit and 64-bit block numbers. It is currently used by
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 293fa0528a6e..73ab220354df 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -78,11 +78,6 @@ nfs4_callback_svc(void *vrqstp)
set_freezable();
- /*
- * FIXME: do we really need to run this under the BKL? If so, please
- * add a comment about what it's intended to protect.
- */
- lock_kernel();
while (!kthread_should_stop()) {
/*
* Listen for a request on the socket
@@ -104,7 +99,6 @@ nfs4_callback_svc(void *vrqstp)
preverr = err;
svc_process(rqstp);
}
- unlock_kernel();
return 0;
}
@@ -160,11 +154,6 @@ nfs41_callback_svc(void *vrqstp)
set_freezable();
- /*
- * FIXME: do we really need to run this under the BKL? If so, please
- * add a comment about what it's intended to protect.
- */
- lock_kernel();
while (!kthread_should_stop()) {
prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
@@ -183,7 +172,6 @@ nfs41_callback_svc(void *vrqstp)
}
finish_wait(&serv->sv_cb_waitq, &wq);
}
- unlock_kernel();
return 0;
}
@@ -397,6 +385,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
*/
static struct svc_version *nfs4_callback_version[] = {
[1] = &nfs4_callback_version1,
+ [4] = &nfs4_callback_version4,
};
static struct svc_stat nfs4_callback_stats;
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 07baa8254ca1..d4036be0b589 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -106,6 +106,19 @@ struct cb_sequenceres {
extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
struct cb_sequenceres *res);
+extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid);
+
+#define RCA4_TYPE_MASK_RDATA_DLG 0
+#define RCA4_TYPE_MASK_WDATA_DLG 1
+
+struct cb_recallanyargs {
+ struct sockaddr *craa_addr;
+ uint32_t craa_objs_to_keep;
+ uint32_t craa_type_mask;
+};
+
+extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
#endif /* CONFIG_NFS_V4_1 */
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
@@ -114,8 +127,9 @@ extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
#ifdef CONFIG_NFS_V4
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
extern void nfs_callback_down(int minorversion);
+extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid);
#endif /* CONFIG_NFS_V4 */
-
/*
* nfs41: Callbacks are expected to not cause substantial latency,
* so we limit their concurrency to 1 by setting up the maximum number
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index b7da1f54da68..defa9b4c470e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -61,6 +61,16 @@ out:
return res->status;
}
+static int (*nfs_validate_delegation_stateid(struct nfs_client *clp))(struct nfs_delegation *, const nfs4_stateid *)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_minorversion > 0)
+ return nfs41_validate_delegation_stateid;
+#endif
+ return nfs4_validate_delegation_stateid;
+}
+
+
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
{
struct nfs_client *clp;
@@ -81,7 +91,8 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
inode = nfs_delegation_find_inode(clp, &args->fh);
if (inode != NULL) {
/* Set up a helper thread to actually return the delegation */
- switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
+ switch (nfs_async_inode_return_delegation(inode, &args->stateid,
+ nfs_validate_delegation_stateid(clp))) {
case 0:
res = 0;
break;
@@ -102,8 +113,31 @@ out:
return res;
}
+int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
+{
+ if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
+ sizeof(delegation->stateid.data)) != 0)
+ return 0;
+ return 1;
+}
+
#if defined(CONFIG_NFS_V4_1)
+int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
+{
+ if (delegation == NULL)
+ return 0;
+
+ /* seqid is 4 bytes long */
+ if (((u32 *) &stateid->data)[0] != 0)
+ return 0;
+ if (memcmp(&delegation->stateid.data[4], &stateid->data[4],
+ sizeof(stateid->data)-4))
+ return 0;
+
+ return 1;
+}
+
/*
* Validate the sequenceID sent by the server.
* Return success if the sequenceID is one more than what we last saw on
@@ -227,4 +261,32 @@ out:
return res->csr_status;
}
+unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+{
+ struct nfs_client *clp;
+ int status;
+ fmode_t flags = 0;
+
+ status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ clp = nfs_find_client(args->craa_addr, 4);
+ if (clp == NULL)
+ goto out;
+
+ dprintk("NFS: RECALL_ANY callback request from %s\n",
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
+ &args->craa_type_mask))
+ flags = FMODE_READ;
+ if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
+ &args->craa_type_mask))
+ flags |= FMODE_WRITE;
+
+ if (flags)
+ nfs_expire_all_delegation_types(clp, flags);
+ status = htonl(NFS4_OK);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 76b0aa0f73bf..8e1a2511c8be 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -23,6 +23,7 @@
#if defined(CONFIG_NFS_V4_1)
#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
4 + 1 + 3)
+#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */
#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -326,6 +327,25 @@ out_free:
goto out;
}
+static unsigned decode_recallany_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_recallanyargs *args)
+{
+ uint32_t *p;
+
+ args->craa_addr = svc_addr(rqstp);
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+ args->craa_objs_to_keep = ntohl(*p++);
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+ args->craa_type_mask = ntohl(*p);
+
+ return 0;
+}
+
#endif /* CONFIG_NFS_V4_1 */
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
@@ -533,6 +553,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_GETATTR:
case OP_CB_RECALL:
case OP_CB_SEQUENCE:
+ case OP_CB_RECALL_ANY:
*op = &callback_ops[op_nr];
break;
@@ -540,7 +561,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_NOTIFY_DEVICEID:
case OP_CB_NOTIFY:
case OP_CB_PUSH_DELEG:
- case OP_CB_RECALL_ANY:
case OP_CB_RECALLABLE_OBJ_AVAIL:
case OP_CB_RECALL_SLOT:
case OP_CB_WANTS_CANCELLED:
@@ -688,6 +708,11 @@ static struct callback_op callback_ops[] = {
.encode_res = (callback_encode_res_t)encode_cb_sequence_res,
.res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
},
+ [OP_CB_RECALL_ANY] = {
+ .process_op = (callback_process_op_t)nfs4_callback_recallany,
+ .decode_args = (callback_decode_arg_t)decode_recallany_args,
+ .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
+ },
#endif /* CONFIG_NFS_V4_1 */
};
@@ -718,3 +743,10 @@ struct svc_version nfs4_callback_version1 = {
.vs_dispatch = NULL,
};
+struct svc_version nfs4_callback_version4 = {
+ .vs_vers = 4,
+ .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
+ .vs_proc = nfs4_callback_procedures1,
+ .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
+ .vs_dispatch = NULL,
+};
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 99ea196f071f..ee77713ce68b 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1260,10 +1260,20 @@ error:
static void nfs4_session_set_rwsize(struct nfs_server *server)
{
#ifdef CONFIG_NFS_V4_1
+ struct nfs4_session *sess;
+ u32 server_resp_sz;
+ u32 server_rqst_sz;
+
if (!nfs4_has_session(server->nfs_client))
return;
- server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
- server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz;
+ sess = server->nfs_client->cl_session;
+ server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+
+ if (server->rsize > server_resp_sz)
+ server->rsize = server_resp_sz;
+ if (server->wsize > server_rqst_sz)
+ server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 6dd48a4405b4..2563bebc4c67 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -92,7 +92,7 @@ out:
return status;
}
-static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *ctx;
@@ -116,10 +116,11 @@ again:
err = nfs_delegation_claim_locks(ctx, state);
put_nfs_open_context(ctx);
if (err != 0)
- return;
+ return err;
goto again;
}
spin_unlock(&inode->i_lock);
+ return 0;
}
/*
@@ -261,30 +262,34 @@ static void nfs_msync_inode(struct inode *inode)
/*
* Basic procedure for returning a delegation to the server
*/
-static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
+static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ int err;
- nfs_msync_inode(inode);
/*
* Guard against new delegated open/lock/unlock calls and against
* state recovery
*/
down_write(&nfsi->rwsem);
- nfs_delegation_claim_opens(inode, &delegation->stateid);
+ err = nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
- nfs_msync_inode(inode);
+ if (err)
+ goto out;
- return nfs_do_return_delegation(inode, delegation, 1);
+ err = nfs_do_return_delegation(inode, delegation, issync);
+out:
+ return err;
}
/*
* Return all delegations that have been marked for return
*/
-void nfs_client_return_marked_delegations(struct nfs_client *clp)
+int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
struct inode *inode;
+ int err = 0;
restart:
rcu_read_lock();
@@ -298,12 +303,18 @@ restart:
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
- if (delegation != NULL)
- __nfs_inode_return_delegation(inode, delegation);
+ if (delegation != NULL) {
+ filemap_flush(inode->i_mapping);
+ err = __nfs_inode_return_delegation(inode, delegation, 0);
+ }
iput(inode);
- goto restart;
+ if (!err)
+ goto restart;
+ set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ return err;
}
rcu_read_unlock();
+ return 0;
}
/*
@@ -338,8 +349,10 @@ int nfs_inode_return_delegation(struct inode *inode)
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(nfsi, NULL);
spin_unlock(&clp->cl_lock);
- if (delegation != NULL)
- err = __nfs_inode_return_delegation(inode, delegation);
+ if (delegation != NULL) {
+ nfs_msync_inode(inode);
+ err = __nfs_inode_return_delegation(inode, delegation, 1);
+ }
}
return err;
}
@@ -368,33 +381,47 @@ void nfs_super_return_all_delegations(struct super_block *sb)
spin_unlock(&delegation->lock);
}
rcu_read_unlock();
- nfs_client_return_marked_delegations(clp);
+ if (nfs_client_return_marked_delegations(clp) != 0)
+ nfs4_schedule_state_manager(clp);
}
-static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
+static
+void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
struct nfs_delegation *delegation;
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
+ continue;
+ if (delegation->type & flags)
+ nfs_mark_return_delegation(clp, delegation);
}
rcu_read_unlock();
}
+static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
+{
+ nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
+}
+
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
nfs4_schedule_state_manager(clp);
}
-void nfs_expire_all_delegations(struct nfs_client *clp)
+void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
- nfs_client_mark_return_all_delegations(clp);
+ nfs_client_mark_return_all_delegation_types(clp, flags);
nfs_delegation_run_state_manager(clp);
}
+void nfs_expire_all_delegations(struct nfs_client *clp)
+{
+ nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
+}
+
/*
* Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
*/
@@ -413,8 +440,7 @@ static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *c
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
continue;
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ nfs_mark_return_delegation(clp, delegation);
}
rcu_read_unlock();
}
@@ -428,18 +454,21 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
/*
* Asynchronous delegation recall!
*/
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
+ int (*validate_stateid)(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid))
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
- sizeof(delegation->stateid.data)) != 0) {
+
+ if (!validate_stateid(delegation, stateid)) {
rcu_read_unlock();
return -ENOENT;
}
+
nfs_mark_return_delegation(clp, delegation);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 09f383795174..944b627ec6e1 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -34,15 +34,18 @@ enum {
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
int nfs_inode_return_delegation(struct inode *inode);
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
+ int (*validate_stateid)(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid));
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
void nfs_super_return_all_delegations(struct super_block *sb);
void nfs_expire_all_delegations(struct nfs_client *clp);
+void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
void nfs_handle_cb_pathdown(struct nfs_client *clp);
-void nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_client_return_marked_delegations(struct nfs_client *clp);
void nfs_delegation_mark_reclaim(struct nfs_client *clp);
void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7cb298525eef..2c5ace4f00a7 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1579,55 +1579,46 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *dentry = NULL, *rehash = NULL;
int error = -EBUSY;
- /*
- * To prevent any new references to the target during the rename,
- * we unhash the dentry and free the inode in advance.
- */
- if (!d_unhashed(new_dentry)) {
- d_drop(new_dentry);
- rehash = new_dentry;
- }
-
dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
atomic_read(&new_dentry->d_count));
/*
- * First check whether the target is busy ... we can't
- * safely do _any_ rename if the target is in use.
- *
- * For files, make a copy of the dentry and then do a
- * silly-rename. If the silly-rename succeeds, the
- * copied dentry is hashed and becomes the new target.
+ * For non-directories, check whether the target is busy and if so,
+ * make a copy of the dentry and then do a silly-rename. If the
+ * silly-rename succeeds, the copied dentry is hashed and becomes
+ * the new target.
*/
- if (!new_inode)
- goto go_ahead;
- if (S_ISDIR(new_inode->i_mode)) {
- error = -EISDIR;
- if (!S_ISDIR(old_inode->i_mode))
- goto out;
- } else if (atomic_read(&new_dentry->d_count) > 2) {
- int err;
- /* copy the target dentry's name */
- dentry = d_alloc(new_dentry->d_parent,
- &new_dentry->d_name);
- if (!dentry)
- goto out;
+ if (new_inode && !S_ISDIR(new_inode->i_mode)) {
+ /*
+ * To prevent any new references to the target during the
+ * rename, we unhash the dentry in advance.
+ */
+ if (!d_unhashed(new_dentry)) {
+ d_drop(new_dentry);
+ rehash = new_dentry;
+ }
+
+ if (atomic_read(&new_dentry->d_count) > 2) {
+ int err;
+
+ /* copy the target dentry's name */
+ dentry = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!dentry)
+ goto out;
- /* silly-rename the existing target ... */
- err = nfs_sillyrename(new_dir, new_dentry);
- if (!err) {
- new_dentry = rehash = dentry;
+ /* silly-rename the existing target ... */
+ err = nfs_sillyrename(new_dir, new_dentry);
+ if (err)
+ goto out;
+
+ new_dentry = dentry;
new_inode = NULL;
- /* instantiate the replacement target */
- d_instantiate(new_dentry, NULL);
- } else if (atomic_read(&new_dentry->d_count) > 1)
- /* dentry still busy? */
- goto out;
+ }
}
-go_ahead:
/*
* ... prune child dentries and writebacks if needed.
*/
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index f4d54ba97cc6..95e1ca765d47 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -146,7 +146,7 @@ static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
return 0;
}
-struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
+static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
struct nfs_dns_ent *key)
{
struct cache_head *ch;
@@ -159,7 +159,7 @@ struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
return container_of(ch, struct nfs_dns_ent, h);
}
-struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
+static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
struct nfs_dns_ent *new,
struct nfs_dns_ent *key)
{
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e21b1bb9972f..29e464d23b32 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -30,6 +30,15 @@ static inline int nfs4_has_session(const struct nfs_client *clp)
return 0;
}
+static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp))
+ return (clp->cl_session->flags & SESSION4_PERSIST);
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -156,6 +165,7 @@ struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentr
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
+extern struct svc_version nfs4_callback_version4;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
@@ -177,24 +187,14 @@ extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
-/* nfs4proc.c */
-static inline void nfs4_restart_rpc(struct rpc_task *task,
- const struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp) &&
- test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
- rpc_restart_call_prepare(task);
- return;
- }
-#endif /* CONFIG_NFS_V4_1 */
- rpc_restart_call(task);
-}
-
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
#endif
+#ifdef CONFIG_NFS_V4_1
+extern const u32 nfs41_maxread_overhead;
+extern const u32 nfs41_maxwrite_overhead;
+#endif
/* nfs4proc.c */
#ifdef CONFIG_NFS_V4
@@ -273,20 +273,6 @@ extern int _nfs4_call_sync_session(struct nfs_server *server,
struct nfs4_sequence_res *res,
int cache_reply);
-#ifdef CONFIG_NFS_V4_1
-extern void nfs41_sequence_free_slot(const struct nfs_client *,
- struct nfs4_sequence_res *res);
-#endif /* CONFIG_NFS_V4_1 */
-
-static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
- struct nfs4_sequence_res *res)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp))
- nfs41_sequence_free_slot(clp, res);
-#endif /* CONFIG_NFS_V4_1 */
-}
-
/*
* Determine the device name as a string
*/
@@ -380,3 +366,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
return ((unsigned long)len + (unsigned long)base +
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
+
+/*
+ * Helper for restarting RPC calls in the possible presence of NFSv4.1
+ * sessions.
+ */
+static inline void nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
+{
+ if (nfs4_has_session(clp))
+ rpc_restart_call_prepare(task);
+ else
+ rpc_restart_call(task);
+}
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index ceda50aad73c..46d779abafd3 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -25,13 +25,7 @@ struct nfs_iostats {
static inline void nfs_inc_server_stats(const struct nfs_server *server,
enum nfs_stat_eventcounters stat)
{
- struct nfs_iostats *iostats;
- int cpu;
-
- cpu = get_cpu();
- iostats = per_cpu_ptr(server->io_stats, cpu);
- iostats->events[stat]++;
- put_cpu();
+ this_cpu_inc(server->io_stats->events[stat]);
}
static inline void nfs_inc_stats(const struct inode *inode,
@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
enum nfs_stat_bytecounters stat,
unsigned long addend)
{
- struct nfs_iostats *iostats;
- int cpu;
-
- cpu = get_cpu();
- iostats = per_cpu_ptr(server->io_stats, cpu);
- iostats->bytes[stat] += addend;
- put_cpu();
+ this_cpu_add(server->io_stats->bytes[stat], addend);
}
static inline void nfs_add_stats(const struct inode *inode,
@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
enum nfs_stat_fscachecounters stat,
unsigned long addend)
{
- struct nfs_iostats *iostats;
- int cpu;
-
- cpu = get_cpu();
- iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
- iostats->fscache[stat] += addend;
- put_cpu();
+ this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
}
#endif
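The iostat.h hunks above replace the open-coded get_cpu()/per_cpu_ptr()/put_cpu() sequences with this_cpu_inc() and this_cpu_add(), which fold the per-cpu address calculation and the preemption protection into a single operation. A self-contained sketch of the same idiom on a hypothetical statically defined per-cpu counter (all names below are placeholders, not from the patch):

#include <linux/percpu.h>

struct example_counters {
	unsigned long	events;
	unsigned long	bytes;
};

static DEFINE_PER_CPU(struct example_counters, example_counters);

static inline void example_account(unsigned long nbytes)
{
	/* replaces: get_cpu(); <per-cpu pointer>->events++; put_cpu(); */
	this_cpu_inc(example_counters.events);
	this_cpu_add(example_counters.bytes, nbytes);
}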
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6ea07a3c75d4..7e57b04e4014 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,7 +44,8 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
- NFS4CLNT_SESSION_SETUP,
+ NFS4CLNT_SESSION_RESET,
+ NFS4CLNT_SESSION_DRAINING,
};
/*
@@ -180,6 +181,7 @@ struct nfs4_state_recovery_ops {
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
+ int (*reclaim_complete)(struct nfs_client *);
};
struct nfs4_state_maintenance_ops {
@@ -200,9 +202,11 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
+extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -218,9 +222,11 @@ extern int nfs4_setup_sequence(struct nfs_client *clp,
int cache_reply, struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
-extern int nfs4_proc_create_session(struct nfs_client *, int reset);
+extern int nfs4_proc_create_session(struct nfs_client *);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
+extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
+ struct nfs_fsinfo *fsinfo);
#else /* CONFIG_NFS_v4_1 */
static inline int nfs4_setup_sequence(struct nfs_client *clp,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -267,6 +273,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_state_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
+extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
@@ -287,6 +294,7 @@ struct nfs4_mount_data;
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
+extern struct svc_version nfs4_callback_version4;
#else
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 741a562177fc..9f5f11ecfd93 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -270,11 +270,18 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ nfs4_schedule_state_recovery(clp);
exception->retry = 1;
- /* FALLTHROUGH */
+ break;
#endif /* !defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
+ if (exception->timeout > HZ) {
+ /* We have retried a decent amount, time to
+ * fail
+ */
+ ret = -EBUSY;
+ break;
+ }
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
ret = nfs4_delay(server->client, &exception->timeout);
@@ -311,48 +318,54 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
* so we need to scan down from highest_used_slotid to 0 looking for the now
* highest slotid in use.
* If none found, highest_used_slotid is set to -1.
+ *
+ * Must be called while holding tbl->slot_tbl_lock
*/
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
int slotid = free_slotid;
- spin_lock(&tbl->slot_tbl_lock);
/* clear used bit in bitmap */
__clear_bit(slotid, tbl->used_slots);
/* update highest_used_slotid when it is freed */
if (slotid == tbl->highest_used_slotid) {
slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
- if (slotid >= 0 && slotid < tbl->max_slots)
+ if (slotid < tbl->max_slots)
tbl->highest_used_slotid = slotid;
else
tbl->highest_used_slotid = -1;
}
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
- spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
free_slotid, tbl->highest_used_slotid);
}
-void nfs41_sequence_free_slot(const struct nfs_client *clp,
+static void nfs41_sequence_free_slot(const struct nfs_client *clp,
struct nfs4_sequence_res *res)
{
struct nfs4_slot_table *tbl;
- if (!nfs4_has_session(clp)) {
- dprintk("%s: No session\n", __func__);
- return;
- }
tbl = &clp->cl_session->fc_slot_table;
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
- dprintk("%s: No slot\n", __func__);
/* just wake up the next guy waiting since
* we may have not consumed a slot after all */
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ dprintk("%s: No slot\n", __func__);
return;
}
+
+ spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slotid);
+
+ /* Signal state manager thread if session is drained */
+ if (test_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
+ if (tbl->highest_used_slotid == -1) {
+ dprintk("%s COMPLETE: Session Drained\n", __func__);
+ complete(&clp->cl_session->complete);
+ }
+ } else
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ spin_unlock(&tbl->slot_tbl_lock);
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
}
@@ -377,10 +390,10 @@ static void nfs41_sequence_done(struct nfs_client *clp,
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
goto out;
- tbl = &clp->cl_session->fc_slot_table;
- slot = tbl->slots + res->sr_slotid;
-
+ /* Check the SEQUENCE operation status */
if (res->sr_status == 0) {
+ tbl = &clp->cl_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
/* Update the slot's sequence and clientid lease timer */
++slot->seq_nr;
timestamp = res->sr_renewal_time;
@@ -388,7 +401,8 @@ static void nfs41_sequence_done(struct nfs_client *clp,
if (time_before(clp->cl_last_renewal, timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
- return;
+ /* Check sequence flags */
+ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
}
out:
/* The session may be reset by one of the error handlers. */
@@ -429,24 +443,6 @@ out:
return ret_id;
}
-static int nfs4_recover_session(struct nfs4_session *session)
-{
- struct nfs_client *clp = session->clp;
- unsigned int loop;
- int ret;
-
- for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
- ret = nfs4_wait_clnt_recover(clp);
- if (ret != 0)
- break;
- if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
- break;
- nfs4_schedule_state_manager(clp);
- ret = -EIO;
- }
- return ret;
-}
-
static int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -455,7 +451,6 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
- int status = 0;
u8 slotid;
dprintk("--> %s\n", __func__);
@@ -468,21 +463,15 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
- if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
- if (tbl->highest_used_slotid != -1) {
- rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
- spin_unlock(&tbl->slot_tbl_lock);
- dprintk("<-- %s: Session reset: draining\n", __func__);
- return -EAGAIN;
- }
-
- /* The slot table is empty; start the reset thread */
- dprintk("%s Session Reset\n", __func__);
+ if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state)) {
+ /*
+ * The state manager will wait until the slot table is empty.
+ * Schedule the reset thread
+ */
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
- status = nfs4_recover_session(session);
- if (status)
- return status;
- spin_lock(&tbl->slot_tbl_lock);
+ dprintk("%s Schedule Session Reset\n", __func__);
+ return -EAGAIN;
}
slotid = nfs4_find_slot(tbl, task);
@@ -527,7 +516,7 @@ int nfs4_setup_sequence(struct nfs_client *clp,
goto out;
ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
task);
- if (ret != -EAGAIN) {
+ if (ret && ret != -EAGAIN) {
/* terminate rpc task */
task->tk_status = ret;
task->tk_action = NULL;
@@ -561,7 +550,6 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
struct nfs41_call_sync_data *data = calldata;
nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
- nfs41_sequence_free_slot(data->clp, data->seq_res);
}
struct rpc_call_ops nfs41_call_sync_ops = {
@@ -637,15 +625,6 @@ static void nfs4_sequence_done(const struct nfs_server *server,
#endif /* CONFIG_NFS_V4_1 */
}
-/* no restart, therefore free slot here */
-static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
- struct nfs4_sequence_res *res,
- int rpc_status)
-{
- nfs4_sequence_done(server, res, rpc_status);
- nfs4_sequence_free_slot(server->nfs_client, res);
-}
-
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -720,9 +699,15 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_EXCL) {
- u32 *s = (u32 *) p->o_arg.u.verifier.data;
- s[0] = jiffies;
- s[1] = current->pid;
+ if (nfs4_has_persistent_session(server->nfs_client)) {
+ /* GUARDED */
+ p->o_arg.u.attrs = &p->attrs;
+ memcpy(&p->attrs, attrs, sizeof(p->attrs));
+ } else { /* EXCLUSIVE4_1 */
+ u32 *s = (u32 *) p->o_arg.u.verifier.data;
+ s[0] = jiffies;
+ s[1] = current->pid;
+ }
} else if (flags & O_CREAT) {
p->o_arg.u.attrs = &p->attrs;
memcpy(&p->attrs, attrs, sizeof(p->attrs));
@@ -776,13 +761,16 @@ static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode
goto out;
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
- ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
+ ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
+ && state->n_rdonly != 0;
break;
case FMODE_WRITE:
- ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0;
+ ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
+ && state->n_wronly != 0;
break;
case FMODE_READ|FMODE_WRITE:
- ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
+ ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
+ && state->n_rdwr != 0;
}
out:
return ret;
@@ -1183,6 +1171,14 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
case -ENOENT:
case -ESTALE:
goto out;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ nfs4_schedule_state_recovery(
+ server->nfs_client);
+ goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
@@ -1336,8 +1332,8 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
- nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res,
+ task->tk_status);
if (RPC_ASSASSINATED(task))
return;
@@ -1488,7 +1484,7 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
return ret;
}
-static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
+static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
@@ -1496,10 +1492,16 @@ static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4
do {
err = _nfs4_open_expired(ctx, state);
- if (err != -NFS4ERR_DELAY)
- break;
- nfs4_handle_exception(server, err, &exception);
+ switch (err) {
+ default:
+ goto out;
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ nfs4_handle_exception(server, err, &exception);
+ err = 0;
+ }
} while (exception.retry);
+out:
return err;
}
@@ -1712,6 +1714,18 @@ static void nfs4_free_closedata(void *data)
kfree(calldata);
}
+static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
+ fmode_t fmode)
+{
+ spin_lock(&state->owner->so_lock);
+ if (!(fmode & FMODE_READ))
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ if (!(fmode & FMODE_WRITE))
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_unlock(&state->owner->so_lock);
+}
+
static void nfs4_close_done(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
@@ -1728,6 +1742,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
case 0:
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
+ nfs4_close_clear_stateid_flags(state,
+ calldata->arg.fmode);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
@@ -1737,11 +1753,10 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs_restart_rpc(task, server->nfs_client);
return;
}
}
- nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
@@ -1749,38 +1764,39 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
- int clear_rd, clear_wr, clear_rdwr;
+ int call_close = 0;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
- clear_rd = clear_wr = clear_rdwr = 0;
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+ calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
spin_lock(&state->owner->so_lock);
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
- clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags);
- clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ calldata->arg.fmode &= ~FMODE_READ;
}
if (state->n_wronly == 0) {
- clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags);
- clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ calldata->arg.fmode &= ~FMODE_WRITE;
}
}
spin_unlock(&state->owner->so_lock);
- if (!clear_rd && !clear_wr && !clear_rdwr) {
+
+ if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
return;
}
+
+ if (calldata->arg.fmode == 0)
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+
nfs_fattr_init(calldata->res.fattr);
- if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
- task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- calldata->arg.fmode = FMODE_READ;
- } else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
- task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- calldata->arg.fmode = FMODE_WRITE;
- }
calldata->timestamp = jiffies;
if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
&calldata->arg.seq_args, &calldata->res.seq_res,
@@ -1981,7 +1997,7 @@ out_drop:
return 0;
}
-void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
+static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
if (ctx->state == NULL)
return;
@@ -2532,7 +2548,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
- nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, &res->dir_attr);
return 1;
@@ -2971,11 +2986,10 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
dprintk("--> %s\n", __func__);
- /* nfs4_sequence_free_slot called in the read rpc_call_done */
nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
@@ -2995,12 +3009,11 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
- /* slot is freed in nfs_writeback_done */
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -3028,11 +3041,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
- nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
- &data->res.seq_res);
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
@@ -3350,7 +3361,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ nfs4_schedule_state_recovery(clp);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
@@ -3483,12 +3494,23 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
- nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->res.server, &data->res.seq_res,
+ task->tk_status);
- data->rpc_status = task->tk_status;
- if (data->rpc_status == 0)
+ switch (task->tk_status) {
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case 0:
renew_lease(data->res.server, data->timestamp);
+ break;
+ default:
+ if (nfs4_async_handle_error(task, data->res.server, NULL) ==
+ -EAGAIN) {
+ nfs_restart_rpc(task, data->res.server->nfs_client);
+ return;
+ }
+ }
+ data->rpc_status = task->tk_status;
}
static void nfs4_delegreturn_release(void *calldata)
@@ -3741,11 +3763,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
- nfs4_restart_rpc(task,
- calldata->server->nfs_client);
+ nfs_restart_rpc(task,
+ calldata->server->nfs_client);
}
- nfs4_sequence_free_slot(calldata->server->nfs_client,
- &calldata->res.seq_res);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3927,8 +3947,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
dprintk("%s: begin!\n", __func__);
- nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->server, &data->res.seq_res,
+ task->tk_status);
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
@@ -4049,10 +4069,16 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, 0);
- if (err != -NFS4ERR_DELAY)
- break;
- nfs4_handle_exception(server, err, &exception);
+ switch (err) {
+ default:
+ goto out;
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ nfs4_handle_exception(server, err, &exception);
+ err = 0;
+ }
} while (exception.retry);
+out:
return err;
}
@@ -4172,6 +4198,11 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
case -NFS4ERR_EXPIRED:
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
nfs4_schedule_state_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
@@ -4296,7 +4327,7 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
* be in some phase of session reset.
*/
-static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
@@ -4318,6 +4349,9 @@ static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
+ /* Remove server-only flags */
+ args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
+
p = (u32 *)verifier.data;
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
@@ -4389,10 +4423,9 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
- nfs4_restart_rpc(task, data->clp);
+ nfs_restart_rpc(task, data->clp);
return;
}
- nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
dprintk("<-- %s\n", __func__);
}
@@ -4465,7 +4498,6 @@ static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
spin_lock(&tbl->slot_tbl_lock);
for (i = 0; i < max_slots; ++i)
tbl->slots[i].seq_nr = ivalue;
- tbl->highest_used_slotid = -1;
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
@@ -4515,7 +4547,6 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session)
static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
int max_slots, int ivalue)
{
- int i;
struct nfs4_slot *slot;
int ret = -ENOMEM;
@@ -4526,18 +4557,9 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
if (!slot)
goto out;
- for (i = 0; i < max_slots; ++i)
- slot[i].seq_nr = ivalue;
ret = 0;
spin_lock(&tbl->slot_tbl_lock);
- if (tbl->slots != NULL) {
- spin_unlock(&tbl->slot_tbl_lock);
- dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
- __func__, tbl, tbl->slots);
- WARN_ON(1);
- goto out_free;
- }
tbl->max_slots = max_slots;
tbl->slots = slot;
tbl->highest_used_slotid = -1; /* no slot is currently used */
@@ -4547,10 +4569,6 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
-
-out_free:
- kfree(slot);
- goto out;
}
/*
@@ -4558,17 +4576,24 @@ out_free:
*/
static int nfs4_init_slot_tables(struct nfs4_session *session)
{
- int status;
+ struct nfs4_slot_table *tbl;
+ int status = 0;
- status = nfs4_init_slot_table(&session->fc_slot_table,
- session->fc_attrs.max_reqs, 1);
- if (status)
- return status;
+ tbl = &session->fc_slot_table;
+ if (tbl->slots == NULL) {
+ status = nfs4_init_slot_table(tbl,
+ session->fc_attrs.max_reqs, 1);
+ if (status)
+ return status;
+ }
- status = nfs4_init_slot_table(&session->bc_slot_table,
- session->bc_attrs.max_reqs, 0);
- if (status)
- nfs4_destroy_slot_tables(session);
+ tbl = &session->bc_slot_table;
+ if (tbl->slots == NULL) {
+ status = nfs4_init_slot_table(tbl,
+ session->bc_attrs.max_reqs, 0);
+ if (status)
+ nfs4_destroy_slot_tables(session);
+ }
return status;
}
@@ -4582,7 +4607,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
if (!session)
return NULL;
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
/*
* The create session reply races with the server back
* channel probe. Mark the client NFS_CS_SESSION_INITING
@@ -4590,12 +4614,15 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
* nfs_client struct
*/
clp->cl_cons_state = NFS_CS_SESSION_INITING;
+ init_completion(&session->complete);
tbl = &session->fc_slot_table;
+ tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
tbl = &session->bc_slot_table;
+ tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
@@ -4747,11 +4774,10 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
-int nfs4_proc_create_session(struct nfs_client *clp, int reset)
+int nfs4_proc_create_session(struct nfs_client *clp)
{
int status;
unsigned *ptr;
- struct nfs_fsinfo fsinfo;
struct nfs4_session *session = clp->cl_session;
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
@@ -4760,35 +4786,19 @@ int nfs4_proc_create_session(struct nfs_client *clp, int reset)
if (status)
goto out;
- /* Init or reset the fore channel */
- if (reset)
- status = nfs4_reset_slot_tables(session);
- else
- status = nfs4_init_slot_tables(session);
- dprintk("fore channel slot table initialization returned %d\n", status);
+ /* Init and reset the fore channel */
+ status = nfs4_init_slot_tables(session);
+ dprintk("slot table initialization returned %d\n", status);
+ if (status)
+ goto out;
+ status = nfs4_reset_slot_tables(session);
+ dprintk("slot table reset returned %d\n", status);
if (status)
goto out;
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
-
- if (reset)
- /* Lease time is aleady set */
- goto out;
-
- /* Get the lease time */
- status = nfs4_proc_get_lease_time(clp, &fsinfo);
- if (status == 0) {
- /* Update lease time and schedule renewal */
- spin_lock(&clp->cl_lock);
- clp->cl_lease_time = fsinfo.lease_time * HZ;
- clp->cl_last_renewal = jiffies;
- clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- spin_unlock(&clp->cl_lock);
-
- nfs4_schedule_state_renewal(clp);
- }
out:
dprintk("<-- %s\n", __func__);
return status;
@@ -4827,13 +4837,16 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
+ struct nfs4_session *session;
int ret;
if (!nfs4_has_session(clp))
return 0;
- clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
- clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+ session = clp->cl_session;
+ session->fc_attrs.max_rqst_sz = server->wsize + nfs41_maxwrite_overhead;
+ session->fc_attrs.max_resp_sz = server->rsize + nfs41_maxread_overhead;
+
ret = nfs4_recover_expired_lease(server);
if (!ret)
ret = nfs4_check_client_ready(clp);
@@ -4872,11 +4885,10 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
if (_nfs4_async_handle_error(task, NULL, clp, NULL)
== -EAGAIN) {
- nfs4_restart_rpc(task, clp);
+ nfs_restart_rpc(task, clp);
return;
}
}
- nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
kfree(task->tk_msg.rpc_argp);
@@ -4931,6 +4943,109 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
&nfs41_sequence_ops, (void *)clp);
}
+struct nfs4_reclaim_complete_data {
+ struct nfs_client *clp;
+ struct nfs41_reclaim_complete_args arg;
+ struct nfs41_reclaim_complete_res res;
+};
+
+static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+
+ if (nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args,
+ &calldata->res.seq_res, 0, task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
+ struct nfs4_sequence_res *res = &calldata->res.seq_res;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_sequence_done(clp, res, task->tk_status);
+ switch (task->tk_status) {
+ case 0:
+ case -NFS4ERR_COMPLETE_ALREADY:
+ break;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ /*
+ * Handle the session error, but do not retry the operation, as
+ * we have no way of telling whether the clientid had to be
+ * reset before we got our reply. If reset, a new wave of
+ * reclaim operations will follow, containing their own reclaim
+ * complete. We don't want our retry to get on the way of
+ * recovery by incorrectly indicating to the server that we're
+ * done reclaiming state since the process had to be restarted.
+ */
+ _nfs4_async_handle_error(task, NULL, clp, NULL);
+ break;
+ default:
+ if (_nfs4_async_handle_error(
+ task, NULL, clp, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_free_reclaim_complete_data(void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+
+ kfree(calldata);
+}
+
+static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
+ .rpc_call_prepare = nfs4_reclaim_complete_prepare,
+ .rpc_call_done = nfs4_reclaim_complete_done,
+ .rpc_release = nfs4_free_reclaim_complete_data,
+};
+
+/*
+ * Issue a global reclaim complete.
+ */
+static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
+{
+ struct nfs4_reclaim_complete_data *calldata;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_reclaim_complete_call_ops,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = -ENOMEM;
+
+ dprintk("--> %s\n", __func__);
+ calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+ if (calldata == NULL)
+ goto out;
+ calldata->clp = clp;
+ calldata->arg.one_fs = 0;
+ calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+
+ msg.rpc_argp = &calldata->arg;
+ msg.rpc_resp = &calldata->res;
+ task_setup_data.callback_data = calldata;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ status = PTR_ERR(task);
+ rpc_put_task(task);
+out:
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -4948,8 +5063,9 @@ struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
- .establish_clid = nfs4_proc_exchange_id,
+ .establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
+ .reclaim_complete = nfs41_proc_reclaim_complete,
};
#endif /* CONFIG_NFS_V4_1 */
@@ -4968,7 +5084,7 @@ struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
- .establish_clid = nfs4_proc_exchange_id,
+ .establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2ef4fecf3984..e76427e6346f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -116,6 +116,68 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
#if defined(CONFIG_NFS_V4_1)
+static int nfs41_setup_state_renewal(struct nfs_client *clp)
+{
+ int status;
+ struct nfs_fsinfo fsinfo;
+
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ /* Update lease time and schedule renewal */
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = jiffies;
+ spin_unlock(&clp->cl_lock);
+
+ nfs4_schedule_state_renewal(clp);
+ }
+
+ return status;
+}
+
+static void nfs41_end_drain_session(struct nfs_client *clp,
+ struct nfs4_session *ses)
+{
+ if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state))
+ rpc_wake_up(&ses->fc_slot_table.slot_tbl_waitq);
+}
+
+static int nfs41_begin_drain_session(struct nfs_client *clp,
+ struct nfs4_session *ses)
+{
+ struct nfs4_slot_table *tbl = &ses->fc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+ if (tbl->highest_used_slotid != -1) {
+ INIT_COMPLETION(ses->complete);
+ spin_unlock(&tbl->slot_tbl_lock);
+ return wait_for_completion_interruptible(&ses->complete);
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+ return 0;
+}
+
+int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ int status;
+
+ status = nfs41_begin_drain_session(clp, clp->cl_session);
+ if (status != 0)
+ goto out;
+ status = nfs4_proc_exchange_id(clp, cred);
+ if (status != 0)
+ goto out;
+ status = nfs4_proc_create_session(clp);
+ if (status != 0)
+ goto out;
+ nfs41_end_drain_session(clp, clp->cl_session);
+ nfs41_setup_state_renewal(clp);
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+out:
+ return status;
+}
+
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
@@ -877,6 +939,10 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
goto out;
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
@@ -959,6 +1025,10 @@ restart:
case -NFS4ERR_NO_GRACE:
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
goto out_err;
}
nfs4_put_open_state(state);
@@ -1011,6 +1081,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
+static void nfs4_reclaim_complete(struct nfs_client *clp,
+ const struct nfs4_state_recovery_ops *ops)
+{
+ /* Notify the server we're done reclaiming our state */
+ if (ops->reclaim_complete)
+ (void)ops->reclaim_complete(clp);
+}
+
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
@@ -1020,6 +1098,9 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
return;
+ nfs4_reclaim_complete(clp,
+ nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
spin_lock(&sp->so_lock);
@@ -1046,25 +1127,25 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}
-static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
-{
- clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
-}
-
-static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
case -NFS4ERR_CB_PATH_DOWN:
nfs_handle_cb_pathdown(clp);
- break;
+ return 0;
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_end_reclaim_reboot(clp);
+ return 0;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_end_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ break;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
@@ -1072,8 +1153,11 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* Zero session reset errors */
+ return 0;
}
+ return error;
}
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1093,8 +1177,7 @@ restart:
if (status < 0) {
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
- nfs4_recovery_handle_error(clp, status);
- return status;
+ return nfs4_recovery_handle_error(clp, status);
}
nfs4_put_state_owner(sp);
goto restart;
@@ -1124,8 +1207,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
- nfs4_recovery_handle_error(clp, status);
- return status;
+ return nfs4_recovery_handle_error(clp, status);
}
static int nfs4_reclaim_lease(struct nfs_client *clp)
@@ -1151,55 +1233,65 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
}
#ifdef CONFIG_NFS_V4_1
-static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
+void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
- switch (err) {
- case -NFS4ERR_STALE_CLIENTID:
+ if (!flags)
+ return;
+ else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
- }
+ nfs4_state_start_reclaim_reboot(clp);
+ nfs4_schedule_state_recovery(clp);
+ } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+ SEQ4_STATUS_ADMIN_STATE_REVOKED |
+ SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
+ SEQ4_STATUS_LEASE_MOVED)) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_start_reclaim_nograce(clp);
+ nfs4_schedule_state_recovery(clp);
+ } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ SEQ4_STATUS_BACKCHANNEL_FAULT |
+ SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ nfs_expire_all_delegations(clp);
}
static int nfs4_reset_session(struct nfs_client *clp)
{
+ struct nfs4_session *ses = clp->cl_session;
int status;
+ status = nfs41_begin_drain_session(clp, ses);
+ if (status != 0)
+ return status;
+
status = nfs4_proc_destroy_session(clp->cl_session);
if (status && status != -NFS4ERR_BADSESSION &&
status != -NFS4ERR_DEADSESSION) {
- nfs4_session_recovery_handle_error(clp, status);
+ status = nfs4_recovery_handle_error(clp, status);
goto out;
}
memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
- status = nfs4_proc_create_session(clp, 1);
+ status = nfs4_proc_create_session(clp);
if (status)
- nfs4_session_recovery_handle_error(clp, status);
- /* fall through*/
-out:
- /* Wake up the next rpc task even on error */
- rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
- return status;
-}
-
-static int nfs4_initialize_session(struct nfs_client *clp)
-{
- int status;
+ status = nfs4_recovery_handle_error(clp, status);
- status = nfs4_proc_create_session(clp, 0);
- if (!status) {
- nfs_mark_client_ready(clp, NFS_CS_READY);
- } else if (status == -NFS4ERR_STALE_CLIENTID) {
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
- } else {
- nfs_mark_client_ready(clp, status);
+out:
+ /*
+ * Let the state manager reestablish state
+ * without waking other tasks yet.
+ */
+ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
+ /* Wake up the next rpc task */
+ nfs41_end_drain_session(clp, ses);
+ if (status == 0)
+ nfs41_setup_state_renewal(clp);
}
return status;
}
+
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */
/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
@@ -1234,7 +1326,8 @@ static void nfs4_state_manager(struct nfs_client *clp)
status = nfs4_reclaim_lease(clp);
if (status) {
nfs4_set_lease_expired(clp, status);
- if (status == -EAGAIN)
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED,
+ &clp->cl_state))
continue;
if (clp->cl_cons_state ==
NFS_CS_SESSION_INITING)
@@ -1242,55 +1335,51 @@ static void nfs4_state_manager(struct nfs_client *clp)
goto out_error;
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
status = nfs4_check_lease(clp);
- if (status != 0)
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
continue;
+ if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
+ goto out_error;
}
+
/* Initialize or reset the session */
- if (test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)
+ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
&& nfs4_has_session(clp)) {
- if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
- status = nfs4_initialize_session(clp);
- else
- status = nfs4_reset_session(clp);
- if (status) {
- if (status == -NFS4ERR_STALE_CLIENTID)
- continue;
+ status = nfs4_reset_session(clp);
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ continue;
+ if (status < 0)
goto out_error;
- }
}
+
/* First recover reboot state... */
- if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
+ if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
nfs4_reboot_recovery_ops[clp->cl_minorversion]);
- if (status == -NFS4ERR_STALE_CLIENTID)
- continue;
- if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
continue;
nfs4_state_end_reclaim_reboot(clp);
- continue;
+ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
+ continue;
+ if (status < 0)
+ goto out_error;
}
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
nfs4_nograce_recovery_ops[clp->cl_minorversion]);
- if (status < 0) {
- set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
- if (status == -NFS4ERR_STALE_CLIENTID)
- continue;
- if (status == -NFS4ERR_EXPIRED)
- continue;
- if (test_bit(NFS4CLNT_SESSION_SETUP,
- &clp->cl_state))
- continue;
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+ test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+ continue;
+ if (status < 0)
goto out_error;
- } else
- nfs4_state_end_reclaim_nograce(clp);
- continue;
}
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
@@ -1309,8 +1398,6 @@ static void nfs4_state_manager(struct nfs_client *clp)
out_error:
printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
" with error %d\n", clp->cl_hostname, -status);
- if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
- nfs4_state_end_reclaim_reboot(clp);
nfs4_clear_state_manager_bit(clp);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 20b4e30e6c82..e437fd6a819f 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -46,11 +46,13 @@
#include <linux/proc_fs.h>
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/msg_prot.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
+#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -134,7 +136,7 @@ static int nfs4_stat_to_errno(int);
#define decode_lookup_maxsz (op_decode_hdr_maxsz)
#define encode_share_access_maxsz \
(2)
-#define encode_createmode_maxsz (1 + encode_attrs_maxsz)
+#define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz)
#define encode_opentype_maxsz (1 + encode_createmode_maxsz)
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
@@ -299,6 +301,8 @@ static int nfs4_stat_to_errno(int);
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -676,6 +680,25 @@ static int nfs4_stat_to_errno(int);
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
+#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_reclaim_complete_maxsz)
+#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_reclaim_complete_maxsz)
+
+const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+ compound_encode_hdr_maxsz +
+ encode_sequence_maxsz +
+ encode_putfh_maxsz +
+ encode_getattr_maxsz) *
+ XDR_UNIT);
+
+const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+ compound_decode_hdr_maxsz +
+ decode_sequence_maxsz +
+ decode_putfh_maxsz) *
+ XDR_UNIT);
#endif /* CONFIG_NFS_V4_1 */
static const umode_t nfs_type2fmt[] = {
@@ -1140,6 +1163,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
+ struct nfs_client *clp;
p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
@@ -1148,8 +1172,23 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
- *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
- encode_nfs4_verifier(xdr, &arg->u.verifier);
+ clp = arg->server->nfs_client;
+ if (clp->cl_minorversion > 0) {
+ if (nfs4_has_persistent_session(clp)) {
+ *p = cpu_to_be32(NFS4_CREATE_GUARDED);
+ encode_attrs(xdr, arg->u.attrs, arg->server);
+ } else {
+ struct iattr dummy;
+
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
+ encode_nfs4_verifier(xdr, &arg->u.verifier);
+ dummy.ia_valid = 0;
+ encode_attrs(xdr, &dummy, arg->server);
+ }
+ } else {
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
+ encode_nfs4_verifier(xdr, &arg->u.verifier);
+ }
}
}
@@ -1592,6 +1631,19 @@ static void encode_destroy_session(struct xdr_stream *xdr,
hdr->nops++;
hdr->replen += decode_destroy_session_maxsz;
}
+
+static void encode_reclaim_complete(struct xdr_stream *xdr,
+ struct nfs41_reclaim_complete_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_RECLAIM_COMPLETE);
+ *p++ = cpu_to_be32(args->one_fs);
+ hdr->nops++;
+ hdr->replen += decode_reclaim_complete_maxsz;
+}
#endif /* CONFIG_NFS_V4_1 */
static void encode_sequence(struct xdr_stream *xdr,
@@ -2096,7 +2148,7 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
encode_compound_hdr(&xdr, req, &hdr);
encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
- replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
+ replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
@@ -2420,6 +2472,26 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
encode_nops(&hdr);
return 0;
}
+
+/*
+ * a RECLAIM_COMPLETE request
+ */
+static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_reclaim_complete_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args)
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_reclaim_complete(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -4528,6 +4600,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_DESTROY_SESSION);
}
+
+static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
+}
#endif /* CONFIG_NFS_V4_1 */
static int decode_sequence(struct xdr_stream *xdr,
@@ -4583,8 +4660,8 @@ static int decode_sequence(struct xdr_stream *xdr,
dummy = be32_to_cpup(p++);
/* target highest slot id - currently not processed */
dummy = be32_to_cpup(p++);
- /* result flags - currently not processed */
- dummy = be32_to_cpup(p);
+ /* result flags */
+ res->sr_status_flags = be32_to_cpup(p);
status = 0;
out_err:
res->sr_status = status;
@@ -5309,7 +5386,7 @@ out:
}
/*
- * FSINFO request
+ * Decode FSINFO response
*/
static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
struct nfs4_fsinfo_res *res)
@@ -5330,7 +5407,7 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
}
/*
- * PATHCONF request
+ * Decode PATHCONF response
*/
static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
struct nfs4_pathconf_res *res)
@@ -5351,7 +5428,7 @@ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
}
/*
- * STATFS request
+ * Decode STATFS response
*/
static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
struct nfs4_statfs_res *res)
@@ -5372,7 +5449,7 @@ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
}
/*
- * GETATTR_BITMAP request
+ * Decode GETATTR_BITMAP response
*/
static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
{
@@ -5411,7 +5488,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
}
/*
- * a SETCLIENTID request
+ * Decode SETCLIENTID response
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
struct nfs_client *clp)
@@ -5428,7 +5505,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
}
/*
- * a SETCLIENTID_CONFIRM request
+ * Decode SETCLIENTID_CONFIRM response
*/
static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
{
@@ -5448,7 +5525,7 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
}
/*
- * DELEGRETURN request
+ * Decode DELEGRETURN response
*/
static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
{
@@ -5474,7 +5551,7 @@ out:
}
/*
- * FS_LOCATIONS request
+ * Decode FS_LOCATIONS response
*/
static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
struct nfs4_fs_locations_res *res)
@@ -5504,7 +5581,7 @@ out:
#if defined(CONFIG_NFS_V4_1)
/*
- * EXCHANGE_ID request
+ * Decode EXCHANGE_ID response
*/
static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
void *res)
@@ -5521,7 +5598,7 @@ static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a CREATE_SESSION request
+ * Decode CREATE_SESSION response
*/
static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
struct nfs41_create_session_res *res)
@@ -5538,7 +5615,7 @@ static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a DESTROY_SESSION request
+ * Decode DESTROY_SESSION response
*/
static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
void *dummy)
@@ -5555,7 +5632,7 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a SEQUENCE request
+ * Decode SEQUENCE response
*/
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
struct nfs4_sequence_res *res)
@@ -5572,7 +5649,7 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a GET_LEASE_TIME request
+ * Decode GET_LEASE_TIME response
*/
static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
struct nfs4_get_lease_time_res *res)
@@ -5591,6 +5668,25 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
status = decode_fsinfo(&xdr, res->lr_fsinfo);
return status;
}
+
+/*
+ * Decode RECLAIM_COMPLETE response
+ */
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs41_reclaim_complete_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (!status)
+ status = decode_reclaim_complete(&xdr, (void *)NULL);
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
__be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
@@ -5767,6 +5863,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
PROC(SEQUENCE, enc_sequence, dec_sequence),
PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 12c9e66d3f1d..db9b360ae19d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -356,25 +356,19 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- goto out;
+ return;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- goto out;
+ return;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
- return;
-out:
- nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
- &data->res.seq_res);
- return;
-
+ nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
}
/*
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 90be551b80c1..ce907efc5508 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -175,14 +175,16 @@ static const match_table_t nfs_mount_option_tokens = {
};
enum {
- Opt_xprt_udp, Opt_xprt_tcp, Opt_xprt_rdma,
+ Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
Opt_xprt_err
};
static const match_table_t nfs_xprt_protocol_tokens = {
{ Opt_xprt_udp, "udp" },
+ { Opt_xprt_udp6, "udp6" },
{ Opt_xprt_tcp, "tcp" },
+ { Opt_xprt_tcp6, "tcp6" },
{ Opt_xprt_rdma, "rdma" },
{ Opt_xprt_err, NULL }
@@ -492,6 +494,45 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
return sec_flavours[i].str;
}
+static void nfs_show_mountd_netid(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address;
+
+ seq_printf(m, ",mountproto=");
+ switch (sap->sa_family) {
+ case AF_INET:
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, RPCBIND_NETID_UDP);
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, RPCBIND_NETID_TCP);
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+ break;
+ case AF_INET6:
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, RPCBIND_NETID_UDP6);
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, RPCBIND_NETID_TCP6);
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+}
+
static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
int showdefaults)
{
@@ -505,7 +546,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
}
case AF_INET6: {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
- seq_printf(m, ",mountaddr=%pI6", &sin6->sin6_addr);
+ seq_printf(m, ",mountaddr=%pI6c", &sin6->sin6_addr);
break;
}
default:
@@ -518,17 +559,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
if (nfss->mountd_port || showdefaults)
seq_printf(m, ",mountport=%u", nfss->mountd_port);
- switch (nfss->mountd_protocol) {
- case IPPROTO_UDP:
- seq_printf(m, ",mountproto=udp");
- break;
- case IPPROTO_TCP:
- seq_printf(m, ",mountproto=tcp");
- break;
- default:
- if (showdefaults)
- seq_printf(m, ",mountproto=auto");
- }
+ nfs_show_mountd_netid(m, nfss, showdefaults);
}
/*
@@ -578,7 +609,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
seq_puts(m, nfs_infop->nostr);
}
seq_printf(m, ",proto=%s",
- rpc_peeraddr2str(nfss->client, RPC_DISPLAY_PROTO));
+ rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID));
if (version == 4) {
if (nfss->port != NFS_PORT)
seq_printf(m, ",port=%u", nfss->port);
@@ -714,8 +745,6 @@ static void nfs_umount_begin(struct super_block *sb)
struct nfs_server *server;
struct rpc_clnt *rpc;
- lock_kernel();
-
server = NFS_SB(sb);
/* -EIO all pending I/O */
rpc = server->client_acl;
@@ -724,8 +753,6 @@ static void nfs_umount_begin(struct super_block *sb)
rpc = server->client;
if (!IS_ERR(rpc))
rpc_killall_tasks(rpc);
-
- unlock_kernel();
}
static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
@@ -734,8 +761,6 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data) {
- data->rsize = NFS_MAX_FILE_IO_SIZE;
- data->wsize = NFS_MAX_FILE_IO_SIZE;
data->acregmin = NFS_DEF_ACREGMIN;
data->acregmax = NFS_DEF_ACREGMAX;
data->acdirmin = NFS_DEF_ACDIRMIN;
@@ -887,6 +912,8 @@ static int nfs_parse_mount_options(char *raw,
{
char *p, *string, *secdata;
int rc, sloppy = 0, invalid_option = 0;
+ unsigned short protofamily = AF_UNSPEC;
+ unsigned short mountfamily = AF_UNSPEC;
if (!raw) {
dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
@@ -1232,12 +1259,17 @@ static int nfs_parse_mount_options(char *raw,
token = match_token(string,
nfs_xprt_protocol_tokens, args);
+ protofamily = AF_INET;
switch (token) {
+ case Opt_xprt_udp6:
+ protofamily = AF_INET6;
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
kfree(string);
break;
+ case Opt_xprt_tcp6:
+ protofamily = AF_INET6;
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
@@ -1265,10 +1297,15 @@ static int nfs_parse_mount_options(char *raw,
nfs_xprt_protocol_tokens, args);
kfree(string);
+ mountfamily = AF_INET;
switch (token) {
+ case Opt_xprt_udp6:
+ mountfamily = AF_INET6;
case Opt_xprt_udp:
mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
break;
+ case Opt_xprt_tcp6:
+ mountfamily = AF_INET6;
case Opt_xprt_tcp:
mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
break;
@@ -1367,8 +1404,33 @@ static int nfs_parse_mount_options(char *raw,
if (!sloppy && invalid_option)
return 0;
+ /*
+ * verify that any proto=/mountproto= options match the address
+ * families in the addr=/mountaddr= options.
+ */
+ if (protofamily != AF_UNSPEC &&
+ protofamily != mnt->nfs_server.address.ss_family)
+ goto out_proto_mismatch;
+
+ if (mountfamily != AF_UNSPEC) {
+ if (mnt->mount_server.addrlen) {
+ if (mountfamily != mnt->mount_server.address.ss_family)
+ goto out_mountproto_mismatch;
+ } else {
+ if (mountfamily != mnt->nfs_server.address.ss_family)
+ goto out_mountproto_mismatch;
+ }
+ }
+
return 1;
+out_mountproto_mismatch:
+ printk(KERN_INFO "NFS: mount server address does not match mountproto= "
+ "option\n");
+ return 0;
+out_proto_mismatch:
+ printk(KERN_INFO "NFS: server address does not match proto= option\n");
+ return 0;
out_invalid_address:
printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
return 0;
@@ -1881,7 +1943,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
if (data == NULL)
return -ENOMEM;
- lock_kernel();
/* fill out struct with values from existing mount */
data->flags = nfss->flags;
data->rsize = nfss->rsize;
@@ -1907,7 +1968,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
error = nfs_compare_remount_data(nfss, data);
out:
kfree(data);
- unlock_kernel();
return error;
}
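The new udp6/tcp6 netid tokens above only make sense when the corresponding addr=/mountaddr= option carries an IPv6 address, which is what the protofamily/mountfamily checks enforce. A small userspace sketch of the same rule follows, under the assumption that a netid ending in '6' names an IPv6 transport; the helper name is hypothetical and not part of the patch.

/*
 * Userspace sketch (not part of the patch): mirror the kernel's new
 * consistency rule that a "proto=tcp6"/"proto=udp6" netid must match
 * the address family of the addr= option.
 */
#include <netdb.h>
#include <string.h>
#include <sys/socket.h>

static int netid_matches_addr(const char *netid, const char *host)
{
	struct addrinfo hints = { .ai_socktype = SOCK_STREAM }, *ai;
	int family = strchr(netid, '6') ? AF_INET6 : AF_INET;
	int ok;

	if (getaddrinfo(host, NULL, &hints, &ai) != 0)
		return 0;
	/* same comparison the parser now does against ss_family */
	ok = (ai->ai_family == family);
	freeaddrinfo(ai);
	return ok;
}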
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 1064c91ae810..6da3d3ff6edd 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -83,7 +83,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
+ nfs_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b1ce2ea9b93b..d171696017f4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1216,7 +1216,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
@@ -1228,7 +1228,6 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
- nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
@@ -1612,15 +1611,16 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
if (ret)
goto out_unlock;
page_cache_get(newpage);
+ spin_lock(&mapping->host->i_lock);
req->wb_page = newpage;
SetPagePrivate(newpage);
- set_page_private(newpage, page_private(page));
+ set_page_private(newpage, (unsigned long)req);
ClearPagePrivate(page);
set_page_private(page, 0);
+ spin_unlock(&mapping->host->i_lock);
page_cache_release(page);
out_unlock:
nfs_clear_page_tag_locked(req);
- nfs_release_request(req);
out:
return ret;
}
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig
index 251da07b2a1d..1225af7b2166 100644
--- a/fs/nilfs2/Kconfig
+++ b/fs/nilfs2/Kconfig
@@ -2,6 +2,7 @@ config NILFS2_FS
tristate "NILFS2 file system support (EXPERIMENTAL)"
depends on EXPERIMENTAL
select CRC32
+ select FS_JOURNAL_INFO
help
NILFS2 is a log-structured file system (LFS) supporting continuous
snapshotting. In addition to versioning capability of the entire
diff --git a/fs/proc/base.c b/fs/proc/base.c
index af643b5aefe8..4df4a464a919 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1265,6 +1265,72 @@ static const struct file_operations proc_pid_sched_operations = {
#endif
+static ssize_t comm_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct task_struct *p;
+ char buffer[TASK_COMM_LEN];
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+
+ if (same_thread_group(current, p))
+ set_task_comm(p, buffer);
+ else
+ count = -EINVAL;
+
+ put_task_struct(p);
+
+ return count;
+}
+
+static int comm_show(struct seq_file *m, void *v)
+{
+ struct inode *inode = m->private;
+ struct task_struct *p;
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+
+ task_lock(p);
+ seq_printf(m, "%s\n", p->comm);
+ task_unlock(p);
+
+ put_task_struct(p);
+
+ return 0;
+}
+
+static int comm_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+
+ ret = single_open(filp, comm_show, NULL);
+ if (!ret) {
+ struct seq_file *m = filp->private_data;
+
+ m->private = inode;
+ }
+ return ret;
+}
+
+static const struct file_operations proc_pid_set_comm_operations = {
+ .open = comm_open,
+ .read = seq_read,
+ .write = comm_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* We added or removed a vma mapping the executable. The vmas are only mapped
* during exec and are not mapped with the mmap system call.
@@ -2504,6 +2570,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
INF("syscall", S_IRUSR, proc_pid_syscall),
#endif
@@ -2838,6 +2905,7 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
INF("syscall", S_IRUSR, proc_pid_syscall),
#endif
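The REG("comm", ...) entries above expose a writable /proc/<pid>/comm file backed by comm_show()/comm_write(). A minimal userspace example, assuming the usual TASK_COMM_LEN limit of 16 bytes (15 characters plus the terminating NUL) and that only tasks in the same thread group may write:

/*
 * Userspace example (not part of the patch): rename the current task via
 * /proc/self/comm and read the new name back. Writes longer than 15
 * characters are silently truncated by comm_write() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	char name[32];
	ssize_t n;
	int fd = open("/proc/self/comm", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, "demo-worker", strlen("demo-worker")) < 0)
		perror("write");
	lseek(fd, 0, SEEK_SET);
	n = read(fd, name, sizeof(name) - 1);
	if (n > 0) {
		name[n] = '\0';
		printf("comm is now: %s", name);	/* kernel appends '\n' */
	}
	close(fd);
	return 0;
}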
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2a1bef9203c6..47c03f4336b8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -650,6 +650,50 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return err;
}
+static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+{
+ u64 pme = 0;
+ if (pte_present(pte))
+ pme = PM_PFRAME(pte_pfn(pte) + offset)
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+ return pme;
+}
+
+static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct vm_area_struct *vma;
+ struct pagemapread *pm = walk->private;
+ struct hstate *hs = NULL;
+ int err = 0;
+
+ vma = find_vma(walk->mm, addr);
+ if (vma)
+ hs = hstate_vma(vma);
+ for (; addr != end; addr += PAGE_SIZE) {
+ u64 pfn = PM_NOT_PRESENT;
+
+ if (vma && (addr >= vma->vm_end)) {
+ vma = find_vma(walk->mm, addr);
+ if (vma)
+ hs = hstate_vma(vma);
+ }
+
+ if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
+ /* calculate pfn of the "raw" page in the hugepage. */
+ int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
+ pfn = huge_pte_to_pagemap_entry(*pte, offset);
+ }
+ err = add_to_pagemap(addr, pfn, pm);
+ if (err)
+ return err;
+ }
+
+ cond_resched();
+
+ return err;
+}
+
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
@@ -742,6 +786,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
+ pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
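pagemap_hugetlb_range() above packs each subpage of a hugepage into the same 64-bit pagemap entry format as ordinary pages via PM_PFRAME(), PM_PSHIFT() and PM_PRESENT. A userspace sketch of decoding one such entry, assuming the layout of this era (PFN in bits 0-54, page shift in bits 55-60, present flag in bit 63):

/*
 * Userspace sketch (assumed layout: 64-bit entries, PFN in bits 0-54,
 * page shift in bits 55-60, "present" in bit 63 -- matching the
 * PM_PFRAME/PM_PSHIFT/PM_PRESENT encoding used by this patch).
 */
#include <stdint.h>
#include <stdio.h>

static void decode_pagemap_entry(uint64_t pme)
{
	uint64_t pfn = pme & ((1ULL << 55) - 1);
	unsigned int shift = (pme >> 55) & 0x3f;
	int present = (int)((pme >> 63) & 1);

	printf("present=%d pfn=0x%llx page_shift=%u\n",
	       present, (unsigned long long)pfn, shift);
}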
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8f5c05d3dbd3..5d9fd64ef81a 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -110,9 +110,13 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
}
}
- size += (*text = mm->end_code - mm->start_code);
- size += (*data = mm->start_stack - mm->start_data);
+ *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
+ >> PAGE_SHIFT;
+ *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
+ >> PAGE_SHIFT;
up_read(&mm->mmap_sem);
+ size >>= PAGE_SHIFT;
+ size += *text + *data;
*resident = size;
return size;
}
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig
index 513f431038f9..ac7cd75c86f8 100644
--- a/fs/reiserfs/Kconfig
+++ b/fs/reiserfs/Kconfig
@@ -1,6 +1,7 @@
config REISERFS_FS
tristate "Reiserfs support"
select CRC32
+ select FS_JOURNAL_INFO
help
Stores not just filenames but the files themselves in a balanced
tree. Uses journalling.
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 8a771c59ac3e..90492327b383 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -350,13 +350,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
le32_to_cpu(sup->fmt_version));
printk(KERN_DEBUG "\ttime_gran %u\n",
le32_to_cpu(sup->time_gran));
- printk(KERN_DEBUG "\tUUID %02X%02X%02X%02X-%02X%02X"
- "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
- sup->uuid[0], sup->uuid[1], sup->uuid[2], sup->uuid[3],
- sup->uuid[4], sup->uuid[5], sup->uuid[6], sup->uuid[7],
- sup->uuid[8], sup->uuid[9], sup->uuid[10], sup->uuid[11],
- sup->uuid[12], sup->uuid[13], sup->uuid[14],
- sup->uuid[15]);
+ printk(KERN_DEBUG "\tUUID %pUB\n",
+ sup->uuid);
break;
}
case UBIFS_MST_NODE:
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 943ad5624530..43f9d19a6f33 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1393,12 +1393,7 @@ static int mount_ubifs(struct ubifs_info *c)
c->leb_size, c->leb_size >> 10);
dbg_msg("data journal heads: %d",
c->jhead_cnt - NONDATA_JHEADS_CNT);
- dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X"
- "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
- c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3],
- c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
- c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
- c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
+ dbg_msg("UUID: %pUB", c->uuid);
dbg_msg("big_lpt %d", c->big_lpt);
dbg_msg("log LEBs: %d (%d - %d)",
c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1e068535b58b..82372e332f08 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -440,7 +440,7 @@ static void udf_table_free_blocks(struct super_block *sb,
(bloc->logicalBlockNum + count) >
partmap->s_partition_len) {
udf_debug("%d < %d || %d + %d > %d\n",
- bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+ bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
partmap->s_partition_len);
goto error_return;
}
diff --git a/fs/udf/file.c b/fs/udf/file.c
index b80cbd78833c..f311d509b6a3 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -196,6 +196,7 @@ static int udf_release_file(struct inode *inode, struct file *filp)
mutex_lock(&inode->i_mutex);
lock_kernel();
udf_discard_prealloc(inode);
+ udf_truncate_tail_extent(inode);
unlock_kernel();
mutex_unlock(&inode->i_mutex);
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6d24c2c63f93..f90231eb2916 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -97,15 +97,17 @@ no_delete:
*/
void udf_clear_inode(struct inode *inode)
{
- struct udf_inode_info *iinfo;
- if (!(inode->i_sb->s_flags & MS_RDONLY)) {
- lock_kernel();
- udf_truncate_tail_extent(inode);
- unlock_kernel();
- write_inode_now(inode, 0);
- invalidate_inode_buffers(inode);
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
+ inode->i_size != iinfo->i_lenExtents) {
+ printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
+ "inode size %llu different from extent lenght %llu. "
+ "Filesystem need not be standards compliant.\n",
+ inode->i_sb->s_id, inode->i_ino, inode->i_mode,
+ (unsigned long long)inode->i_size,
+ (unsigned long long)iinfo->i_lenExtents);
}
- iinfo = UDF_I(inode);
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
}
@@ -198,7 +200,6 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
int newblock;
struct buffer_head *dbh = NULL;
struct kernel_lb_addr eloc;
- uint32_t elen;
uint8_t alloctype;
struct extent_position epos;
@@ -273,12 +274,11 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
eloc.logicalBlockNum = *block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
- elen = inode->i_sb->s_blocksize;
- iinfo->i_lenExtents = elen;
+ iinfo->i_lenExtents = inode->i_size;
epos.bh = NULL;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
- udf_add_aext(inode, &epos, &eloc, elen, 0);
+ udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
/* UniqueID stuff */
brelse(epos.bh);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 21dad8c608f9..cd2115060fdc 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -408,15 +408,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
}
add:
- /* Is there any extent whose size we need to round up? */
- if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && elen) {
- elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
- if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(struct short_ad);
- else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(struct long_ad);
- udf_write_aext(dir, &epos, &eloc, elen, 1);
- }
f_pos += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
@@ -439,6 +430,7 @@ add:
udf_current_aext(dir, &epos, &eloc, &elen, 1);
}
+ /* Entry fits into current block? */
if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
fibh->soffset = fibh->eoffset;
fibh->eoffset += nfidlen;
@@ -462,6 +454,16 @@ add:
(fibh->sbh->b_data + fibh->soffset);
}
} else {
+ /* Round up last extent in the file */
+ elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
+ if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ epos.offset -= sizeof(struct short_ad);
+ else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ epos.offset -= sizeof(struct long_ad);
+ udf_write_aext(dir, &epos, &eloc, elen, 1);
+ dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize
+ - 1) & ~(sb->s_blocksize - 1);
+
fibh->soffset = fibh->eoffset - sb->s_blocksize;
fibh->eoffset += nfidlen - sb->s_blocksize;
if (fibh->sbh != fibh->ebh) {
@@ -508,6 +510,20 @@ add:
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
dinfo->i_lenAlloc += nfidlen;
+ else {
+ /* Find the last extent and truncate it to proper size */
+ while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+ (EXT_RECORDED_ALLOCATED >> 30))
+ ;
+ elen -= dinfo->i_lenExtents - dir->i_size;
+ if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ epos.offset -= sizeof(struct short_ad);
+ else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ epos.offset -= sizeof(struct long_ad);
+ udf_write_aext(dir, &epos, &eloc, elen, 1);
+ dinfo->i_lenExtents = dir->i_size;
+ }
+
mark_inode_dirty(dir);
goto out_ok;
} else {
@@ -922,7 +938,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
block = udf_get_pblock(inode->i_sb, block,
iinfo->i_location.partitionReferenceNum,
0);
- epos.bh = udf_tread(inode->i_sb, block);
+ epos.bh = udf_tgetblk(inode->i_sb, block);
lock_buffer(epos.bh);
memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(epos.bh);
@@ -999,6 +1015,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
inode->i_size = elen;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
iinfo->i_lenAlloc = inode->i_size;
+ else
+ udf_truncate_tail_extent(inode);
mark_inode_dirty(inode);
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9d1b8c2e6c45..1e4543cbcd27 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct super_block *sb,
return 0;
}
-static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+static void udf_find_vat_block(struct super_block *sb, int p_index,
+ int type1_index, sector_t start_block)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
+ sector_t vat_block;
struct kernel_lb_addr ino;
+
+ /*
+ * VAT file entry is in the last recorded block. Some broken disks have
+ * it a few blocks before, so try a bit harder...
+ */
+ ino.partitionReferenceNum = type1_index;
+ for (vat_block = start_block;
+ vat_block >= map->s_partition_root &&
+ vat_block >= start_block - 3 &&
+ !sbi->s_vat_inode; vat_block--) {
+ ino.logicalBlockNum = vat_block - map->s_partition_root;
+ sbi->s_vat_inode = udf_iget(sb, &ino);
+ }
+}
+
+static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+{
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *map = &sbi->s_partmaps[p_index];
struct buffer_head *bh = NULL;
struct udf_inode_info *vati;
uint32_t pos;
struct virtualAllocationTable20 *vat20;
sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
- /* VAT file entry is in the last recorded block */
- ino.partitionReferenceNum = type1_index;
- ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
+ udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
if (!sbi->s_vat_inode &&
sbi->s_last_block != blocks - 1) {
printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
@@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
"block of the device (%lu).\n",
(unsigned long)sbi->s_last_block,
(unsigned long)blocks - 1);
- ino.partitionReferenceNum = type1_index;
- ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
+ udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
}
if (!sbi->s_vat_inode)
return 1;
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 7a59daed1782..56641fe52a23 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -26,6 +26,8 @@ endif
obj-$(CONFIG_XFS_FS) += xfs.o
+xfs-y += linux-2.6/xfs_trace.o
+
xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
xfs_dquot.o \
xfs_dquot_item.o \
@@ -90,8 +92,7 @@ xfs-y += xfs_alloc.o \
xfs_rw.o \
xfs_dmops.o
-xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \
- xfs_dir2_trace.o
+xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
# Objects in linux/
xfs-y += $(addprefix $(XFS_LINUX)/, \
@@ -113,6 +114,3 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
xfs-y += $(addprefix support/, \
debug.o \
uuid.o)
-
-xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
-
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index b23a54506446..69e598b6986f 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -21,6 +21,7 @@
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 87813e405cef..d798c54296eb 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,7 @@
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
@@ -76,7 +77,7 @@ xfs_ioend_wake(
wake_up(to_ioend_wq(ip));
}
-STATIC void
+void
xfs_count_page_state(
struct page *page,
int *delalloc,
@@ -98,48 +99,6 @@ xfs_count_page_state(
} while ((bh = bh->b_this_page) != head);
}
-#if defined(XFS_RW_TRACE)
-void
-xfs_page_trace(
- int tag,
- struct inode *inode,
- struct page *page,
- unsigned long pgoff)
-{
- xfs_inode_t *ip;
- loff_t isize = i_size_read(inode);
- loff_t offset = page_offset(page);
- int delalloc = -1, unmapped = -1, unwritten = -1;
-
- if (page_has_buffers(page))
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-
- ip = XFS_I(inode);
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)inode,
- (void *)page,
- (void *)pgoff,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
- (void *)((unsigned long)(isize & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)delalloc),
- (void *)((unsigned long)unmapped),
- (void *)((unsigned long)unwritten),
- (void *)((unsigned long)current_pid()),
- (void *)NULL);
-}
-#else
-#define xfs_page_trace(tag, inode, page, pgoff)
-#endif
-
STATIC struct block_device *
xfs_find_bdev_for_inode(
struct xfs_inode *ip)
@@ -1202,7 +1161,7 @@ xfs_vm_writepage(
int delalloc, unmapped, unwritten;
struct inode *inode = page->mapping->host;
- xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+ trace_xfs_writepage(inode, page, 0);
/*
* We need a transaction if:
@@ -1307,7 +1266,7 @@ xfs_vm_releasepage(
.nr_to_write = 1,
};
- xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+ trace_xfs_releasepage(inode, page, 0);
if (!page_has_buffers(page))
return 0;
@@ -1587,8 +1546,7 @@ xfs_vm_invalidatepage(
struct page *page,
unsigned long offset)
{
- xfs_page_trace(XFS_INVALIDPAGE_ENTER,
- page->mapping->host, page, offset);
+ trace_xfs_invalidatepage(page->mapping->host, page, offset);
block_invalidatepage(page, offset);
}
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 221b3e66ceef..4cfc6ea87df8 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -45,4 +45,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
extern void xfs_ioend_init(void);
extern void xfs_ioend_wait(struct xfs_inode *);
+extern void xfs_count_page_state(struct page *, int *, int *, int *);
+
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4ddc973aea7a..b4c7d4248aac 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,6 +39,7 @@
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
+#include "xfs_trace.h"
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
@@ -53,34 +54,6 @@ static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;
-#ifdef XFS_BUF_TRACE
-void
-xfs_buf_trace(
- xfs_buf_t *bp,
- char *id,
- void *data,
- void *ra)
-{
- ktrace_enter(xfs_buf_trace_buf,
- bp, id,
- (void *)(unsigned long)bp->b_flags,
- (void *)(unsigned long)bp->b_hold.counter,
- (void *)(unsigned long)bp->b_sema.count,
- (void *)current,
- data, ra,
- (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
- (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
- (void *)(unsigned long)bp->b_buffer_length,
- NULL, NULL, NULL, NULL, NULL);
-}
-ktrace_t *xfs_buf_trace_buf;
-#define XFS_BUF_TRACE_SIZE 4096
-#define XB_TRACE(bp, id, data) \
- xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
-#else
-#define XB_TRACE(bp, id, data) do { } while (0)
-#endif
-
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
@@ -279,7 +252,8 @@ _xfs_buf_initialize(
init_waitqueue_head(&bp->b_waiters);
XFS_STATS_INC(xb_create);
- XB_TRACE(bp, "initialize", target);
+
+ trace_xfs_buf_init(bp, _RET_IP_);
}
/*
@@ -332,7 +306,7 @@ void
xfs_buf_free(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "free", 0);
+ trace_xfs_buf_free(bp, _RET_IP_);
ASSERT(list_empty(&bp->b_hash_list));
@@ -445,7 +419,6 @@ _xfs_buf_lookup_pages(
if (page_count == bp->b_page_count)
bp->b_flags |= XBF_DONE;
- XB_TRACE(bp, "lookup_pages", (long)page_count);
return error;
}
@@ -548,7 +521,6 @@ found:
if (down_trylock(&bp->b_sema)) {
if (!(flags & XBF_TRYLOCK)) {
/* wait for buffer ownership */
- XB_TRACE(bp, "get_lock", 0);
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
} else {
@@ -571,7 +543,8 @@ found:
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
bp->b_flags &= XBF_MAPPED;
}
- XB_TRACE(bp, "got_lock", 0);
+
+ trace_xfs_buf_find(bp, flags, _RET_IP_);
XFS_STATS_INC(xb_get_locked);
return bp;
}
@@ -627,7 +600,7 @@ xfs_buf_get(
bp->b_bn = ioff;
bp->b_count_desired = bp->b_buffer_length;
- XB_TRACE(bp, "get", (unsigned long)flags);
+ trace_xfs_buf_get(bp, flags, _RET_IP_);
return bp;
no_buffer:
@@ -644,8 +617,6 @@ _xfs_buf_read(
{
int status;
- XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
-
ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
@@ -673,19 +644,18 @@ xfs_buf_read(
bp = xfs_buf_get(target, ioff, isize, flags);
if (bp) {
+ trace_xfs_buf_read(bp, flags, _RET_IP_);
+
if (!XFS_BUF_ISDONE(bp)) {
- XB_TRACE(bp, "read", (unsigned long)flags);
XFS_STATS_INC(xb_get_read);
_xfs_buf_read(bp, flags);
} else if (flags & XBF_ASYNC) {
- XB_TRACE(bp, "read_async", (unsigned long)flags);
/*
* Read ahead call which is already satisfied,
* drop the buffer
*/
goto no_buffer;
} else {
- XB_TRACE(bp, "read_done", (unsigned long)flags);
/* We do not want read in the flags */
bp->b_flags &= ~XBF_READ;
}
@@ -823,7 +793,7 @@ xfs_buf_get_noaddr(
xfs_buf_unlock(bp);
- XB_TRACE(bp, "no_daddr", len);
+ trace_xfs_buf_get_noaddr(bp, _RET_IP_);
return bp;
fail_free_mem:
@@ -845,8 +815,8 @@ void
xfs_buf_hold(
xfs_buf_t *bp)
{
+ trace_xfs_buf_hold(bp, _RET_IP_);
atomic_inc(&bp->b_hold);
- XB_TRACE(bp, "hold", 0);
}
/*
@@ -859,7 +829,7 @@ xfs_buf_rele(
{
xfs_bufhash_t *hash = bp->b_hash;
- XB_TRACE(bp, "rele", bp->b_relse);
+ trace_xfs_buf_rele(bp, _RET_IP_);
if (unlikely(!hash)) {
ASSERT(!bp->b_relse);
@@ -909,21 +879,19 @@ xfs_buf_cond_lock(
int locked;
locked = down_trylock(&bp->b_sema) == 0;
- if (locked) {
+ if (locked)
XB_SET_OWNER(bp);
- }
- XB_TRACE(bp, "cond_lock", (long)locked);
+
+ trace_xfs_buf_cond_lock(bp, _RET_IP_);
return locked ? 0 : -EBUSY;
}
-#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
xfs_buf_t *bp)
{
return bp->b_sema.count;
}
-#endif
/*
* Locks a buffer object.
@@ -935,12 +903,14 @@ void
xfs_buf_lock(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "lock", 0);
+ trace_xfs_buf_lock(bp, _RET_IP_);
+
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
down(&bp->b_sema);
XB_SET_OWNER(bp);
- XB_TRACE(bp, "locked", 0);
+
+ trace_xfs_buf_lock_done(bp, _RET_IP_);
}
/*
@@ -962,7 +932,8 @@ xfs_buf_unlock(
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
- XB_TRACE(bp, "unlock", 0);
+
+ trace_xfs_buf_unlock(bp, _RET_IP_);
}
@@ -974,17 +945,18 @@ void
xfs_buf_pin(
xfs_buf_t *bp)
{
+ trace_xfs_buf_pin(bp, _RET_IP_);
atomic_inc(&bp->b_pin_count);
- XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}
void
xfs_buf_unpin(
xfs_buf_t *bp)
{
+ trace_xfs_buf_unpin(bp, _RET_IP_);
+
if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&bp->b_waiters);
- XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}
int
@@ -1035,7 +1007,7 @@ xfs_buf_iodone_work(
*/
if ((bp->b_error == EOPNOTSUPP) &&
(bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
- XB_TRACE(bp, "ordered_retry", bp->b_iodone);
+ trace_xfs_buf_ordered_retry(bp, _RET_IP_);
bp->b_flags &= ~XBF_ORDERED;
bp->b_flags |= _XFS_BARRIER_FAILED;
xfs_buf_iorequest(bp);
@@ -1050,12 +1022,12 @@ xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
+ trace_xfs_buf_iodone(bp, _RET_IP_);
+
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
if (bp->b_error == 0)
bp->b_flags |= XBF_DONE;
- XB_TRACE(bp, "iodone", bp->b_iodone);
-
if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
@@ -1075,7 +1047,7 @@ xfs_buf_ioerror(
{
ASSERT(error >= 0 && error <= 0xffff);
bp->b_error = (unsigned short)error;
- XB_TRACE(bp, "ioerror", (unsigned long)error);
+ trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}
int
@@ -1083,7 +1055,7 @@ xfs_bawrite(
void *mp,
struct xfs_buf *bp)
{
- XB_TRACE(bp, "bawrite", 0);
+ trace_xfs_buf_bawrite(bp, _RET_IP_);
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
@@ -1102,7 +1074,7 @@ xfs_bdwrite(
void *mp,
struct xfs_buf *bp)
{
- XB_TRACE(bp, "bdwrite", 0);
+ trace_xfs_buf_bdwrite(bp, _RET_IP_);
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
@@ -1253,7 +1225,7 @@ int
xfs_buf_iorequest(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "iorequest", 0);
+ trace_xfs_buf_iorequest(bp, _RET_IP_);
if (bp->b_flags & XBF_DELWRI) {
xfs_buf_delwri_queue(bp, 1);
@@ -1287,11 +1259,13 @@ int
xfs_buf_iowait(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "iowait", 0);
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
wait_for_completion(&bp->b_iowait);
- XB_TRACE(bp, "iowaited", (long)bp->b_error);
+
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
return bp->b_error;
}
@@ -1604,7 +1578,8 @@ xfs_buf_delwri_queue(
struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
- XB_TRACE(bp, "delwri_q", (long)unlock);
+ trace_xfs_buf_delwri_queue(bp, _RET_IP_);
+
ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
spin_lock(dwlk);
@@ -1644,7 +1619,7 @@ xfs_buf_delwri_dequeue(
if (dequeued)
xfs_buf_rele(bp);
- XB_TRACE(bp, "delwri_dq", (long)dequeued);
+ trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}
STATIC void
@@ -1692,7 +1667,7 @@ xfs_buf_delwri_split(
INIT_LIST_HEAD(list);
spin_lock(dwlk);
list_for_each_entry_safe(bp, n, dwq, b_list) {
- XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+ trace_xfs_buf_delwri_split(bp, _RET_IP_);
ASSERT(bp->b_flags & XBF_DELWRI);
if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1816,14 +1791,10 @@ xfs_flush_buftarg(
int __init
xfs_buf_init(void)
{
-#ifdef XFS_BUF_TRACE
- xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
-#endif
-
xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
KM_ZONE_HWALIGN, NULL);
if (!xfs_buf_zone)
- goto out_free_trace_buf;
+ goto out;
xfslogd_workqueue = create_workqueue("xfslogd");
if (!xfslogd_workqueue)
@@ -1846,10 +1817,7 @@ xfs_buf_init(void)
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
kmem_zone_destroy(xfs_buf_zone);
- out_free_trace_buf:
-#ifdef XFS_BUF_TRACE
- ktrace_free(xfs_buf_trace_buf);
-#endif
+ out:
return -ENOMEM;
}
@@ -1861,9 +1829,6 @@ xfs_buf_terminate(void)
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
-#ifdef XFS_BUF_TRACE
- ktrace_free(xfs_buf_trace_buf);
-#endif
}
#ifdef CONFIG_KDB_MODULES
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 5f07dd91c5fa..a509f4addc2a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -95,6 +95,28 @@ typedef enum {
_XFS_BARRIER_FAILED = (1 << 23),
} xfs_buf_flags_t;
+#define XFS_BUF_FLAGS \
+ { XBF_READ, "READ" }, \
+ { XBF_WRITE, "WRITE" }, \
+ { XBF_MAPPED, "MAPPED" }, \
+ { XBF_ASYNC, "ASYNC" }, \
+ { XBF_DONE, "DONE" }, \
+ { XBF_DELWRI, "DELWRI" }, \
+ { XBF_STALE, "STALE" }, \
+ { XBF_FS_MANAGED, "FS_MANAGED" }, \
+ { XBF_ORDERED, "ORDERED" }, \
+ { XBF_READ_AHEAD, "READ_AHEAD" }, \
+ { XBF_LOCK, "LOCK" }, /* should never be set */\
+ { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
+ { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
+ { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
+ { _XBF_PAGES, "PAGES" }, \
+ { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
+ { _XBF_DELWRI_Q, "DELWRI_Q" }, \
+ { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \
+ { _XFS_BARRIER_FAILED, "BARRIER_FAILED" }
+
+
typedef enum {
XBT_FORCE_SLEEP = 0,
XBT_FORCE_FLUSH = 1,
@@ -243,13 +265,6 @@ extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
-#ifdef XFS_BUF_TRACE
-extern ktrace_t *xfs_buf_trace_buf;
-extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
-#else
-#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
-#endif
-
#define xfs_buf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
@@ -365,10 +380,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
#define xfs_bpin(bp) xfs_buf_pin(bp)
#define xfs_bunpin(bp) xfs_buf_unpin(bp)
-
-#define xfs_buftrace(id, bp) \
- xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
-
#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
#define xfs_biomove(bp, off, len, data, rw) \
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 08be36d7326c..7501b85fd860 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -19,6 +19,7 @@
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
+#include "xfs_trace.h"
int fs_noerr(void) { return 0; }
int fs_nosys(void) { return ENOSYS; }
@@ -51,6 +52,8 @@ xfs_flushinval_pages(
struct address_space *mapping = VFS_I(ip)->i_mapping;
int ret = 0;
+ trace_xfs_pagecache_inval(ip, first, last);
+
if (mapping->nrpages) {
xfs_iflags_clear(ip, XFS_ITRUNCATED);
ret = filemap_write_and_wait(mapping);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 5bb523d7f37e..a034cf624437 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -51,6 +51,7 @@
#include "xfs_quota.h"
#include "xfs_inode_item.h"
#include "xfs_export.h"
+#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/dcache.h>
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index eafcc7c18706..be1527b1670c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -46,6 +46,7 @@
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
+#include "xfs_trace.h"
#define _NATIVE_IOC(cmd, type) \
_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 1f3b4b8f7dd4..1d5b298ba8b2 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -47,6 +47,7 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/xattr.h>
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 6127e24062d0..5af0c81ca1ae 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -40,7 +40,6 @@
#include <sv.h>
#include <time.h>
-#include <support/ktrace.h>
#include <support/debug.h>
#include <support/uuid.h>
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 1bf47f219c97..0d32457abef1 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -48,73 +48,12 @@
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/writeback.h>
-#if defined(XFS_RW_TRACE)
-void
-xfs_rw_enter_trace(
- int tag,
- xfs_inode_t *ip,
- void *data,
- size_t segs,
- loff_t offset,
- int ioflags)
-{
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(unsigned long)tag,
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)data,
- (void *)((unsigned long)segs),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)ioflags),
- (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-
-void
-xfs_inval_cached_trace(
- xfs_inode_t *ip,
- xfs_off_t offset,
- xfs_off_t len,
- xfs_off_t first,
- xfs_off_t last)
-{
-
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_INVAL_CACHED,
- (void *)ip,
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)((len >> 32) & 0xffffffff)),
- (void *)((unsigned long)(len & 0xffffffff)),
- (void *)((unsigned long)((first >> 32) & 0xffffffff)),
- (void *)((unsigned long)(first & 0xffffffff)),
- (void *)((unsigned long)((last >> 32) & 0xffffffff)),
- (void *)((unsigned long)(last & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-#endif
-
/*
* xfs_iozero
*
@@ -250,8 +189,7 @@ xfs_read(
}
}
- xfs_rw_enter_trace(XFS_READ_ENTER, ip,
- (void *)iovp, segs, *offset, ioflags);
+ trace_xfs_file_read(ip, size, *offset, ioflags);
iocb->ki_pos = *offset;
ret = generic_file_aio_read(iocb, iovp, segs, *offset);
@@ -292,8 +230,9 @@ xfs_splice_read(
return -error;
}
}
- xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
- pipe, count, *ppos, ioflags);
+
+ trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
+
ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
@@ -342,8 +281,8 @@ xfs_splice_write(
ip->i_new_size = new_size;
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
- pipe, count, *ppos, ioflags);
+ trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
+
ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
if (ret > 0)
XFS_STATS_ADD(xs_write_bytes, ret);
@@ -710,8 +649,6 @@ start:
if ((ioflags & IO_ISDIRECT)) {
if (mapping->nrpages) {
WARN_ON(need_i_mutex == 0);
- xfs_inval_cached_trace(xip, pos, -1,
- (pos & PAGE_CACHE_MASK), -1);
error = xfs_flushinval_pages(xip,
(pos & PAGE_CACHE_MASK),
-1, FI_REMAPF_LOCKED);
@@ -728,8 +665,7 @@ start:
need_i_mutex = 0;
}
- xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
- *offset, ioflags);
+ trace_xfs_file_direct_write(xip, count, *offset, ioflags);
ret = generic_file_direct_write(iocb, iovp,
&segs, pos, offset, count, ocount);
@@ -752,8 +688,7 @@ start:
ssize_t ret2 = 0;
write_retry:
- xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
- *offset, ioflags);
+ trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
ret2 = generic_file_buffered_write(iocb, iovp, segs,
pos, offset, count, ret);
/*
@@ -858,7 +793,7 @@ int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
- xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
/*
* Metadata write that didn't get logged but
* written delayed anyway. These aren't associated
@@ -891,7 +826,7 @@ xfsbdstrat(
return;
}
- xfs_buftrace("XFSBDSTRAT IOERROR", bp);
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
xfs_bioerror_relse(bp);
}
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index e6be37dbd0e9..d1f7789c7ffb 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -20,52 +20,7 @@
struct xfs_mount;
struct xfs_inode;
-struct xfs_bmbt_irec;
struct xfs_buf;
-struct xfs_iomap;
-
-#if defined(XFS_RW_TRACE)
-/*
- * Defines for the trace mechanisms in xfs_lrw.c.
- */
-#define XFS_RW_KTRACE_SIZE 128
-
-#define XFS_READ_ENTER 1
-#define XFS_WRITE_ENTER 2
-#define XFS_IOMAP_READ_ENTER 3
-#define XFS_IOMAP_WRITE_ENTER 4
-#define XFS_IOMAP_READ_MAP 5
-#define XFS_IOMAP_WRITE_MAP 6
-#define XFS_IOMAP_WRITE_NOSPACE 7
-#define XFS_ITRUNC_START 8
-#define XFS_ITRUNC_FINISH1 9
-#define XFS_ITRUNC_FINISH2 10
-#define XFS_CTRUNC1 11
-#define XFS_CTRUNC2 12
-#define XFS_CTRUNC3 13
-#define XFS_CTRUNC4 14
-#define XFS_CTRUNC5 15
-#define XFS_CTRUNC6 16
-#define XFS_BUNMAP 17
-#define XFS_INVAL_CACHED 18
-#define XFS_DIORD_ENTER 19
-#define XFS_DIOWR_ENTER 20
-#define XFS_WRITEPAGE_ENTER 22
-#define XFS_RELEASEPAGE_ENTER 23
-#define XFS_INVALIDPAGE_ENTER 24
-#define XFS_IOMAP_ALLOC_ENTER 25
-#define XFS_IOMAP_ALLOC_MAP 26
-#define XFS_IOMAP_UNWRITTEN 27
-#define XFS_SPLICE_READ_ENTER 28
-#define XFS_SPLICE_WRITE_ENTER 29
-extern void xfs_rw_enter_trace(int, struct xfs_inode *,
- void *, size_t, loff_t, int);
-extern void xfs_inval_cached_trace(struct xfs_inode *,
- xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
-#else
-#define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags)
-#define xfs_inval_cached_trace(ip, offset, len, first, last)
-#endif
/* errors from xfsbdstrat() must be extracted from the buffer */
extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
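
For context, the block removed above is the classic compile-time stub idiom that the tracepoint conversion retires: a real tracing call when the trace option is configured in, and empty macros otherwise, so the call sites (and their argument evaluation) disappear entirely from non-tracing builds. A hypothetical sketch of that retired pattern, with illustrative names:

#ifdef FOO_TRACE
extern void foo_trace_enter(int tag, struct foo *fp, size_t len, loff_t off);
#else
/* tracing not built in: calls compile away completely */
#define foo_trace_enter(tag, fp, len, off)
#endif
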
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1bfb0e980193..09783cc444ac 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -15,6 +15,7 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
@@ -52,11 +53,11 @@
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_trace.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_sync.h"
+#include "xfs_trace.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -1525,8 +1526,6 @@ xfs_fs_fill_super(
goto fail_vnrele;
kfree(mtpt);
-
- xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
return 0;
out_filestream_unmount:
@@ -1602,94 +1601,6 @@ static struct file_system_type xfs_fs_type = {
};
STATIC int __init
-xfs_alloc_trace_bufs(void)
-{
-#ifdef XFS_ALLOC_TRACE
- xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_alloc_trace_buf)
- goto out;
-#endif
-#ifdef XFS_BMAP_TRACE
- xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_bmap_trace_buf)
- goto out_free_alloc_trace;
-#endif
-#ifdef XFS_BTREE_TRACE
- xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE,
- KM_MAYFAIL);
- if (!xfs_allocbt_trace_buf)
- goto out_free_bmap_trace;
-
- xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_inobt_trace_buf)
- goto out_free_allocbt_trace;
-
- xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_bmbt_trace_buf)
- goto out_free_inobt_trace;
-#endif
-#ifdef XFS_ATTR_TRACE
- xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_attr_trace_buf)
- goto out_free_bmbt_trace;
-#endif
-#ifdef XFS_DIR2_TRACE
- xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
- if (!xfs_dir2_trace_buf)
- goto out_free_attr_trace;
-#endif
-
- return 0;
-
-#ifdef XFS_DIR2_TRACE
- out_free_attr_trace:
-#endif
-#ifdef XFS_ATTR_TRACE
- ktrace_free(xfs_attr_trace_buf);
- out_free_bmbt_trace:
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(xfs_bmbt_trace_buf);
- out_free_inobt_trace:
- ktrace_free(xfs_inobt_trace_buf);
- out_free_allocbt_trace:
- ktrace_free(xfs_allocbt_trace_buf);
- out_free_bmap_trace:
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(xfs_bmap_trace_buf);
- out_free_alloc_trace:
-#endif
-#ifdef XFS_ALLOC_TRACE
- ktrace_free(xfs_alloc_trace_buf);
- out:
-#endif
- return -ENOMEM;
-}
-
-STATIC void
-xfs_free_trace_bufs(void)
-{
-#ifdef XFS_DIR2_TRACE
- ktrace_free(xfs_dir2_trace_buf);
-#endif
-#ifdef XFS_ATTR_TRACE
- ktrace_free(xfs_attr_trace_buf);
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(xfs_bmbt_trace_buf);
- ktrace_free(xfs_inobt_trace_buf);
- ktrace_free(xfs_allocbt_trace_buf);
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(xfs_bmap_trace_buf);
-#endif
-#ifdef XFS_ALLOC_TRACE
- ktrace_free(xfs_alloc_trace_buf);
-#endif
-}
-
-STATIC int __init
xfs_init_zones(void)
{
@@ -1830,7 +1741,6 @@ init_xfs_fs(void)
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
- ktrace_init(64);
xfs_ioend_init();
xfs_dir_startup();
@@ -1838,13 +1748,9 @@ init_xfs_fs(void)
if (error)
goto out;
- error = xfs_alloc_trace_bufs();
- if (error)
- goto out_destroy_zones;
-
error = xfs_mru_cache_init();
if (error)
- goto out_free_trace_buffers;
+ goto out_destroy_zones;
error = xfs_filestream_init();
if (error)
@@ -1879,8 +1785,6 @@ init_xfs_fs(void)
xfs_filestream_uninit();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
- out_free_trace_buffers:
- xfs_free_trace_bufs();
out_destroy_zones:
xfs_destroy_zones();
out:
@@ -1897,9 +1801,7 @@ exit_xfs_fs(void)
xfs_buf_terminate();
xfs_filestream_uninit();
xfs_mru_cache_uninit();
- xfs_free_trace_bufs();
xfs_destroy_zones();
- ktrace_uninit();
}
module_init(init_xfs_fs);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 18175ebd58ed..233d4b9881b1 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
# define XFS_BIGFS_STRING
#endif
-#ifdef CONFIG_XFS_TRACE
-# define XFS_TRACE_STRING "tracing, "
-#else
-# define XFS_TRACE_STRING
-#endif
-
#ifdef CONFIG_XFS_DMAPI
# define XFS_DMAPI_STRING "dmapi support, "
#else
@@ -78,7 +72,6 @@ extern void xfs_qm_exit(void);
XFS_SECURITY_STRING \
XFS_REALTIME_STRING \
XFS_BIGFS_STRING \
- XFS_TRACE_STRING \
XFS_DMAPI_STRING \
XFS_DBG_STRING /* DBG must be last */
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index d895a3a960f5..6fed97a8cd3e 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -44,6 +44,7 @@
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
+#include "xfs_trace.h"
#include <linux/kthread.h>
#include <linux/freezer.h>
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
new file mode 100644
index 000000000000..856eb3c8d605
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2009, Christoph Hellwig
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_types.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir2.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_ialloc.h"
+#include "xfs_itable.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_log_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_quota.h"
+#include "xfs_iomap.h"
+#include "xfs_aops.h"
+#include "quota/xfs_dquot_item.h"
+#include "quota/xfs_dquot.h"
+
+/*
+ * Format fsblock number into a static buffer & return it.
+ */
+STATIC char *xfs_fmtfsblock(xfs_fsblock_t bno)
+{
+ static char rval[50];
+
+ if (bno == NULLFSBLOCK)
+ sprintf(rval, "NULLFSBLOCK");
+ else if (isnullstartblock(bno))
+ sprintf(rval, "NULLSTARTBLOCK(%lld)", startblockval(bno));
+ else
+ sprintf(rval, "%lld", (xfs_dfsbno_t)bno);
+ return rval;
+}
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "xfs_trace.h"
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
new file mode 100644
index 000000000000..c40834bdee58
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (c) 2009, Christoph Hellwig
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xfs
+
+#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XFS_H
+
+#include <linux/tracepoint.h>
+
+struct xfs_agf;
+struct xfs_alloc_arg;
+struct xfs_attr_list_context;
+struct xfs_buf_log_item;
+struct xfs_da_args;
+struct xfs_da_node_entry;
+struct xfs_dquot;
+struct xlog_ticket;
+struct log;
+
+#define DEFINE_ATTR_LIST_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_attr_list_context *ctx), \
+ TP_ARGS(ctx), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(u32, hashval) \
+ __field(u32, blkno) \
+ __field(u32, offset) \
+ __field(void *, alist) \
+ __field(int, bufsize) \
+ __field(int, count) \
+ __field(int, firstu) \
+ __field(int, dupcnt) \
+ __field(int, flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
+ __entry->ino = ctx->dp->i_ino; \
+ __entry->hashval = ctx->cursor->hashval; \
+ __entry->blkno = ctx->cursor->blkno; \
+ __entry->offset = ctx->cursor->offset; \
+ __entry->alist = ctx->alist; \
+ __entry->bufsize = ctx->bufsize; \
+ __entry->count = ctx->count; \
+ __entry->firstu = ctx->firstu; \
+ __entry->flags = ctx->flags; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
+ "alist 0x%p size %u count %u firstu %u flags %d %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->hashval, \
+ __entry->blkno, \
+ __entry->offset, \
+ __entry->dupcnt, \
+ __entry->alist, \
+ __entry->bufsize, \
+ __entry->count, \
+ __entry->firstu, \
+ __entry->flags, \
+ __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
+ ) \
+)
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
+
+TRACE_EVENT(xfs_attr_list_node_descend,
+ TP_PROTO(struct xfs_attr_list_context *ctx,
+ struct xfs_da_node_entry *btree),
+ TP_ARGS(ctx, btree),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(u32, hashval)
+ __field(u32, blkno)
+ __field(u32, offset)
+ __field(void *, alist)
+ __field(int, bufsize)
+ __field(int, count)
+ __field(int, firstu)
+ __field(int, dupcnt)
+ __field(int, flags)
+ __field(u32, bt_hashval)
+ __field(u32, bt_before)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
+ __entry->ino = ctx->dp->i_ino;
+ __entry->hashval = ctx->cursor->hashval;
+ __entry->blkno = ctx->cursor->blkno;
+ __entry->offset = ctx->cursor->offset;
+ __entry->alist = ctx->alist;
+ __entry->bufsize = ctx->bufsize;
+ __entry->count = ctx->count;
+ __entry->firstu = ctx->firstu;
+ __entry->flags = ctx->flags;
+ __entry->bt_hashval = be32_to_cpu(btree->hashval);
+ __entry->bt_before = be32_to_cpu(btree->before);
+ ),
+ TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
+ "alist 0x%p size %u count %u firstu %u flags %d %s "
+ "node hashval %u, node before %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->hashval,
+ __entry->blkno,
+ __entry->offset,
+ __entry->dupcnt,
+ __entry->alist,
+ __entry->bufsize,
+ __entry->count,
+ __entry->firstu,
+ __entry->flags,
+ __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
+ __entry->bt_hashval,
+ __entry->bt_before)
+);
+
+TRACE_EVENT(xfs_iext_insert,
+ TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
+ struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
+ TP_ARGS(ip, idx, r, state, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_extnum_t, idx)
+ __field(xfs_fileoff_t, startoff)
+ __field(xfs_fsblock_t, startblock)
+ __field(xfs_filblks_t, blockcount)
+ __field(xfs_exntst_t, state)
+ __field(int, bmap_state)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->idx = idx;
+ __entry->startoff = r->br_startoff;
+ __entry->startblock = r->br_startblock;
+ __entry->blockcount = r->br_blockcount;
+ __entry->state = r->br_state;
+ __entry->bmap_state = state;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
+ "offset %lld block %s count %lld flag %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
+ (long)__entry->idx,
+ __entry->startoff,
+ xfs_fmtfsblock(__entry->startblock),
+ __entry->blockcount,
+ __entry->state,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_BMAP_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
+ unsigned long caller_ip), \
+ TP_ARGS(ip, idx, state, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(xfs_extnum_t, idx) \
+ __field(xfs_fileoff_t, startoff) \
+ __field(xfs_fsblock_t, startblock) \
+ __field(xfs_filblks_t, blockcount) \
+ __field(xfs_exntst_t, state) \
+ __field(int, bmap_state) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
+ ip->i_afp : &ip->i_df; \
+ struct xfs_bmbt_irec r; \
+ \
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->idx = idx; \
+ __entry->startoff = r.br_startoff; \
+ __entry->startblock = r.br_startblock; \
+ __entry->blockcount = r.br_blockcount; \
+ __entry->state = r.br_state; \
+ __entry->bmap_state = state; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
+ "offset %lld block %s count %lld flag %d caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
+ (long)__entry->idx, \
+ __entry->startoff, \
+ xfs_fmtfsblock(__entry->startblock), \
+ __entry->blockcount, \
+ __entry->state, \
+ (char *)__entry->caller_ip) \
+)
+
+DEFINE_BMAP_EVENT(xfs_iext_remove);
+DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
+DEFINE_BMAP_EVENT(xfs_bmap_post_update);
+DEFINE_BMAP_EVENT(xfs_extlist);
+
+#define DEFINE_BUF_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
+ TP_ARGS(bp, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_daddr_t, bno) \
+ __field(size_t, buffer_length) \
+ __field(int, hold) \
+ __field(int, pincount) \
+ __field(unsigned, lockval) \
+ __field(unsigned, flags) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = bp->b_target->bt_dev; \
+ __entry->bno = bp->b_bn; \
+ __entry->buffer_length = bp->b_buffer_length; \
+ __entry->hold = atomic_read(&bp->b_hold); \
+ __entry->pincount = atomic_read(&bp->b_pin_count); \
+ __entry->lockval = xfs_buf_lock_value(bp); \
+ __entry->flags = bp->b_flags; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
+ "lock %d flags %s caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ (unsigned long long)__entry->bno, \
+ __entry->buffer_length, \
+ __entry->hold, \
+ __entry->pincount, \
+ __entry->lockval, \
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
+ (void *)__entry->caller_ip) \
+)
+DEFINE_BUF_EVENT(xfs_buf_init);
+DEFINE_BUF_EVENT(xfs_buf_free);
+DEFINE_BUF_EVENT(xfs_buf_hold);
+DEFINE_BUF_EVENT(xfs_buf_rele);
+DEFINE_BUF_EVENT(xfs_buf_pin);
+DEFINE_BUF_EVENT(xfs_buf_unpin);
+DEFINE_BUF_EVENT(xfs_buf_iodone);
+DEFINE_BUF_EVENT(xfs_buf_iorequest);
+DEFINE_BUF_EVENT(xfs_buf_bawrite);
+DEFINE_BUF_EVENT(xfs_buf_bdwrite);
+DEFINE_BUF_EVENT(xfs_buf_lock);
+DEFINE_BUF_EVENT(xfs_buf_lock_done);
+DEFINE_BUF_EVENT(xfs_buf_cond_lock);
+DEFINE_BUF_EVENT(xfs_buf_unlock);
+DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
+DEFINE_BUF_EVENT(xfs_buf_iowait);
+DEFINE_BUF_EVENT(xfs_buf_iowait_done);
+DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
+DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
+DEFINE_BUF_EVENT(xfs_buf_delwri_split);
+DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
+DEFINE_BUF_EVENT(xfs_bdstrat_shut);
+DEFINE_BUF_EVENT(xfs_buf_item_relse);
+DEFINE_BUF_EVENT(xfs_buf_item_iodone);
+DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
+DEFINE_BUF_EVENT(xfs_buf_error_relse);
+DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
+DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
+
+/* not really buffer traces, but the buf provides useful information */
+DEFINE_BUF_EVENT(xfs_btree_corrupt);
+DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
+DEFINE_BUF_EVENT(xfs_reset_dqcounts);
+DEFINE_BUF_EVENT(xfs_inode_item_push);
+
+/* pass flags explicitly */
+#define DEFINE_BUF_FLAGS_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
+ TP_ARGS(bp, flags, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_daddr_t, bno) \
+ __field(size_t, buffer_length) \
+ __field(int, hold) \
+ __field(int, pincount) \
+ __field(unsigned, lockval) \
+ __field(unsigned, flags) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = bp->b_target->bt_dev; \
+ __entry->bno = bp->b_bn; \
+ __entry->buffer_length = bp->b_buffer_length; \
+ __entry->flags = flags; \
+ __entry->hold = atomic_read(&bp->b_hold); \
+ __entry->pincount = atomic_read(&bp->b_pin_count); \
+ __entry->lockval = xfs_buf_lock_value(bp); \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
+ "lock %d flags %s caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ (unsigned long long)__entry->bno, \
+ __entry->buffer_length, \
+ __entry->hold, \
+ __entry->pincount, \
+ __entry->lockval, \
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
+ (void *)__entry->caller_ip) \
+)
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
+
+TRACE_EVENT(xfs_buf_ioerror,
+ TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
+ TP_ARGS(bp, error, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, bno)
+ __field(size_t, buffer_length)
+ __field(unsigned, flags)
+ __field(int, hold)
+ __field(int, pincount)
+ __field(unsigned, lockval)
+ __field(int, error)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = bp->b_target->bt_dev;
+ __entry->bno = bp->b_bn;
+ __entry->buffer_length = bp->b_buffer_length;
+ __entry->hold = atomic_read(&bp->b_hold);
+ __entry->pincount = atomic_read(&bp->b_pin_count);
+ __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->error = error;
+ __entry->flags = bp->b_flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d error %d flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->bno,
+ __entry->buffer_length,
+ __entry->hold,
+ __entry->pincount,
+ __entry->lockval,
+ __entry->error,
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
+ (void *)__entry->caller_ip)
+);
+
+#define DEFINE_BUF_ITEM_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_buf_log_item *bip), \
+ TP_ARGS(bip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_daddr_t, buf_bno) \
+ __field(size_t, buf_len) \
+ __field(int, buf_hold) \
+ __field(int, buf_pincount) \
+ __field(int, buf_lockval) \
+ __field(unsigned, buf_flags) \
+ __field(unsigned, bli_recur) \
+ __field(int, bli_refcount) \
+ __field(unsigned, bli_flags) \
+ __field(void *, li_desc) \
+ __field(unsigned, li_flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = bip->bli_buf->b_target->bt_dev; \
+ __entry->bli_flags = bip->bli_flags; \
+ __entry->bli_recur = bip->bli_recur; \
+ __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
+ __entry->buf_bno = bip->bli_buf->b_bn; \
+ __entry->buf_len = bip->bli_buf->b_buffer_length; \
+ __entry->buf_flags = bip->bli_buf->b_flags; \
+ __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
+ __entry->buf_pincount = \
+ atomic_read(&bip->bli_buf->b_pin_count); \
+ __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
+ __entry->li_desc = bip->bli_item.li_desc; \
+ __entry->li_flags = bip->bli_item.li_flags; \
+ ), \
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
+ "lock %d flags %s recur %d refcount %d bliflags %s " \
+ "lidesc 0x%p liflags %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ (unsigned long long)__entry->buf_bno, \
+ __entry->buf_len, \
+ __entry->buf_hold, \
+ __entry->buf_pincount, \
+ __entry->buf_lockval, \
+ __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
+ __entry->bli_recur, \
+ __entry->bli_refcount, \
+ __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
+ __entry->li_desc, \
+ __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
+)
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
+
+#define DEFINE_LOCK_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
+ unsigned long caller_ip), \
+ TP_ARGS(ip, lock_flags, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(int, lock_flags) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->lock_flags = lock_flags; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
+ (void *)__entry->caller_ip) \
+)
+
+DEFINE_LOCK_EVENT(xfs_ilock);
+DEFINE_LOCK_EVENT(xfs_ilock_nowait);
+DEFINE_LOCK_EVENT(xfs_ilock_demote);
+DEFINE_LOCK_EVENT(xfs_iunlock);
+
+#define DEFINE_IGET_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip), \
+ TP_ARGS(ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino) \
+)
+DEFINE_IGET_EVENT(xfs_iget_skip);
+DEFINE_IGET_EVENT(xfs_iget_reclaim);
+DEFINE_IGET_EVENT(xfs_iget_found);
+DEFINE_IGET_EVENT(xfs_iget_alloc);
+
+#define DEFINE_INODE_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
+ TP_ARGS(ip, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(int, count) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->count = atomic_read(&VFS_I(ip)->i_count); \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->count, \
+ (char *)__entry->caller_ip) \
+)
+DEFINE_INODE_EVENT(xfs_ihold);
+DEFINE_INODE_EVENT(xfs_irele);
+/* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
+DEFINE_INODE_EVENT(xfs_inode);
+#define xfs_itrace_entry(ip) \
+ trace_xfs_inode(ip, _THIS_IP_)
+
+#define DEFINE_DQUOT_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_dquot *dqp), \
+ TP_ARGS(dqp), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(__be32, id) \
+ __field(unsigned, flags) \
+ __field(unsigned, nrefs) \
+ __field(unsigned long long, res_bcount) \
+ __field(unsigned long long, bcount) \
+ __field(unsigned long long, icount) \
+ __field(unsigned long long, blk_hardlimit) \
+ __field(unsigned long long, blk_softlimit) \
+ __field(unsigned long long, ino_hardlimit) \
+ __field(unsigned long long, ino_softlimit) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = dqp->q_mount->m_super->s_dev; \
+ __entry->id = dqp->q_core.d_id; \
+ __entry->flags = dqp->dq_flags; \
+ __entry->nrefs = dqp->q_nrefs; \
+ __entry->res_bcount = dqp->q_res_bcount; \
+ __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
+ __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
+ __entry->blk_hardlimit = \
+ be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
+ __entry->blk_softlimit = \
+ be64_to_cpu(dqp->q_core.d_blk_softlimit); \
+ __entry->ino_hardlimit = \
+ be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
+ __entry->ino_softlimit = \
+ be64_to_cpu(dqp->q_core.d_ino_softlimit); \
+ ), \
+ TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
+ "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
+ "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ be32_to_cpu(__entry->id), \
+ __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
+ __entry->nrefs, \
+ __entry->res_bcount, \
+ __entry->bcount, \
+ __entry->blk_hardlimit, \
+ __entry->blk_softlimit, \
+ __entry->icount, \
+ __entry->ino_hardlimit, \
+ __entry->ino_softlimit) \
+)
+DEFINE_DQUOT_EVENT(xfs_dqadjust);
+DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
+DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
+DEFINE_DQUOT_EVENT(xfs_dqattach_found);
+DEFINE_DQUOT_EVENT(xfs_dqattach_get);
+DEFINE_DQUOT_EVENT(xfs_dqinit);
+DEFINE_DQUOT_EVENT(xfs_dqreuse);
+DEFINE_DQUOT_EVENT(xfs_dqalloc);
+DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
+DEFINE_DQUOT_EVENT(xfs_dqread);
+DEFINE_DQUOT_EVENT(xfs_dqread_fail);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
+DEFINE_DQUOT_EVENT(xfs_dqget_hit);
+DEFINE_DQUOT_EVENT(xfs_dqget_miss);
+DEFINE_DQUOT_EVENT(xfs_dqput);
+DEFINE_DQUOT_EVENT(xfs_dqput_wait);
+DEFINE_DQUOT_EVENT(xfs_dqput_free);
+DEFINE_DQUOT_EVENT(xfs_dqrele);
+DEFINE_DQUOT_EVENT(xfs_dqflush);
+DEFINE_DQUOT_EVENT(xfs_dqflush_force);
+DEFINE_DQUOT_EVENT(xfs_dqflush_done);
+/* not really iget events, but we re-use the format */
+DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
+DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
+
+
+#define DEFINE_LOGGRANT_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct log *log, struct xlog_ticket *tic), \
+ TP_ARGS(log, tic), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(unsigned, trans_type) \
+ __field(char, ocnt) \
+ __field(char, cnt) \
+ __field(int, curr_res) \
+ __field(int, unit_res) \
+ __field(unsigned int, flags) \
+ __field(void *, reserve_headq) \
+ __field(void *, write_headq) \
+ __field(int, grant_reserve_cycle) \
+ __field(int, grant_reserve_bytes) \
+ __field(int, grant_write_cycle) \
+ __field(int, grant_write_bytes) \
+ __field(int, curr_cycle) \
+ __field(int, curr_block) \
+ __field(xfs_lsn_t, tail_lsn) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = log->l_mp->m_super->s_dev; \
+ __entry->trans_type = tic->t_trans_type; \
+ __entry->ocnt = tic->t_ocnt; \
+ __entry->cnt = tic->t_cnt; \
+ __entry->curr_res = tic->t_curr_res; \
+ __entry->unit_res = tic->t_unit_res; \
+ __entry->flags = tic->t_flags; \
+ __entry->reserve_headq = log->l_reserve_headq; \
+ __entry->write_headq = log->l_write_headq; \
+ __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
+ __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
+ __entry->grant_write_cycle = log->l_grant_write_cycle; \
+ __entry->grant_write_bytes = log->l_grant_write_bytes; \
+ __entry->curr_cycle = log->l_curr_cycle; \
+ __entry->curr_block = log->l_curr_block; \
+ __entry->tail_lsn = log->l_tail_lsn; \
+ ), \
+ TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
+ "t_unit_res %u t_flags %s reserve_headq 0x%p " \
+ "write_headq 0x%p grant_reserve_cycle %d " \
+ "grant_reserve_bytes %d grant_write_cycle %d " \
+ "grant_write_bytes %d curr_cycle %d curr_block %d " \
+ "tail_cycle %d tail_block %d", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
+ __entry->ocnt, \
+ __entry->cnt, \
+ __entry->curr_res, \
+ __entry->unit_res, \
+ __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
+ __entry->reserve_headq, \
+ __entry->write_headq, \
+ __entry->grant_reserve_cycle, \
+ __entry->grant_reserve_bytes, \
+ __entry->grant_write_cycle, \
+ __entry->grant_write_bytes, \
+ __entry->curr_cycle, \
+ __entry->curr_block, \
+ CYCLE_LSN(__entry->tail_lsn), \
+ BLOCK_LSN(__entry->tail_lsn) \
+ ) \
+)
+DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
+DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
+DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
+DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
+DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
+
+#define DEFINE_RW_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
+ TP_ARGS(ip, count, offset, flags), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(xfs_fsize_t, size) \
+ __field(xfs_fsize_t, new_size) \
+ __field(loff_t, offset) \
+ __field(size_t, count) \
+ __field(int, flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = ip->i_new_size; \
+ __entry->offset = offset; \
+ __entry->count = count; \
+ __entry->flags = flags; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
+ "offset 0x%llx count 0x%zx ioflags %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size, \
+ __entry->offset, \
+ __entry->count, \
+ __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \
+)
+DEFINE_RW_EVENT(xfs_file_read);
+DEFINE_RW_EVENT(xfs_file_buffered_write);
+DEFINE_RW_EVENT(xfs_file_direct_write);
+DEFINE_RW_EVENT(xfs_file_splice_read);
+DEFINE_RW_EVENT(xfs_file_splice_write);
+
+
+#define DEFINE_PAGE_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
+ TP_ARGS(inode, page, off), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(pgoff_t, pgoff) \
+ __field(loff_t, size) \
+ __field(unsigned long, offset) \
+ __field(int, delalloc) \
+ __field(int, unmapped) \
+ __field(int, unwritten) \
+ ), \
+ TP_fast_assign( \
+ int delalloc = -1, unmapped = -1, unwritten = -1; \
+ \
+ if (page_has_buffers(page)) \
+ xfs_count_page_state(page, &delalloc, \
+ &unmapped, &unwritten); \
+ __entry->dev = inode->i_sb->s_dev; \
+ __entry->ino = XFS_I(inode)->i_ino; \
+ __entry->pgoff = page_offset(page); \
+ __entry->size = i_size_read(inode); \
+ __entry->offset = off; \
+ __entry->delalloc = delalloc; \
+ __entry->unmapped = unmapped; \
+ __entry->unwritten = unwritten; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \
+ "delalloc %d unmapped %d unwritten %d", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->pgoff, \
+ __entry->size, \
+ __entry->offset, \
+ __entry->delalloc, \
+ __entry->unmapped, \
+ __entry->unwritten) \
+)
+DEFINE_PAGE_EVENT(xfs_writepage);
+DEFINE_PAGE_EVENT(xfs_releasepage);
+DEFINE_PAGE_EVENT(xfs_invalidatepage);
+
+#define DEFINE_IOMAP_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
+ int flags, struct xfs_bmbt_irec *irec), \
+ TP_ARGS(ip, offset, count, flags, irec), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(loff_t, size) \
+ __field(loff_t, new_size) \
+ __field(loff_t, offset) \
+ __field(size_t, count) \
+ __field(int, flags) \
+ __field(xfs_fileoff_t, startoff) \
+ __field(xfs_fsblock_t, startblock) \
+ __field(xfs_filblks_t, blockcount) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = ip->i_new_size; \
+ __entry->offset = offset; \
+ __entry->count = count; \
+ __entry->flags = flags; \
+ __entry->startoff = irec ? irec->br_startoff : 0; \
+ __entry->startblock = irec ? irec->br_startblock : 0; \
+ __entry->blockcount = irec ? irec->br_blockcount : 0; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
+ "offset 0x%llx count %zd flags %s " \
+ "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size, \
+ __entry->offset, \
+ __entry->count, \
+ __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
+ __entry->startoff, \
+ __entry->startblock, \
+ __entry->blockcount) \
+)
+DEFINE_IOMAP_EVENT(xfs_iomap_enter);
+DEFINE_IOMAP_EVENT(xfs_iomap_found);
+DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+
+#define DEFINE_SIMPLE_IO_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
+ TP_ARGS(ip, offset, count), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(loff_t, size) \
+ __field(loff_t, new_size) \
+ __field(loff_t, offset) \
+ __field(size_t, count) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = ip->i_new_size; \
+ __entry->offset = offset; \
+ __entry->count = count; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
+ "offset 0x%llx count %zd", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size, \
+ __entry->offset, \
+ __entry->count) \
+);
+DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
+DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+
+
+TRACE_EVENT(xfs_itruncate_start,
+ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
+ xfs_off_t toss_start, xfs_off_t toss_finish),
+ TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fsize_t, new_size)
+ __field(xfs_off_t, toss_start)
+ __field(xfs_off_t, toss_finish)
+ __field(int, flag)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = new_size;
+ __entry->toss_start = toss_start;
+ __entry->toss_finish = toss_finish;
+ __entry->flag = flag;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
+ "toss start 0x%llx toss finish 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
+ __entry->size,
+ __entry->new_size,
+ __entry->toss_start,
+ __entry->toss_finish)
+);
+
+#define DEFINE_ITRUNC_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
+ TP_ARGS(ip, new_size), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(xfs_fsize_t, size) \
+ __field(xfs_fsize_t, new_size) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = new_size; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size) \
+)
+DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
+
+TRACE_EVENT(xfs_pagecache_inval,
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
+ TP_ARGS(ip, start, finish),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_off_t, start)
+ __field(xfs_off_t, finish)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->start = start;
+ __entry->finish = finish;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->start,
+ __entry->finish)
+);
+
+TRACE_EVENT(xfs_bunmap,
+ TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
+ int flags, unsigned long caller_ip),
+ TP_ARGS(ip, bno, len, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fileoff_t, bno)
+ __field(xfs_filblks_t, len)
+ __field(unsigned long, caller_ip)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->bno = bno;
+ __entry->len = len;
+ __entry->caller_ip = caller_ip;
+ __entry->flags = flags;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
+ "flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->bno,
+ __entry->len,
+ __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
+ (void *)__entry->caller_ip)
+
+);
+
+TRACE_EVENT(xfs_alloc_busy,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+ xfs_extlen_t len, int slot),
+ TP_ARGS(mp, agno, agbno, len, slot),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(int, slot)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->slot = slot;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u slot %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->slot)
+
+);
+
+#define XFS_BUSY_STATES \
+ { 0, "found" }, \
+ { 1, "missing" }
+
+TRACE_EVENT(xfs_alloc_unbusy,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int slot, int found),
+ TP_ARGS(mp, agno, slot, found),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, slot)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->slot = slot;
+ __entry->found = found;
+ ),
+ TP_printk("dev %d:%d agno %u slot %d %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->slot,
+ __print_symbolic(__entry->found, XFS_BUSY_STATES))
+);
+
+TRACE_EVENT(xfs_alloc_busysearch,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+ xfs_extlen_t len, int found),
+ TP_ARGS(mp, agno, agbno, len, found),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->found = found;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __print_symbolic(__entry->found, XFS_BUSY_STATES))
+);
+
+TRACE_EVENT(xfs_agf,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agf, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, flags)
+ __field(__u32, length)
+ __field(__u32, bno_root)
+ __field(__u32, cnt_root)
+ __field(__u32, bno_level)
+ __field(__u32, cnt_level)
+ __field(__u32, flfirst)
+ __field(__u32, fllast)
+ __field(__u32, flcount)
+ __field(__u32, freeblks)
+ __field(__u32, longest)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = be32_to_cpu(agf->agf_seqno),
+ __entry->flags = flags;
+ __entry->length = be32_to_cpu(agf->agf_length),
+ __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
+ __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
+ __entry->bno_level =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
+ __entry->cnt_level =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
+ __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
+ __entry->fllast = be32_to_cpu(agf->agf_fllast),
+ __entry->flcount = be32_to_cpu(agf->agf_flcount),
+ __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
+ __entry->longest = be32_to_cpu(agf->agf_longest);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
+ "levels b %u c %u flfirst %u fllast %u flcount %u "
+ "freeblks %u longest %u caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
+ __entry->length,
+ __entry->bno_root,
+ __entry->cnt_root,
+ __entry->bno_level,
+ __entry->cnt_level,
+ __entry->flfirst,
+ __entry->fllast,
+ __entry->flcount,
+ __entry->freeblks,
+ __entry->longest,
+ (void *)__entry->caller_ip)
+);
+
+TRACE_EVENT(xfs_free_extent,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+ xfs_extlen_t len, bool isfl, int haveleft, int haveright),
+ TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(int, isfl)
+ __field(int, haveleft)
+ __field(int, haveright)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->isfl = isfl;
+ __entry->haveleft = haveleft;
+ __entry->haveright = haveright;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->isfl,
+ __entry->haveleft ?
+ (__entry->haveright ? "both" : "left") :
+ (__entry->haveright ? "right" : "none"))
+
+);
+
+#define DEFINE_ALLOC_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_alloc_arg *args), \
+ TP_ARGS(args), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_agnumber_t, agno) \
+ __field(xfs_agblock_t, agbno) \
+ __field(xfs_extlen_t, minlen) \
+ __field(xfs_extlen_t, maxlen) \
+ __field(xfs_extlen_t, mod) \
+ __field(xfs_extlen_t, prod) \
+ __field(xfs_extlen_t, minleft) \
+ __field(xfs_extlen_t, total) \
+ __field(xfs_extlen_t, alignment) \
+ __field(xfs_extlen_t, minalignslop) \
+ __field(xfs_extlen_t, len) \
+ __field(short, type) \
+ __field(short, otype) \
+ __field(char, wasdel) \
+ __field(char, wasfromfl) \
+ __field(char, isfl) \
+ __field(char, userdata) \
+ __field(xfs_fsblock_t, firstblock) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = args->mp->m_super->s_dev; \
+ __entry->agno = args->agno; \
+ __entry->agbno = args->agbno; \
+ __entry->minlen = args->minlen; \
+ __entry->maxlen = args->maxlen; \
+ __entry->mod = args->mod; \
+ __entry->prod = args->prod; \
+ __entry->minleft = args->minleft; \
+ __entry->total = args->total; \
+ __entry->alignment = args->alignment; \
+ __entry->minalignslop = args->minalignslop; \
+ __entry->len = args->len; \
+ __entry->type = args->type; \
+ __entry->otype = args->otype; \
+ __entry->wasdel = args->wasdel; \
+ __entry->wasfromfl = args->wasfromfl; \
+ __entry->isfl = args->isfl; \
+ __entry->userdata = args->userdata; \
+ __entry->firstblock = args->firstblock; \
+ ), \
+ TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
+ "prod %u minleft %u total %u alignment %u minalignslop %u " \
+ "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
+ "userdata %d firstblock 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->agno, \
+ __entry->agbno, \
+ __entry->minlen, \
+ __entry->maxlen, \
+ __entry->mod, \
+ __entry->prod, \
+ __entry->minleft, \
+ __entry->total, \
+ __entry->alignment, \
+ __entry->minalignslop, \
+ __entry->len, \
+ __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
+ __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
+ __entry->wasdel, \
+ __entry->wasfromfl, \
+ __entry->isfl, \
+ __entry->userdata, \
+ __entry->firstblock) \
+)
+
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
+
+#define DEFINE_DIR2_TRACE(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_da_args *args), \
+ TP_ARGS(args), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __dynamic_array(char, name, args->namelen) \
+ __field(int, namelen) \
+ __field(xfs_dahash_t, hashval) \
+ __field(xfs_ino_t, inumber) \
+ __field(int, op_flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
+ __entry->ino = args->dp->i_ino; \
+ if (args->namelen) \
+ memcpy(__get_str(name), args->name, args->namelen); \
+ __entry->namelen = args->namelen; \
+ __entry->hashval = args->hashval; \
+ __entry->inumber = args->inumber; \
+ __entry->op_flags = args->op_flags; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
+ "inumber 0x%llx op_flags %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->namelen, \
+ __entry->namelen ? __get_str(name) : NULL, \
+ __entry->namelen, \
+ __entry->hashval, \
+ __entry->inumber, \
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
+)
+DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
+DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
+DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
+DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
+
+#define DEFINE_DIR2_SPACE_TRACE(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_da_args *args, int idx), \
+ TP_ARGS(args, idx), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(int, op_flags) \
+ __field(int, idx) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
+ __entry->ino = args->dp->i_ino; \
+ __entry->op_flags = args->op_flags; \
+ __entry->idx = idx; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
+ __entry->idx) \
+)
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
+
+TRACE_EVENT(xfs_dir2_leafn_moveents,
+ TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
+ TP_ARGS(args, src_idx, dst_idx, count),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, op_flags)
+ __field(int, src_idx)
+ __field(int, dst_idx)
+ __field(int, count)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
+ __entry->ino = args->dp->i_ino;
+ __entry->op_flags = args->op_flags;
+ __entry->src_idx = src_idx;
+ __entry->dst_idx = dst_idx;
+ __entry->count = count;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx op_flags %s "
+ "src_idx %d dst_idx %d count %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
+ __entry->src_idx,
+ __entry->dst_idx,
+ __entry->count)
+);
+
+#endif /* _TRACE_XFS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE xfs_trace
+#include <trace/define_trace.h>
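
Callers of the new header simply include it (without CREATE_TRACE_POINTS) and invoke the generated trace_<event>() functions, which remain cheap disabled checks until the event is switched on at runtime, typically via the xfs group under /sys/kernel/debug/tracing/events/. A hedged call-site sketch matching the DEFINE_RW_EVENT prototype above (the wrapper function is illustrative only):

#include "xfs_trace.h"

/* mirrors how xfs_lrw.c invokes the read event earlier in this patch */
static void example_read_hook(struct xfs_inode *ip, size_t count,
			      loff_t offset, int ioflags)
{
	trace_xfs_file_read(ip, count, offset, ioflags);
}
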
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 00cabf5354d2..7c220b4227bc 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -39,6 +39,10 @@ struct attrlist_cursor_kern;
#define IO_ISDIRECT 0x00004 /* bypass page cache */
#define IO_INVIS 0x00020 /* don't update inode timestamps */
+#define XFS_IO_FLAGS \
+ { IO_ISDIRECT, "DIRECT" }, \
+ { IO_INVIS, "INVIS"}
+
/*
* Flush/Invalidate options for vop_toss/flush/flushinval_pages.
*/
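
XFS_IO_FLAGS above is a { value, "name" } table consumed by __print_flags() in the DEFINE_RW_EVENT format string, so ioflags are rendered symbolically (e.g. "DIRECT|INVIS") in the trace output; the other *_FLAGS and *_TYPES tables referenced from xfs_trace.h follow the same shape. A compact, hypothetical illustration of the idiom:

#define FOO_STATE_FLAGS \
	{ 0x1, "DIRTY" }, \
	{ 0x2, "PINNED" }

/* used inside a TRACE_EVENT() output string:
 *	TP_printk("flags %s",
 *		  __print_flags(__entry->flags, "|", FOO_STATE_FLAGS))
 */
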
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 2f3f2229eaaf..d7c7eea09fc2 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -47,6 +47,7 @@
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
+#include "xfs_trace.h"
/*
@@ -112,10 +113,7 @@ xfs_qm_dqinit(
init_completion(&dqp->q_flush);
complete(&dqp->q_flush);
-#ifdef XFS_DQUOT_TRACE
- dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
- xfs_dqtrace_entry(dqp, "DQINIT");
-#endif
+ trace_xfs_dqinit(dqp);
} else {
/*
* Only the q_core portion was zeroed in dqreclaim_one().
@@ -136,10 +134,7 @@ xfs_qm_dqinit(
dqp->q_hash = NULL;
ASSERT(dqp->dq_flnext == dqp->dq_flprev);
-#ifdef XFS_DQUOT_TRACE
- ASSERT(dqp->q_trace);
- xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
-#endif
+ trace_xfs_dqreuse(dqp);
}
/*
@@ -167,13 +162,8 @@ xfs_qm_dqdestroy(
mutex_destroy(&dqp->q_qlock);
sv_destroy(&dqp->q_pinwait);
-
-#ifdef XFS_DQUOT_TRACE
- if (dqp->q_trace)
- ktrace_free(dqp->q_trace);
- dqp->q_trace = NULL;
-#endif
kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
+
atomic_dec(&xfs_Gqm->qm_totaldquots);
}
@@ -195,49 +185,6 @@ xfs_qm_dqinit_core(
d->dd_diskdq.d_flags = type;
}
-
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot tracing for debugging.
- */
-/* ARGSUSED */
-void
-__xfs_dqtrace_entry(
- xfs_dquot_t *dqp,
- char *func,
- void *retaddr,
- xfs_inode_t *ip)
-{
- xfs_dquot_t *udqp = NULL;
- xfs_ino_t ino = 0;
-
- ASSERT(dqp->q_trace);
- if (ip) {
- ino = ip->i_ino;
- udqp = ip->i_udquot;
- }
- ktrace_enter(dqp->q_trace,
- (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
- (void *)func,
- (void *)(__psint_t)dqp->q_nrefs,
- (void *)(__psint_t)dqp->dq_flags,
- (void *)(__psint_t)dqp->q_res_bcount,
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit),
- (void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id),
- (void *)(__psint_t)current_pid(),
- (void *)(__psint_t)ino,
- (void *)(__psint_t)retaddr,
- (void *)(__psint_t)udqp);
- return;
-}
-#endif
-
-
/*
* If default limits are in force, push them into the dquot now.
* We overwrite the dquot limits only if they are zero and this
@@ -425,7 +372,8 @@ xfs_qm_dqalloc(
xfs_trans_t *tp = *tpp;
ASSERT(tp != NULL);
- xfs_dqtrace_entry(dqp, "DQALLOC");
+
+ trace_xfs_dqalloc(dqp);
/*
* Initialize the bmap freelist prior to calling bmapi code.
@@ -612,7 +560,8 @@ xfs_qm_dqtobp(
* (in which case we already have the buf).
*/
if (! newdquot) {
- xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
+ trace_xfs_dqtobp_read(dqp);
+
if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
dqp->q_blkno,
XFS_QI_DQCHUNKLEN(mp),
@@ -670,11 +619,12 @@ xfs_qm_dqread(
ASSERT(tpp);
+ trace_xfs_dqread(dqp);
+
/*
* get a pointer to the on-disk dquot and the buffer containing it
* dqp already knows its own type (GROUP/USER).
*/
- xfs_dqtrace_entry(dqp, "DQREAD");
if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
return (error);
}
@@ -763,7 +713,7 @@ xfs_qm_idtodq(
* or if the dquot didn't exist on disk and we ask to
* allocate (ENOENT).
*/
- xfs_dqtrace_entry(dqp, "DQREAD FAIL");
+ trace_xfs_dqread_fail(dqp);
cancelflags |= XFS_TRANS_ABORT;
goto error0;
}
@@ -817,7 +767,8 @@ xfs_qm_dqlookup(
* id can't be modified without the hashlock anyway.
*/
if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
- xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
+ trace_xfs_dqlookup_found(dqp);
+
/*
* All in core dquots must be on the dqlist of mp
*/
@@ -827,7 +778,7 @@ xfs_qm_dqlookup(
if (dqp->q_nrefs == 0) {
ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
- xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");
+ trace_xfs_dqlookup_want(dqp);
/*
* We may have raced with dqreclaim_one()
@@ -857,8 +808,7 @@ xfs_qm_dqlookup(
/*
* take it off the freelist
*/
- xfs_dqtrace_entry(dqp,
- "DQLOOKUP: TAKEOFF FL");
+ trace_xfs_dqlookup_freelist(dqp);
XQM_FREELIST_REMOVE(dqp);
/* xfs_qm_freelist_print(&(xfs_Gqm->
qm_dqfreelist),
@@ -878,8 +828,7 @@ xfs_qm_dqlookup(
*/
ASSERT(mutex_is_locked(&qh->qh_lock));
if (dqp->HL_PREVP != &qh->qh_next) {
- xfs_dqtrace_entry(dqp,
- "DQLOOKUP: HASH MOVETOFRONT");
+ trace_xfs_dqlookup_move(dqp);
if ((d = dqp->HL_NEXT))
d->HL_PREVP = dqp->HL_PREVP;
*(dqp->HL_PREVP) = d;
@@ -889,7 +838,7 @@ xfs_qm_dqlookup(
dqp->HL_PREVP = &qh->qh_next;
qh->qh_next = dqp;
}
- xfs_dqtrace_entry(dqp, "LOOKUP END");
+ trace_xfs_dqlookup_done(dqp);
*O_dqpp = dqp;
ASSERT(mutex_is_locked(&qh->qh_lock));
return (0);
@@ -971,7 +920,7 @@ xfs_qm_dqget(
ASSERT(*O_dqpp);
ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
mutex_unlock(&h->qh_lock);
- xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
+ trace_xfs_dqget_hit(*O_dqpp);
return (0); /* success */
}
XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
@@ -1104,7 +1053,7 @@ xfs_qm_dqget(
mutex_unlock(&h->qh_lock);
dqret:
ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
- xfs_dqtrace_entry(dqp, "DQGET DONE");
+ trace_xfs_dqget_miss(dqp);
*O_dqpp = dqp;
return (0);
}
@@ -1124,7 +1073,8 @@ xfs_qm_dqput(
ASSERT(dqp->q_nrefs > 0);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "DQPUT");
+
+ trace_xfs_dqput(dqp);
if (dqp->q_nrefs != 1) {
dqp->q_nrefs--;
@@ -1137,7 +1087,7 @@ xfs_qm_dqput(
* in the right order; but try to get it out-of-order first
*/
if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
- xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
+ trace_xfs_dqput_wait(dqp);
xfs_dqunlock(dqp);
xfs_qm_freelist_lock(xfs_Gqm);
xfs_dqlock(dqp);
@@ -1148,7 +1098,8 @@ xfs_qm_dqput(
/* We can't depend on nrefs being == 1 here */
if (--dqp->q_nrefs == 0) {
- xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
+ trace_xfs_dqput_free(dqp);
+
/*
* insert at end of the freelist.
*/
@@ -1196,7 +1147,7 @@ xfs_qm_dqrele(
if (!dqp)
return;
- xfs_dqtrace_entry(dqp, "DQRELE");
+ trace_xfs_dqrele(dqp);
xfs_dqlock(dqp);
/*
@@ -1229,7 +1180,7 @@ xfs_qm_dqflush(
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
- xfs_dqtrace_entry(dqp, "DQFLUSH");
+ trace_xfs_dqflush(dqp);
/*
* If not dirty, or it's pinned and we are not supposed to
@@ -1259,7 +1210,6 @@ xfs_qm_dqflush(
* the ondisk-dquot has already been allocated for.
*/
if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
- xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
ASSERT(error != ENOENT);
/*
* Quotas could have gotten turned off (ESRCH)
@@ -1297,7 +1247,7 @@ xfs_qm_dqflush(
* get stuck waiting in the write for too long.
*/
if (XFS_BUF_ISPINNED(bp)) {
- xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE");
+ trace_xfs_dqflush_force(dqp);
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
}
@@ -1308,7 +1258,9 @@ xfs_qm_dqflush(
} else {
error = xfs_bwrite(mp, bp);
}
- xfs_dqtrace_entry(dqp, "DQFLUSH END");
+
+ trace_xfs_dqflush_done(dqp);
+
/*
* dqp is still locked, but caller is free to unlock it now.
*/
@@ -1483,7 +1435,7 @@ xfs_qm_dqpurge(
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
+
/* dqflush unlocks dqflock */
/*
* Given that dqpurge is a very rare occurrence, it is OK
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index a2c16bcee90b..a0f7da586d1b 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -85,9 +85,6 @@ typedef struct xfs_dquot {
struct completion q_flush; /* flush completion queue */
atomic_t q_pincount; /* dquot pin count */
wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
-#ifdef XFS_DQUOT_TRACE
- struct ktrace *q_trace; /* trace header structure */
-#endif
} xfs_dquot_t;
@@ -144,24 +141,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
(XFS_IS_OQUOTA_ON((d)->q_mount))))
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot Tracing stuff.
- */
-#define DQUOT_TRACE_SIZE 64
-#define DQUOT_KTRACE_ENTRY 1
-
-extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
- void *, xfs_inode_t *);
-#define xfs_dqtrace_entry_ino(a,b,ip) \
- __xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
-#define xfs_dqtrace_entry(a,b) \
- __xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
-#else
-#define xfs_dqtrace_entry(a,b)
-#define xfs_dqtrace_entry_ino(a,b,ip)
-#endif
-
#ifdef QUOTADEBUG
extern void xfs_qm_dqprint(xfs_dquot_t *);
#else
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 45b1bfef7388..9e627a8b5b0e 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -47,6 +47,7 @@
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
+#include "xfs_trace.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -453,7 +454,7 @@ again:
xfs_dqunlock(dqp);
continue;
}
- xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
+
/* XXX a sentinel would be better */
recl = XFS_QI_MPLRECLAIMS(mp);
if (!xfs_dqflock_nowait(dqp)) {
@@ -651,7 +652,7 @@ xfs_qm_dqattach_one(
*/
dqp = *IO_idqpp;
if (dqp) {
- xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
+ trace_xfs_dqattach_found(dqp);
return 0;
}
@@ -704,7 +705,7 @@ xfs_qm_dqattach_one(
if (error)
return error;
- xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
+ trace_xfs_dqattach_get(dqp);
/*
* dqget may have dropped and re-acquired the ilock, but it guarantees
@@ -890,15 +891,15 @@ xfs_qm_dqdetach(
if (!(ip->i_udquot || ip->i_gdquot))
return;
+ trace_xfs_dquot_dqdetach(ip);
+
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
if (ip->i_udquot) {
- xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
if (ip->i_gdquot) {
- xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
@@ -977,7 +978,6 @@ xfs_qm_sync(
* across a disk write
*/
xfs_qm_mplist_unlock(mp);
- xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
error = xfs_qm_dqflush(dqp, flush_flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
@@ -1350,7 +1350,8 @@ xfs_qm_reset_dqcounts(
xfs_disk_dquot_t *ddq;
int j;
- xfs_buftrace("RESET DQUOTS", bp);
+ trace_xfs_reset_dqcounts(bp, _RET_IP_);
+
/*
* Reset all counters and timers. They'll be
* started afresh by xfs_qm_quotacheck.
@@ -1543,7 +1544,9 @@ xfs_qm_quotacheck_dqadjust(
xfs_qcnt_t rtblks)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
+
+ trace_xfs_dqadjust(dqp);
+
/*
* Adjust the inode count and the block count to reflect this inode's
* resource usage.
@@ -1994,7 +1997,9 @@ xfs_qm_shake_freelist(
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
+
+ trace_xfs_dqshake_dirty(dqp);
+
/*
* We flush it delayed write, so don't bother
* releasing the mplock.
@@ -2038,7 +2043,9 @@ xfs_qm_shake_freelist(
return nreclaimed;
goto tryagain;
}
- xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
+
+ trace_xfs_dqshake_unlink(dqp);
+
#ifdef QUOTADEBUG
cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
dqp, be32_to_cpu(dqp->q_core.d_id));
@@ -2125,7 +2132,9 @@ xfs_qm_dqreclaim_one(void)
*/
if (dqp->dq_flags & XFS_DQ_WANT) {
ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
- xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
+
+ trace_xfs_dqreclaim_want(dqp);
+
xfs_dqunlock(dqp);
xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
@@ -2171,7 +2180,9 @@ xfs_qm_dqreclaim_one(void)
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
+
+ trace_xfs_dqreclaim_dirty(dqp);
+
/*
* We flush it delayed write, so don't bother
* releasing the freelist lock.
@@ -2194,8 +2205,9 @@ xfs_qm_dqreclaim_one(void)
if (!mutex_trylock(&dqp->q_hash->qh_lock))
goto mplistunlock;
+ trace_xfs_dqreclaim_unlink(dqp);
+
ASSERT(dqp->q_nrefs == 0);
- xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
XQM_FREELIST_REMOVE(dqp);
@@ -2430,7 +2442,7 @@ xfs_qm_vop_dqalloc(
}
}
if (uq)
- xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
+ trace_xfs_dquot_dqalloc(ip);
xfs_iunlock(ip, lockflags);
if (O_udqpp)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 5d1a3b98a6e6..71af76fe8a23 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -49,6 +49,7 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
+#include "xfs_trace.h"
#ifdef DEBUG
# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
@@ -496,7 +497,6 @@ xfs_qm_scall_setqlim(
ASSERT(error != ENOENT);
return (error);
}
- xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
xfs_trans_dqjoin(tp, dqp);
ddq = &dqp->q_core;
@@ -602,7 +602,6 @@ xfs_qm_scall_setqlim(
dqp->dq_flags |= XFS_DQ_DIRTY;
xfs_trans_log_dquot(tp, dqp);
- xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
error = xfs_trans_commit(tp, 0);
xfs_qm_dqprint(dqp);
xfs_qm_dqrele(dqp);
@@ -630,7 +629,6 @@ xfs_qm_scall_getquota(
return (error);
}
- xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS");
/*
* If everything's NULL, this dquot doesn't quite exist as far as
* our utility programs are concerned.
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 2d494c26717f..000000000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-
-static kmem_zone_t *ktrace_hdr_zone;
-static kmem_zone_t *ktrace_ent_zone;
-static int ktrace_zentries;
-
-void __init
-ktrace_init(int zentries)
-{
- ktrace_zentries = roundup_pow_of_two(zentries);
-
- ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
- "ktrace_hdr");
- ASSERT(ktrace_hdr_zone);
-
- ktrace_ent_zone = kmem_zone_init(ktrace_zentries
- * sizeof(ktrace_entry_t),
- "ktrace_ent");
- ASSERT(ktrace_ent_zone);
-}
-
-void __exit
-ktrace_uninit(void)
-{
- kmem_zone_destroy(ktrace_hdr_zone);
- kmem_zone_destroy(ktrace_ent_zone);
-}
-
-/*
- * ktrace_alloc()
- *
- * Allocate a ktrace header and enough buffering for the given
- * number of entries. Round the number of entries up to a
- * power of 2 so we can do fast masking to get the index from
- * the atomic index counter.
- */
-ktrace_t *
-ktrace_alloc(int nentries, unsigned int __nocast sleep)
-{
- ktrace_t *ktp;
- ktrace_entry_t *ktep;
- int entries;
-
- ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
-
- if (ktp == (ktrace_t*)NULL) {
- /*
- * KM_SLEEP callers don't expect failure.
- */
- if (sleep & KM_SLEEP)
- panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
- return NULL;
- }
-
- /*
- * Special treatment for buffers with the ktrace_zentries entries
- */
- entries = roundup_pow_of_two(nentries);
- if (entries == ktrace_zentries) {
- ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
- sleep);
- } else {
- ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
- sleep | KM_LARGE);
- }
-
- if (ktep == NULL) {
- /*
- * KM_SLEEP callers don't expect failure.
- */
- if (sleep & KM_SLEEP)
- panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
- kmem_free(ktp);
-
- return NULL;
- }
-
- ktp->kt_entries = ktep;
- ktp->kt_nentries = entries;
- ASSERT(is_power_of_2(entries));
- ktp->kt_index_mask = entries - 1;
- atomic_set(&ktp->kt_index, 0);
- ktp->kt_rollover = 0;
- return ktp;
-}
-
-
-/*
- * ktrace_free()
- *
- * Free up the ktrace header and buffer. It is up to the caller
- * to ensure that no-one is referencing it.
- */
-void
-ktrace_free(ktrace_t *ktp)
-{
- if (ktp == (ktrace_t *)NULL)
- return;
-
- /*
- * Special treatment for the Vnode trace buffer.
- */
- if (ktp->kt_nentries == ktrace_zentries)
- kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
- else
- kmem_free(ktp->kt_entries);
-
- kmem_zone_free(ktrace_hdr_zone, ktp);
-}
-
-
-/*
- * Enter the given values into the "next" entry in the trace buffer.
- * kt_index is always the index of the next entry to be filled.
- */
-void
-ktrace_enter(
- ktrace_t *ktp,
- void *val0,
- void *val1,
- void *val2,
- void *val3,
- void *val4,
- void *val5,
- void *val6,
- void *val7,
- void *val8,
- void *val9,
- void *val10,
- void *val11,
- void *val12,
- void *val13,
- void *val14,
- void *val15)
-{
- int index;
- ktrace_entry_t *ktep;
-
- ASSERT(ktp != NULL);
-
- /*
- * Grab an entry by pushing the index up to the next one.
- */
- index = atomic_add_return(1, &ktp->kt_index);
- index = (index - 1) & ktp->kt_index_mask;
- if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
- ktp->kt_rollover = 1;
-
- ASSERT((index >= 0) && (index < ktp->kt_nentries));
-
- ktep = &(ktp->kt_entries[index]);
-
- ktep->val[0] = val0;
- ktep->val[1] = val1;
- ktep->val[2] = val2;
- ktep->val[3] = val3;
- ktep->val[4] = val4;
- ktep->val[5] = val5;
- ktep->val[6] = val6;
- ktep->val[7] = val7;
- ktep->val[8] = val8;
- ktep->val[9] = val9;
- ktep->val[10] = val10;
- ktep->val[11] = val11;
- ktep->val[12] = val12;
- ktep->val[13] = val13;
- ktep->val[14] = val14;
- ktep->val[15] = val15;
-}
-
-/*
- * Return the number of entries in the trace buffer.
- */
-int
-ktrace_nentries(
- ktrace_t *ktp)
-{
- int index;
- if (ktp == NULL)
- return 0;
-
- index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
- return (ktp->kt_rollover ? ktp->kt_nentries : index);
-}
-
-/*
- * ktrace_first()
- *
- * This is used to find the start of the trace buffer.
- * In conjunction with ktrace_next() it can be used to
- * iterate through the entire trace buffer. This code does
- * not do any locking because it is assumed that it is called
- * from the debugger.
- *
- * The caller must pass in a pointer to a ktrace_snap
- * structure in which we will keep some state used to
- * iterate through the buffer. This state must not be touched
- * by any code outside of this module.
- */
-ktrace_entry_t *
-ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
-{
- ktrace_entry_t *ktep;
- int index;
- int nentries;
-
- if (ktp->kt_rollover)
- index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
- else
- index = 0;
-
- ktsp->ks_start = index;
- ktep = &(ktp->kt_entries[index]);
-
- nentries = ktrace_nentries(ktp);
- index++;
- if (index < nentries) {
- ktsp->ks_index = index;
- } else {
- ktsp->ks_index = 0;
- if (index > nentries)
- ktep = NULL;
- }
- return ktep;
-}
-
-/*
- * ktrace_next()
- *
- * This is used to iterate through the entries of the given
- * trace buffer. The caller must pass in the ktrace_snap_t
- * structure initialized by ktrace_first(). The return value
- * will be either a pointer to the next ktrace_entry or NULL
- * if all of the entries have been traversed.
- */
-ktrace_entry_t *
-ktrace_next(
- ktrace_t *ktp,
- ktrace_snap_t *ktsp)
-{
- int index;
- ktrace_entry_t *ktep;
-
- index = ktsp->ks_index;
- if (index == ktsp->ks_start) {
- ktep = NULL;
- } else {
- ktep = &ktp->kt_entries[index];
- }
-
- index++;
- if (index == ktrace_nentries(ktp)) {
- ktsp->ks_index = 0;
- } else {
- ktsp->ks_index = index;
- }
-
- return ktep;
-}
-
-/*
- * ktrace_skip()
- *
- * Skip the next "count" entries and return the entry after that.
- * Return NULL if this causes us to iterate past the beginning again.
- */
-ktrace_entry_t *
-ktrace_skip(
- ktrace_t *ktp,
- int count,
- ktrace_snap_t *ktsp)
-{
- int index;
- int new_index;
- ktrace_entry_t *ktep;
- int nentries = ktrace_nentries(ktp);
-
- index = ktsp->ks_index;
- new_index = index + count;
- while (new_index >= nentries) {
- new_index -= nentries;
- }
- if (index == ktsp->ks_start) {
- /*
- * We've iterated around to the start, so we're done.
- */
- ktep = NULL;
- } else if ((new_index < index) && (index < ktsp->ks_index)) {
- /*
- * We've skipped past the start again, so we're done.
- */
- ktep = NULL;
- ktsp->ks_index = ktsp->ks_start;
- } else {
- ktep = &(ktp->kt_entries[new_index]);
- new_index++;
- if (new_index == nentries) {
- ktsp->ks_index = 0;
- } else {
- ktsp->ks_index = new_index;
- }
- }
- return ktep;
-}
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d6947ca60..000000000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_KTRACE_H__
-#define __XFS_SUPPORT_KTRACE_H__
-
-/*
- * Trace buffer entry structure.
- */
-typedef struct ktrace_entry {
- void *val[16];
-} ktrace_entry_t;
-
-/*
- * Trace buffer header structure.
- */
-typedef struct ktrace {
- int kt_nentries; /* number of entries in trace buf */
- atomic_t kt_index; /* current index in entries */
- unsigned int kt_index_mask;
- int kt_rollover;
- ktrace_entry_t *kt_entries; /* buffer of entries */
-} ktrace_t;
-
-/*
- * Trace buffer snapshot structure.
- */
-typedef struct ktrace_snap {
- int ks_start; /* kt_index at time of snap */
- int ks_index; /* current index */
-} ktrace_snap_t;
-
-
-#ifdef CONFIG_XFS_TRACE
-
-extern void ktrace_init(int zentries);
-extern void ktrace_uninit(void);
-
-extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
-extern void ktrace_free(ktrace_t *);
-
-extern void ktrace_enter(
- ktrace_t *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *);
-
-extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
-extern int ktrace_nentries(ktrace_t *);
-extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
-extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
-
-#else
-#define ktrace_init(x) do { } while (0)
-#define ktrace_uninit() do { } while (0)
-#endif /* CONFIG_XFS_TRACE */
-
-#endif /* __XFS_SUPPORT_KTRACE_H__ */
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 17254b529c54..5ad8ad3a1dcd 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -25,21 +25,5 @@
/* #define QUOTADEBUG 1 */
#endif
-#ifdef CONFIG_XFS_TRACE
-#define XFS_ALLOC_TRACE 1
-#define XFS_ATTR_TRACE 1
-#define XFS_BLI_TRACE 1
-#define XFS_BMAP_TRACE 1
-#define XFS_BTREE_TRACE 1
-#define XFS_DIR2_TRACE 1
-#define XFS_DQUOT_TRACE 1
-#define XFS_ILOCK_TRACE 1
-#define XFS_LOG_TRACE 1
-#define XFS_RW_TRACE 1
-#define XFS_BUF_TRACE 1
-#define XFS_INODE_TRACE 1
-#define XFS_FILESTREAMS_TRACE 1
-#endif
-
#include <linux-2.6/xfs_linux.h>
#endif /* __XFS_H__ */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index a5d54bf4931b..6702bd865811 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -86,6 +86,20 @@ typedef struct xfs_agf {
#define XFS_AGF_NUM_BITS 12
#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
+#define XFS_AGF_FLAGS \
+ { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
+ { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
+ { XFS_AGF_SEQNO, "SEQNO" }, \
+ { XFS_AGF_LENGTH, "LENGTH" }, \
+ { XFS_AGF_ROOTS, "ROOTS" }, \
+ { XFS_AGF_LEVELS, "LEVELS" }, \
+ { XFS_AGF_FLFIRST, "FLFIRST" }, \
+ { XFS_AGF_FLLAST, "FLLAST" }, \
+ { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
+ { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
+ { XFS_AGF_LONGEST, "LONGEST" }, \
+ { XFS_AGF_BTREEBLKS, "BTREEBLKS" }
+
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 2cf944eb796d..a1c65fc6d9c4 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -38,6 +38,7 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
@@ -51,30 +52,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len);
-#if defined(XFS_ALLOC_TRACE)
-ktrace_t *xfs_alloc_trace_buf;
-
-#define TRACE_ALLOC(s,a) \
- xfs_alloc_trace_alloc(__func__, s, a, __LINE__)
-#define TRACE_FREE(s,a,b,x,f) \
- xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__)
-#define TRACE_MODAGF(s,a,f) \
- xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__)
-#define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
-#define TRACE_UNBUSY(__func__,s,ag,sl,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
-#define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
-#else
-#define TRACE_ALLOC(s,a)
-#define TRACE_FREE(s,a,b,x,f)
-#define TRACE_MODAGF(s,a,f)
-#define TRACE_BUSY(s,a,ag,agb,l,sl,tp)
-#define TRACE_UNBUSY(fname,s,ag,sl,tp)
-#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp)
-#endif /* XFS_ALLOC_TRACE */
-
/*
* Prototypes for per-ag allocation routines
*/
@@ -498,124 +475,6 @@ xfs_alloc_read_agfl(
return 0;
}
-#if defined(XFS_ALLOC_TRACE)
-/*
- * Add an allocation trace entry for an alloc call.
- */
-STATIC void
-xfs_alloc_trace_alloc(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_alloc_arg_t *args, /* allocation argument structure */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)args->mp,
- (void *)(__psunsigned_t)args->agno,
- (void *)(__psunsigned_t)args->agbno,
- (void *)(__psunsigned_t)args->minlen,
- (void *)(__psunsigned_t)args->maxlen,
- (void *)(__psunsigned_t)args->mod,
- (void *)(__psunsigned_t)args->prod,
- (void *)(__psunsigned_t)args->minleft,
- (void *)(__psunsigned_t)args->total,
- (void *)(__psunsigned_t)args->alignment,
- (void *)(__psunsigned_t)args->len,
- (void *)((((__psint_t)args->type) << 16) |
- (__psint_t)args->otype),
- (void *)(__psint_t)((args->wasdel << 3) |
- (args->wasfromfl << 2) |
- (args->isfl << 1) |
- (args->userdata << 0)));
-}
-
-/*
- * Add an allocation trace entry for a free call.
- */
-STATIC void
-xfs_alloc_trace_free(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* a.g. relative block number */
- xfs_extlen_t len, /* length of extent */
- int isfl, /* set if is freelist allocation/free */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psunsigned_t)agno,
- (void *)(__psunsigned_t)agbno,
- (void *)(__psunsigned_t)len,
- (void *)(__psint_t)isfl,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
-
-/*
- * Add an allocation trace entry for modifying an agf.
- */
-STATIC void
-xfs_alloc_trace_modagf(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agf_t *agf, /* new agf value */
- int flags, /* logging flags for agf */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psint_t)flags,
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
-}
-
-STATIC void
-xfs_alloc_trace_busy(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* a.g. relative block number */
- xfs_extlen_t len, /* length of extent */
- int slot, /* perag Busy slot */
- xfs_trans_t *tp,
- int trtype, /* type: add, delete, search */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(trtype | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psunsigned_t)agno,
- (void *)(__psunsigned_t)agbno,
- (void *)(__psunsigned_t)len,
- (void *)(__psint_t)slot,
- (void *)tp,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
-#endif /* XFS_ALLOC_TRACE */
-
/*
* Allocation group level functions.
*/
@@ -665,9 +524,6 @@ xfs_alloc_ag_vextent(
*/
if (args->agbno != NULLAGBLOCK) {
xfs_agf_t *agf; /* allocation group freelist header */
-#ifdef XFS_ALLOC_TRACE
- xfs_mount_t *mp = args->mp;
-#endif
long slen = (long)args->len;
ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
@@ -682,7 +538,6 @@ xfs_alloc_ag_vextent(
args->pag->pagf_freeblks -= args->len;
ASSERT(be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length));
- TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
xfs_alloc_log_agf(args->tp, args->agbp,
XFS_AGF_FREEBLKS);
/* search the busylist for these blocks */
@@ -792,13 +647,14 @@ xfs_alloc_ag_vextent_exact(
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("normal", args);
+
+ trace_xfs_alloc_exact_done(args);
args->wasfromfl = 0;
return 0;
error0:
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_exact_error(args);
return error;
}
@@ -958,7 +814,7 @@ xfs_alloc_ag_vextent_near(
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_near_nominleft(args);
return 0;
}
blen = args->len;
@@ -981,7 +837,8 @@ xfs_alloc_ag_vextent_near(
goto error0;
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- TRACE_ALLOC("first", args);
+
+ trace_xfs_alloc_near_first(args);
return 0;
}
/*
@@ -1272,7 +1129,7 @@ xfs_alloc_ag_vextent_near(
* If we couldn't get anything, give up.
*/
if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
- TRACE_ALLOC("neither", args);
+ trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
return 0;
}
@@ -1299,7 +1156,7 @@ xfs_alloc_ag_vextent_near(
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args)) {
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_near_nominleft(args);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
return 0;
@@ -1314,13 +1171,18 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
ltnew, rlen, XFSA_FIXUP_BNO_OK)))
goto error0;
- TRACE_ALLOC(j ? "gt" : "lt", args);
+
+ if (j)
+ trace_xfs_alloc_near_greater(args);
+ else
+ trace_xfs_alloc_near_lesser(args);
+
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_near_error(args);
if (cnt_cur != NULL)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur_lt != NULL)
@@ -1371,7 +1233,7 @@ xfs_alloc_ag_vextent_size(
goto error0;
if (i == 0 || flen == 0) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("noentry", args);
+ trace_xfs_alloc_size_noentry(args);
return 0;
}
ASSERT(i == 1);
@@ -1448,7 +1310,7 @@ xfs_alloc_ag_vextent_size(
xfs_alloc_fix_len(args);
if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_size_nominleft(args);
args->agbno = NULLAGBLOCK;
return 0;
}
@@ -1471,11 +1333,11 @@ xfs_alloc_ag_vextent_size(
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
- TRACE_ALLOC("normal", args);
+ trace_xfs_alloc_size_done(args);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_size_error(args);
if (cnt_cur)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur)
@@ -1534,7 +1396,7 @@ xfs_alloc_ag_vextent_small(
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
args->wasfromfl = 1;
- TRACE_ALLOC("freelist", args);
+ trace_xfs_alloc_small_freelist(args);
*stat = 0;
return 0;
}
@@ -1556,17 +1418,17 @@ xfs_alloc_ag_vextent_small(
*/
if (flen < args->minlen) {
args->agbno = NULLAGBLOCK;
- TRACE_ALLOC("notenough", args);
+ trace_xfs_alloc_small_notenough(args);
flen = 0;
}
*fbnop = fbno;
*flenp = flen;
*stat = 1;
- TRACE_ALLOC("normal", args);
+ trace_xfs_alloc_small_done(args);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_small_error(args);
return error;
}
@@ -1809,17 +1671,14 @@ xfs_free_ag_extent(
be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length),
error0);
- TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
if (!isfl)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
XFS_STATS_INC(xs_freex);
XFS_STATS_ADD(xs_freeb, len);
}
- TRACE_FREE(haveleft ?
- (haveright ? "both" : "left") :
- (haveright ? "right" : "none"),
- agno, bno, len, isfl);
+
+ trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
/*
* Since blocks move to the free list without the coordination
@@ -1836,7 +1695,7 @@ xfs_free_ag_extent(
return 0;
error0:
- TRACE_FREE("error", agno, bno, len, isfl);
+ trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
if (cnt_cur)
@@ -2122,7 +1981,6 @@ xfs_alloc_get_freelist(
logflags |= XFS_AGF_BTREEBLKS;
}
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
*bnop = bno;
@@ -2165,6 +2023,8 @@ xfs_alloc_log_agf(
sizeof(xfs_agf_t)
};
+ trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
+
xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
@@ -2230,13 +2090,11 @@ xfs_alloc_put_freelist(
logflags |= XFS_AGF_BTREEBLKS;
}
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
*blockp = cpu_to_be32(bno);
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
xfs_trans_log_buf(tp, agflbp,
(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
@@ -2399,7 +2257,7 @@ xfs_alloc_vextent(
args->minlen > args->maxlen || args->minlen > agsize ||
args->mod >= args->prod) {
args->fsbno = NULLFSBLOCK;
- TRACE_ALLOC("badargs", args);
+ trace_xfs_alloc_vextent_badargs(args);
return 0;
}
minleft = args->minleft;
@@ -2418,12 +2276,12 @@ xfs_alloc_vextent(
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;
if (error) {
- TRACE_ALLOC("nofix", args);
+ trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
if (!args->agbp) {
up_read(&mp->m_peraglock);
- TRACE_ALLOC("noagbp", args);
+ trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
@@ -2488,7 +2346,7 @@ xfs_alloc_vextent(
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;
if (error) {
- TRACE_ALLOC("nofix", args);
+ trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
/*
@@ -2499,7 +2357,9 @@ xfs_alloc_vextent(
goto error0;
break;
}
- TRACE_ALLOC("loopfailed", args);
+
+ trace_xfs_alloc_vextent_loopfailed(args);
+
/*
* Didn't work, figure out the next iteration.
*/
@@ -2526,7 +2386,7 @@ xfs_alloc_vextent(
if (args->agno == sagno) {
if (no_min == 1) {
args->agbno = NULLAGBLOCK;
- TRACE_ALLOC("allfailed", args);
+ trace_xfs_alloc_vextent_allfailed(args);
break;
}
if (flags == 0) {
@@ -2642,16 +2502,16 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
}
}
+ trace_xfs_alloc_busy(mp, agno, bno, len, n);
+
if (n < XFS_PAGB_NUM_SLOTS) {
bsy = &mp->m_perag[agno].pagb_list[n];
mp->m_perag[agno].pagb_count++;
- TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
bsy->busy_start = bno;
bsy->busy_length = len;
bsy->busy_tp = tp;
xfs_trans_add_busy(tp, agno, n);
} else {
- TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
/*
* The busy list is full! Since it is now not possible to
* track the free block, make this a synchronous transaction
@@ -2678,12 +2538,12 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
list = mp->m_perag[agno].pagb_list;
ASSERT(idx < XFS_PAGB_NUM_SLOTS);
+
+ trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
+
if (list[idx].busy_tp == tp) {
- TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp);
list[idx].busy_tp = NULL;
mp->m_perag[agno].pagb_count--;
- } else {
- TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp);
}
spin_unlock(&mp->m_perag[agno].pagb_lock);
@@ -2724,24 +2584,22 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
if ((bno > bend) || (uend < bsy->busy_start)) {
cnt--;
} else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy",
- "found1", agno, bno, len, tp);
break;
}
}
}
+ trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
+
/*
* If a block was found, force the log through the LSN of the
* transaction that freed the block
*/
if (cnt) {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
lsn = bsy->busy_tp->t_commit_lsn;
spin_unlock(&mp->m_perag[agno].pagb_lock);
xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
} else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
spin_unlock(&mp->m_perag[agno].pagb_lock);
}
}
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index e704caee10df..599bffa39784 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -37,6 +37,15 @@ typedef enum xfs_alloctype
XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
} xfs_alloctype_t;
+#define XFS_ALLOC_TYPES \
+ { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
+ { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
+ { XFS_ALLOCTYPE_START_AG, "START_AG" }, \
+ { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
+ { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
+ { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
+ { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
+
/*
* Flags for xfs_alloc_fix_freelist.
*/
@@ -109,24 +118,6 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp,
#ifdef __KERNEL__
-#if defined(XFS_ALLOC_TRACE)
-/*
- * Allocation tracing buffer size.
- */
-#define XFS_ALLOC_TRACE_SIZE 4096
-extern ktrace_t *xfs_alloc_trace_buf;
-
-/*
- * Types for alloc tracing.
- */
-#define XFS_ALLOC_KTRACE_ALLOC 1
-#define XFS_ALLOC_KTRACE_FREE 2
-#define XFS_ALLOC_KTRACE_MODAGF 3
-#define XFS_ALLOC_KTRACE_BUSY 4
-#define XFS_ALLOC_KTRACE_UNBUSY 5
-#define XFS_ALLOC_KTRACE_BUSYSEARCH 6
-#endif
-
void
xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index c10c3a292d30..adbd9141aea1 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -39,6 +39,7 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
STATIC struct xfs_btree_cur *
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 8fe6f6b78a4a..e953b6cfb2a8 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -47,6 +47,7 @@
#include "xfs_trans_space.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
/*
* xfs_attr.c
@@ -89,10 +90,6 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
-#if defined(XFS_ATTR_TRACE)
-ktrace_t *xfs_attr_trace_buf;
-#endif
-
STATIC int
xfs_attr_name_to_xname(
struct xfs_name *xname,
@@ -640,7 +637,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
return EIO;
xfs_ilock(dp, XFS_ILOCK_SHARED);
- xfs_attr_trace_l_c("syscall start", context);
/*
* Decide on what work routines to call based on the inode size.
@@ -656,7 +652,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
}
xfs_iunlock(dp, XFS_ILOCK_SHARED);
- xfs_attr_trace_l_c("syscall end", context);
return error;
}
@@ -702,7 +697,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
context->count * sizeof(alist->al_offset[0]);
context->firstu -= ATTR_ENTSIZE(namelen);
if (context->firstu < arraytop) {
- xfs_attr_trace_l_c("buffer full", context);
+ trace_xfs_attr_list_full(context);
alist->al_more = 1;
context->seen_enough = 1;
return 1;
@@ -714,7 +709,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
aep->a_name[namelen] = 0;
alist->al_offset[context->count++] = context->firstu;
alist->al_count = context->count;
- xfs_attr_trace_l_c("add", context);
+ trace_xfs_attr_list_add(context);
return 0;
}
@@ -1853,7 +1848,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
node = bp->data;
switch (be16_to_cpu(node->hdr.info.magic)) {
case XFS_DA_NODE_MAGIC:
- xfs_attr_trace_l_cn("wrong blk", context, node);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
break;
@@ -1861,20 +1856,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
leaf = bp->data;
if (cursor->hashval > be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval)) {
- xfs_attr_trace_l_cl("wrong blk",
- context, leaf);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
} else if (cursor->hashval <=
be32_to_cpu(leaf->entries[0].hashval)) {
- xfs_attr_trace_l_cl("maybe wrong blk",
- context, leaf);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
}
break;
default:
- xfs_attr_trace_l_c("wrong blk - ??", context);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
}
@@ -1919,8 +1912,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
if (cursor->hashval
<= be32_to_cpu(btree->hashval)) {
cursor->blkno = be32_to_cpu(btree->before);
- xfs_attr_trace_l_cb("descending",
- context, btree);
+ trace_xfs_attr_list_node_descend(context,
+ btree);
break;
}
}
@@ -2270,85 +2263,3 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
}
return(0);
}
-
-#if defined(XFS_ATTR_TRACE)
-/*
- * Add a trace buffer entry for an attr_list context structure.
- */
-void
-xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context,
- (__psunsigned_t)NULL,
- (__psunsigned_t)NULL,
- (__psunsigned_t)NULL);
-}
-
-/*
- * Add a trace buffer entry for a context structure and a Btree node.
- */
-void
-xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_intnode *node)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context,
- (__psunsigned_t)be16_to_cpu(node->hdr.count),
- (__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
- (__psunsigned_t)be32_to_cpu(node->btree[
- be16_to_cpu(node->hdr.count)-1].hashval));
-}
-
-/*
- * Add a trace buffer entry for a context structure and a Btree element.
- */
-void
-xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_node_entry *btree)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context,
- (__psunsigned_t)be32_to_cpu(btree->hashval),
- (__psunsigned_t)be32_to_cpu(btree->before),
- (__psunsigned_t)NULL);
-}
-
-/*
- * Add a trace buffer entry for a context structure and a leaf block.
- */
-void
-xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
- struct xfs_attr_leafblock *leaf)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context,
- (__psunsigned_t)be16_to_cpu(leaf->hdr.count),
- (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
- (__psunsigned_t)be32_to_cpu(leaf->entries[
- be16_to_cpu(leaf->hdr.count)-1].hashval));
-}
-
-/*
- * Add a trace buffer entry for the arguments given to the routine,
- * generic form.
- */
-void
-xfs_attr_trace_enter(int type, char *where,
- struct xfs_attr_list_context *context,
- __psunsigned_t a13, __psunsigned_t a14,
- __psunsigned_t a15)
-{
- ASSERT(xfs_attr_trace_buf);
- ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
- (void *)((__psunsigned_t)where),
- (void *)((__psunsigned_t)context->dp),
- (void *)((__psunsigned_t)context->cursor->hashval),
- (void *)((__psunsigned_t)context->cursor->blkno),
- (void *)((__psunsigned_t)context->cursor->offset),
- (void *)((__psunsigned_t)context->alist),
- (void *)((__psunsigned_t)context->bufsize),
- (void *)((__psunsigned_t)context->count),
- (void *)((__psunsigned_t)context->firstu),
- NULL,
- (void *)((__psunsigned_t)context->dupcnt),
- (void *)((__psunsigned_t)context->flags),
- (void *)a13, (void *)a14, (void *)a15);
-}
-#endif /* XFS_ATTR_TRACE */
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 12f0be3a73d4..59b410ce69a1 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -48,6 +48,16 @@ struct xfs_attr_list_context;
#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
+#define XFS_ATTR_FLAGS \
+ { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
+ { ATTR_ROOT, "ROOT" }, \
+ { ATTR_TRUST, "TRUST" }, \
+ { ATTR_SECURE, "SECURE" }, \
+ { ATTR_CREATE, "CREATE" }, \
+ { ATTR_REPLACE, "REPLACE" }, \
+ { ATTR_KERNOTIME, "KERNOTIME" }, \
+ { ATTR_KERNOVAL, "KERNOVAL" }
+
/*
* The maximum size (into the kernel or returned from the kernel) of an
* attribute value or the buffer used for an attr_list() call. Larger
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 0b687351293f..baf41b5af756 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -42,6 +42,7 @@
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* xfs_attr_leaf.c
@@ -594,7 +595,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
cursor = context->cursor;
ASSERT(cursor != NULL);
- xfs_attr_trace_l_c("sf start", context);
+ trace_xfs_attr_list_sf(context);
/*
* If the buffer is large enough and the cursor is at the start,
@@ -627,7 +628,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
return error;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
}
- xfs_attr_trace_l_c("sf big-gulp", context);
+ trace_xfs_attr_list_sf_all(context);
return(0);
}
@@ -653,7 +654,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe);
- xfs_attr_trace_l_c("sf corrupted", context);
kmem_free(sbuf);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -693,7 +693,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
if (i == nsbuf) {
kmem_free(sbuf);
- xfs_attr_trace_l_c("blk end", context);
return(0);
}
@@ -719,7 +718,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
kmem_free(sbuf);
- xfs_attr_trace_l_c("sf E-O-F", context);
return(0);
}
@@ -2323,7 +2321,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
cursor = context->cursor;
cursor->initted = 1;
- xfs_attr_trace_l_cl("blk start", context, leaf);
+ trace_xfs_attr_list_leaf(context);
/*
* Re-find our place in the leaf block if this is a new syscall.
@@ -2344,7 +2342,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
}
}
if (i == be16_to_cpu(leaf->hdr.count)) {
- xfs_attr_trace_l_c("not found", context);
+ trace_xfs_attr_list_notfound(context);
return(0);
}
} else {
@@ -2419,7 +2417,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
break;
cursor->offset++;
}
- xfs_attr_trace_l_cl("blk end", context, leaf);
+ trace_xfs_attr_list_leaf_end(context);
return(retval);
}
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index ea22839caed2..76ab7b0cbb3a 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -25,8 +25,6 @@
* to fit into the literal area of the inode.
*/
-struct xfs_inode;
-
/*
* Entries are packed toward the top as tight as possible.
*/
@@ -69,42 +67,4 @@ typedef struct xfs_attr_sf_sort {
(be16_to_cpu(((xfs_attr_shortform_t *) \
((dp)->i_afp->if_u1.if_data))->hdr.totsize))
-#if defined(XFS_ATTR_TRACE)
-/*
- * Kernel tracing support for attribute lists
- */
-struct xfs_attr_list_context;
-struct xfs_da_intnode;
-struct xfs_da_node_entry;
-struct xfs_attr_leafblock;
-
-#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_attr_trace_buf;
-
-/*
- * Trace record types.
- */
-#define XFS_ATTR_KTRACE_L_C 1 /* context */
-#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */
-#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */
-#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */
-
-void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
-void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_intnode *node);
-void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_node_entry *btree);
-void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
- struct xfs_attr_leafblock *leaf);
-void xfs_attr_trace_enter(int type, char *where,
- struct xfs_attr_list_context *context,
- __psunsigned_t a13, __psunsigned_t a14,
- __psunsigned_t a15);
-#else
-#define xfs_attr_trace_l_c(w,c)
-#define xfs_attr_trace_l_cn(w,c,n)
-#define xfs_attr_trace_l_cb(w,c,b)
-#define xfs_attr_trace_l_cl(w,c,l)
-#endif /* XFS_ATTR_TRACE */
-
#endif /* __XFS_ATTR_SF_H__ */
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8971fb09d387..98251cdc52aa 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -54,6 +54,7 @@
#include "xfs_buf_item.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#ifdef DEBUG
@@ -272,71 +273,6 @@ xfs_bmap_isaeof(
int whichfork, /* data or attribute fork */
char *aeof); /* return value */
-#ifdef XFS_BMAP_TRACE
-/*
- * Add bmap trace entry prior to a call to xfs_iext_remove.
- */
-STATIC void
-xfs_bmap_trace_delete(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) deleted */
- xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_insert, or
- * reading in the extents list from the disk (in the btree).
- */
-STATIC void
-xfs_bmap_trace_insert(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) inserted */
- xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
- xfs_bmbt_irec_t *r1, /* inserted record 1 */
- xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry after updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_post_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry updated */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry prior to updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_pre_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry to be updated */
- int whichfork); /* data or attr fork */
-
-#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \
- xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
-#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \
- xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
-#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \
- xfs_bmap_trace_post_update(__func__,d,ip,i,w)
-#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \
- xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
-#else
-#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
-#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
-#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
-#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
-#endif /* XFS_BMAP_TRACE */
-
/*
* Compute the worst-case number of indirect blocks that will be used
* for ip's delayed extent of length "len".
@@ -363,18 +299,6 @@ xfs_bmap_validate_ret(
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_bunmap_trace(
- xfs_inode_t *ip,
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- inst_t *ra);
-#else
-#define xfs_bunmap_trace(ip, bno, len, flags, ra)
-#endif /* XFS_RW_TRACE */
-
STATIC int
xfs_bmap_count_tree(
xfs_mount_t *mp,
@@ -590,9 +514,9 @@ xfs_bmap_add_extent(
* already extents in the list.
*/
if (nextents == 0) {
- XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL,
- whichfork);
- xfs_iext_insert(ifp, 0, 1, new);
+ xfs_iext_insert(ip, 0, 1, new,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
+
ASSERT(cur == NULL);
ifp->if_lastex = 0;
if (!isnullstartblock(new->br_startblock)) {
@@ -759,26 +683,10 @@ xfs_bmap_add_extent_delay_real(
xfs_filblks_t temp=0; /* value for dnew calculations */
xfs_filblks_t temp2=0;/* value for dnew calculations */
int tmp_rval; /* partial logging flags */
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_FILLING, RIGHT_FILLING,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
#define LEFT r[0]
#define RIGHT r[1]
#define PREV r[2]
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
-#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE \
- (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
/*
* Set up a bunch of variables to make the tests simpler.
@@ -790,69 +698,80 @@ xfs_bmap_add_extent_delay_real(
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+
/*
* Set flags determining what part of the previous delayed allocation
* extent is being replaced by a real allocation.
*/
- STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
- STATE_SET(RIGHT_FILLING,
- PREV.br_startoff + PREV.br_blockcount == new_endoff);
+ if (PREV.br_startoff == new->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
/*
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock));
+
+ if (isnullstartblock(LEFT.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
- LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == new->br_state &&
- LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+ LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+ LEFT.br_state == new->br_state &&
+ LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
/*
* Check and set flags if this segment has a right neighbor.
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
+ if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock));
+
+ if (isnullstartblock(RIGHT.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new_endoff == RIGHT.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- RIGHT.br_startblock &&
- new->br_state == RIGHT.br_state &&
- new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
- ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
- MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
- LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
- <= MAXEXTLEN));
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new_endoff == RIGHT.br_startoff &&
+ new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+ new->br_state == RIGHT.br_state &&
+ new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+ ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING)) !=
+ (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING) ||
+ LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
+
error = 0;
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
- switch (SWITCH_STATE) {
-
- case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The left and right neighbors are both contiguous with new.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 2);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 2, state);
ip->i_df.if_lastex = idx - 1;
ip->i_d.di_nextents--;
if (cur == NULL)
@@ -885,20 +804,18 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The left neighbor is contiguous, the right is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
- XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ xfs_iext_remove(ip, idx, 1, state);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -921,19 +838,19 @@ xfs_bmap_add_extent_delay_real(
PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The right neighbor is contiguous, the left is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount + RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
- XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx + 1, 1);
+ xfs_iext_remove(ip, idx + 1, 1, state);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -956,15 +873,16 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount;
break;
- case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Filling in all of a previously delayed allocation extent.
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -987,19 +905,20 @@ xfs_bmap_add_extent_delay_real(
temp2 = new->br_blockcount;
break;
- case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
/*
* Filling in the first part of a previous delayed allocation.
* The left neighbor is contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + new->br_blockcount);
xfs_bmbt_set_startoff(ep,
PREV.br_startoff + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
ip->i_df.if_lastex = idx - 1;
if (cur == NULL)
@@ -1021,7 +940,7 @@ xfs_bmap_add_extent_delay_real(
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
startblockval(PREV.br_startblock));
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
temp = LEFT.br_startoff;
@@ -1029,18 +948,16 @@ xfs_bmap_add_extent_delay_real(
PREV.br_blockcount;
break;
- case MASK(LEFT_FILLING):
+ case BMAP_LEFT_FILLING:
/*
* Filling in the first part of a previous delayed allocation.
* The left neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startoff(ep, new_endoff);
temp = PREV.br_blockcount - new->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp);
- XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
+ xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1071,27 +988,27 @@ xfs_bmap_add_extent_delay_real(
(cur ? cur->bc_private.b.allocated : 0));
ep = xfs_iext_get_ext(ifp, idx + 1);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
- case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in the last part of a previous delayed allocation.
* The right neighbor is contiguous with the new allocation.
*/
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
new->br_startoff, new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount,
RIGHT.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
ip->i_df.if_lastex = idx + 1;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1112,7 +1029,7 @@ xfs_bmap_add_extent_delay_real(
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
startblockval(PREV.br_startblock));
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
temp = PREV.br_startoff;
@@ -1120,17 +1037,15 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount;
break;
- case MASK(RIGHT_FILLING):
+ case BMAP_RIGHT_FILLING:
/*
* Filling in the last part of a previous delayed allocation.
* The right neighbor is not contiguous.
*/
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
- XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 1, new);
+ xfs_iext_insert(ip, idx + 1, 1, new, state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1161,7 +1076,7 @@ xfs_bmap_add_extent_delay_real(
(cur ? cur->bc_private.b.allocated : 0));
ep = xfs_iext_get_ext(ifp, idx);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
@@ -1175,7 +1090,7 @@ xfs_bmap_add_extent_delay_real(
* This case is avoided almost all the time.
*/
temp = new->br_startoff - PREV.br_startoff;
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
r[0] = *new;
r[1].br_state = PREV.br_state;
@@ -1183,9 +1098,7 @@ xfs_bmap_add_extent_delay_real(
r[1].br_startoff = new_endoff;
temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
r[1].br_blockcount = temp2;
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
+ xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1242,24 +1155,24 @@ xfs_bmap_add_extent_delay_real(
}
ep = xfs_iext_get_ext(ifp, idx);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
nullstartblock((int)temp2));
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
*dnew = temp + temp2;
/* DELTA: One in-core extent is split in three. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK2(LEFT_FILLING, RIGHT_CONTIG):
- case MASK2(RIGHT_FILLING, LEFT_CONTIG):
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
- case MASK(LEFT_CONTIG):
- case MASK(RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_CONTIG:
+ case BMAP_RIGHT_CONTIG:
/*
* These cases are all impossible.
*/
@@ -1279,14 +1192,6 @@ done:
#undef LEFT
#undef RIGHT
#undef PREV
-#undef MASK
-#undef MASK2
-#undef MASK3
-#undef MASK4
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -1316,27 +1221,10 @@ xfs_bmap_add_extent_unwritten_real(
int state = 0;/* state bits, accessed thru macros */
xfs_filblks_t temp=0;
xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_FILLING, RIGHT_FILLING,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
#define LEFT r[0]
#define RIGHT r[1]
#define PREV r[2]
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
-#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE \
- (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
-
/*
* Set up a bunch of variables to make the tests simpler.
*/
@@ -1352,68 +1240,78 @@ xfs_bmap_add_extent_unwritten_real(
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+
/*
* Set flags determining what part of the previous oldext allocation
* extent is being replaced by a newext allocation.
*/
- STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
- STATE_SET(RIGHT_FILLING,
- PREV.br_startoff + PREV.br_blockcount == new_endoff);
+ if (PREV.br_startoff == new->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
/*
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock));
+
+ if (isnullstartblock(LEFT.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
- LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == newext &&
- LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+ LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+ LEFT.br_state == newext &&
+ LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
/*
* Check and set flags if this segment has a right neighbor.
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
+ if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock));
+ if (isnullstartblock(RIGHT.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new_endoff == RIGHT.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- RIGHT.br_startblock &&
- newext == RIGHT.br_state &&
- new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
- ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
- MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
- LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
- <= MAXEXTLEN));
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new_endoff == RIGHT.br_startoff &&
+ new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+ newext == RIGHT.br_state &&
+ new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+ ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING)) !=
+ (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING) ||
+ LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
+
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
- switch (SWITCH_STATE) {
-
- case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The left and right neighbors are both contiguous with new.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 2);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 2, state);
ip->i_df.if_lastex = idx - 1;
ip->i_d.di_nextents -= 2;
if (cur == NULL)
@@ -1450,20 +1348,18 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
- XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ xfs_iext_remove(ip, idx, 1, state);
ip->i_d.di_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1492,21 +1388,18 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The right neighbor is contiguous, the left is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount + RIGHT.br_blockcount);
xfs_bmbt_set_state(ep, newext);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
ip->i_df.if_lastex = idx;
- XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx + 1, 1);
+ xfs_iext_remove(ip, idx + 1, 1, state);
ip->i_d.di_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1535,17 +1428,16 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount;
break;
- case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Setting all of a previous oldext extent to newext.
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_state(ep, newext);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1566,27 +1458,25 @@ xfs_bmap_add_extent_unwritten_real(
temp2 = new->br_blockcount;
break;
- case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
/*
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + new->br_blockcount);
xfs_bmbt_set_startoff(ep,
PREV.br_startoff + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep,
new->br_startblock + new->br_blockcount);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1617,22 +1507,21 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_blockcount;
break;
- case MASK(LEFT_FILLING):
+ case BMAP_LEFT_FILLING:
/*
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
xfs_bmbt_set_startoff(ep, new_endoff);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
xfs_bmbt_set_startblock(ep,
new->br_startblock + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
+ xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1660,24 +1549,21 @@ xfs_bmap_add_extent_unwritten_real(
temp2 = PREV.br_blockcount;
break;
- case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is contiguous with the new allocation.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
new->br_startoff, new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount, newext);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx + 1;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1707,18 +1593,17 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount;
break;
- case MASK(RIGHT_FILLING):
+ case BMAP_RIGHT_FILLING:
/*
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 1, new);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
+ xfs_iext_insert(ip, idx + 1, 1, new, state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1756,19 +1641,18 @@ xfs_bmap_add_extent_unwritten_real(
* newext. Contiguity is impossible here.
* One extent becomes three extents.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
new->br_startoff - PREV.br_startoff);
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
r[0] = *new;
r[1].br_startoff = new_endoff;
r[1].br_blockcount =
PREV.br_startoff + PREV.br_blockcount - new_endoff;
r[1].br_startblock = new->br_startblock + new->br_blockcount;
r[1].br_state = oldext;
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
+ xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents += 2;
if (cur == NULL)
@@ -1813,13 +1697,13 @@ xfs_bmap_add_extent_unwritten_real(
temp2 = PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK2(LEFT_FILLING, RIGHT_CONTIG):
- case MASK2(RIGHT_FILLING, LEFT_CONTIG):
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
- case MASK(LEFT_CONTIG):
- case MASK(RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_CONTIG:
+ case BMAP_RIGHT_CONTIG:
/*
* These cases are all impossible.
*/
@@ -1839,14 +1723,6 @@ done:
#undef LEFT
#undef RIGHT
#undef PREV
-#undef MASK
-#undef MASK2
-#undef MASK3
-#undef MASK4
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -1872,62 +1748,57 @@ xfs_bmap_add_extent_hole_delay(
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0; /* temp for indirect calculations */
xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
-
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
ep = xfs_iext_get_ext(ifp, idx);
state = 0;
ASSERT(isnullstartblock(new->br_startblock));
+
/*
* Check and set flags if this segment has a left neighbor
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock));
+
+ if (isnullstartblock(left.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
+
/*
* Check and set flags if the current (right) segment exists.
* If it doesn't exist, we're converting the hole at end-of-file.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+ if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock));
+
+ if (isnullstartblock(right.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
+
/*
* Set contiguity flags on the left and right neighbors.
* Don't let extents get too large, even if the pieces are contiguous.
*/
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
- left.br_startoff + left.br_blockcount == new->br_startoff &&
- left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
- new->br_startoff + new->br_blockcount == right.br_startoff &&
- new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
- (!STATE_TEST(LEFT_CONTIG) ||
- (left.br_blockcount + new->br_blockcount +
- right.br_blockcount <= MAXEXTLEN)));
+ if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
+ left.br_startoff + left.br_blockcount == new->br_startoff &&
+ left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
+ if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
+ new->br_startoff + new->br_blockcount == right.br_startoff &&
+ new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+ (!(state & BMAP_LEFT_CONTIG) ||
+ (left.br_blockcount + new->br_blockcount +
+ right.br_blockcount <= MAXEXTLEN)))
+ state |= BMAP_RIGHT_CONTIG;
+
/*
* Switch out based on the contiguity flags.
*/
- switch (SWITCH_STATE) {
-
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with delayed allocations
* on the left and on the right.
@@ -1935,8 +1806,8 @@ xfs_bmap_add_extent_hole_delay(
*/
temp = left.br_blockcount + new->br_blockcount +
right.br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
+
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock) +
@@ -1944,53 +1815,52 @@ xfs_bmap_add_extent_hole_delay(
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
nullstartblock((int)newlen));
- XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 1, state);
ip->i_df.if_lastex = idx - 1;
/* DELTA: Two in-core extents were replaced by one. */
temp2 = temp;
temp = left.br_startoff;
break;
- case MASK(LEFT_CONTIG):
+ case BMAP_LEFT_CONTIG:
/*
* New allocation is contiguous with a delayed allocation
* on the left.
* Merge the new allocation with the left neighbor.
*/
temp = left.br_blockcount + new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
nullstartblock((int)newlen));
- XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
/* DELTA: One in-core extent grew into a hole. */
temp2 = temp;
temp = left.br_startoff;
break;
- case MASK(RIGHT_CONTIG):
+ case BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with a delayed allocation
* on the right.
* Merge the new allocation with the right neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
temp = new->br_blockcount + right.br_blockcount;
oldlen = startblockval(new->br_startblock) +
startblockval(right.br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_allf(ep, new->br_startoff,
nullstartblock((int)newlen), temp, right.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
/* DELTA: One in-core extent grew into a hole. */
temp2 = temp;
@@ -2004,9 +1874,7 @@ xfs_bmap_add_extent_hole_delay(
* Insert a new entry.
*/
oldlen = newlen = 0;
- XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
+ xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
/* DELTA: A new in-core extent was added in a hole. */
temp2 = new->br_blockcount;
@@ -2030,12 +1898,6 @@ xfs_bmap_add_extent_hole_delay(
}
*logflagsp = 0;
return 0;
-#undef MASK
-#undef MASK2
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -2062,83 +1924,75 @@ xfs_bmap_add_extent_hole_real(
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0;
xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
-
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
ep = xfs_iext_get_ext(ifp, idx);
state = 0;
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
/*
* Check and set flags if this segment has a left neighbor.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock));
+ if (isnullstartblock(left.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
+
/*
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+ if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock));
+ if (isnullstartblock(right.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
+
/*
* We're inserting a real allocation between "left" and "right".
* Set the contiguity flags. Don't let extents get too large.
*/
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- left.br_startoff + left.br_blockcount == new->br_startoff &&
- left.br_startblock + left.br_blockcount == new->br_startblock &&
- left.br_state == new->br_state &&
- left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new->br_startoff + new->br_blockcount == right.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- right.br_startblock &&
- new->br_state == right.br_state &&
- new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
- (!STATE_TEST(LEFT_CONTIG) ||
- left.br_blockcount + new->br_blockcount +
- right.br_blockcount <= MAXEXTLEN));
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ left.br_startoff + left.br_blockcount == new->br_startoff &&
+ left.br_startblock + left.br_blockcount == new->br_startblock &&
+ left.br_state == new->br_state &&
+ left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new->br_startoff + new->br_blockcount == right.br_startoff &&
+ new->br_startblock + new->br_blockcount == right.br_startblock &&
+ new->br_state == right.br_state &&
+ new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+ (!(state & BMAP_LEFT_CONTIG) ||
+ left.br_blockcount + new->br_blockcount +
+ right.br_blockcount <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
error = 0;
/*
* Select which case we're in here, and implement it.
*/
- switch (SWITCH_STATE) {
-
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with real allocations on the
* left and on the right.
* Merge all three into a single extent record.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
- whichfork);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
left.br_blockcount + new->br_blockcount +
right.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
- whichfork);
- XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork);
- xfs_iext_remove(ifp, idx, 1);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 1, state);
ifp->if_lastex = idx - 1;
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2173,16 +2027,17 @@ xfs_bmap_add_extent_hole_real(
right.br_blockcount;
break;
- case MASK(LEFT_CONTIG):
+ case BMAP_LEFT_CONTIG:
/*
* New allocation is contiguous with a real allocation
* on the left.
* Merge the new allocation with the left neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
left.br_blockcount + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ifp->if_lastex = idx - 1;
if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
@@ -2207,17 +2062,18 @@ xfs_bmap_add_extent_hole_real(
new->br_blockcount;
break;
- case MASK(RIGHT_CONTIG):
+ case BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with a real allocation
* on the right.
* Merge the new allocation with the right neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
new->br_blockcount + right.br_blockcount,
right.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ifp->if_lastex = idx;
if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
@@ -2248,8 +2104,7 @@ xfs_bmap_add_extent_hole_real(
* real allocation.
* Insert a new entry.
*/
- XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork);
- xfs_iext_insert(ifp, idx, 1, new);
+ xfs_iext_insert(ip, idx, 1, new, state);
ifp->if_lastex = idx;
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2283,12 +2138,6 @@ xfs_bmap_add_extent_hole_real(
done:
*logflagsp = rval;
return error;
-#undef MASK
-#undef MASK2
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -3115,8 +2964,13 @@ xfs_bmap_del_extent(
uint qfield; /* quota field to update */
xfs_filblks_t temp; /* for indirect length calculations */
xfs_filblks_t temp2; /* for indirect length calculations */
+ int state = 0;
XFS_STATS_INC(xs_del_exlist);
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT((idx >= 0) && (idx < ifp->if_bytes /
@@ -3196,8 +3050,8 @@ xfs_bmap_del_extent(
/*
* Matches the whole extent. Delete the entry.
*/
- XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork);
- xfs_iext_remove(ifp, idx, 1);
+ xfs_iext_remove(ip, idx, 1,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
ifp->if_lastex = idx;
if (delay)
break;
@@ -3217,7 +3071,7 @@ xfs_bmap_del_extent(
/*
* Deleting the first part of the extent.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startoff(ep, del_endoff);
temp = got.br_blockcount - del->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp);
@@ -3226,13 +3080,12 @@ xfs_bmap_del_extent(
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx,
- whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
da_new = temp;
break;
}
xfs_bmbt_set_startblock(ep, del_endblock);
- XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
break;
@@ -3248,19 +3101,18 @@ xfs_bmap_del_extent(
* Deleting the last part of the extent.
*/
temp = got.br_blockcount - del->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
ifp->if_lastex = idx;
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx,
- whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
da_new = temp;
break;
}
- XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
break;
@@ -3277,7 +3129,7 @@ xfs_bmap_del_extent(
* Deleting the middle of the extent.
*/
temp = del->br_startoff - got.br_startoff;
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
new.br_startoff = del_endoff;
temp2 = got_endoff - del_endoff;
@@ -3364,10 +3216,8 @@ xfs_bmap_del_extent(
}
}
}
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork);
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL,
- whichfork);
- xfs_iext_insert(ifp, idx + 1, 1, &new);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+ xfs_iext_insert(ip, idx + 1, 1, &new, state);
ifp->if_lastex = idx + 1;
break;
}
@@ -3687,7 +3537,9 @@ xfs_bmap_local_to_extents(
xfs_iext_add(ifp, 0, 1);
ep = xfs_iext_get_ext(ifp, 0);
xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
- XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
+ trace_xfs_bmap_post_update(ip, 0,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
+ _THIS_IP_);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
xfs_trans_mod_dquot_byino(tp, ip,
@@ -3800,158 +3652,6 @@ xfs_bmap_search_extents(
return ep;
}
-
-#ifdef XFS_BMAP_TRACE
-ktrace_t *xfs_bmap_trace_buf;
-
-/*
- * Add a bmap trace buffer entry. Base routine for the others.
- */
-STATIC void
-xfs_bmap_trace_addentry(
- int opcode, /* operation */
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(ies) */
- xfs_extnum_t cnt, /* count of entries, 1 or 2 */
- xfs_bmbt_rec_host_t *r1, /* first record */
- xfs_bmbt_rec_host_t *r2, /* second record or null */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t tr2;
-
- ASSERT(cnt == 1 || cnt == 2);
- ASSERT(r1 != NULL);
- if (cnt == 1) {
- ASSERT(r2 == NULL);
- r2 = &tr2;
- memset(&tr2, 0, sizeof(tr2));
- } else
- ASSERT(r2 != NULL);
- ktrace_enter(xfs_bmap_trace_buf,
- (void *)(__psint_t)(opcode | (whichfork << 16)),
- (void *)fname, (void *)desc, (void *)ip,
- (void *)(__psint_t)idx,
- (void *)(__psint_t)cnt,
- (void *)(__psunsigned_t)(ip->i_ino >> 32),
- (void *)(__psunsigned_t)(unsigned)ip->i_ino,
- (void *)(__psunsigned_t)(r1->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l0),
- (void *)(__psunsigned_t)(r1->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l1),
- (void *)(__psunsigned_t)(r2->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l0),
- (void *)(__psunsigned_t)(r2->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l1)
- );
- ASSERT(ip->i_xtrace);
- ktrace_enter(ip->i_xtrace,
- (void *)(__psint_t)(opcode | (whichfork << 16)),
- (void *)fname, (void *)desc, (void *)ip,
- (void *)(__psint_t)idx,
- (void *)(__psint_t)cnt,
- (void *)(__psunsigned_t)(ip->i_ino >> 32),
- (void *)(__psunsigned_t)(unsigned)ip->i_ino,
- (void *)(__psunsigned_t)(r1->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l0),
- (void *)(__psunsigned_t)(r1->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l1),
- (void *)(__psunsigned_t)(r2->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l0),
- (void *)(__psunsigned_t)(r2->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l1)
- );
-}
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_remove.
- */
-STATIC void
-xfs_bmap_trace_delete(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) deleted */
- xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
- cnt, xfs_iext_get_ext(ifp, idx),
- cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
- whichfork);
-}
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_insert, or
- * reading in the extents list from the disk (in the btree).
- */
-STATIC void
-xfs_bmap_trace_insert(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) inserted */
- xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
- xfs_bmbt_irec_t *r1, /* inserted record 1 */
- xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t tr1; /* compressed record 1 */
- xfs_bmbt_rec_host_t tr2; /* compressed record 2 if needed */
-
- xfs_bmbt_set_all(&tr1, r1);
- if (cnt == 2) {
- ASSERT(r2 != NULL);
- xfs_bmbt_set_all(&tr2, r2);
- } else {
- ASSERT(cnt == 1);
- ASSERT(r2 == NULL);
- }
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
- cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
-}
-
-/*
- * Add bmap trace entry after updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_post_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry updated */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
- 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
-}
-
-/*
- * Add bmap trace entry prior to updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_pre_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry to be updated */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
- xfs_iext_get_ext(ifp, idx), NULL, whichfork);
-}
-#endif /* XFS_BMAP_TRACE */
-
/*
* Compute the worst-case number of indirect blocks that will be used
* for ip's delayed extent of length "len".
@@ -3983,37 +3683,6 @@ xfs_bmap_worst_indlen(
return rval;
}
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_bunmap_trace(
- xfs_inode_t *ip,
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- inst_t *ra)
-{
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_BUNMAP,
- (void *)ip,
- (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
- (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
- (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
- (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
- (void *)(__psint_t)len,
- (void *)(__psint_t)flags,
- (void *)(unsigned long)current_cpu(),
- (void *)ra,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0);
-}
-#endif
-
/*
* Convert inode from non-attributed to attributed.
* Must not be in a transaction, ip must not be locked.
@@ -4702,34 +4371,30 @@ error0:
return XFS_ERROR(EFSCORRUPTED);
}
-#ifdef XFS_BMAP_TRACE
+#ifdef DEBUG
/*
* Add bmap trace insert entries for all the contents of the extent records.
*/
void
xfs_bmap_trace_exlist(
- const char *fname, /* function name */
xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t cnt, /* count of entries in the list */
- int whichfork) /* data or attr fork */
+ int whichfork, /* data or attr fork */
+ unsigned long caller_ip)
{
- xfs_bmbt_rec_host_t *ep; /* current extent record */
xfs_extnum_t idx; /* extent record index */
xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_irec_t s; /* file extent record */
+ int state = 0;
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
- for (idx = 0; idx < cnt; idx++) {
- ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_get_all(ep, &s);
- XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
- whichfork);
- }
+ for (idx = 0; idx < cnt; idx++)
+ trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}
-#endif
-#ifdef DEBUG
/*
* Validate that the bmbt_irecs being returned from bmapi are valid
* given the callers original parameters. Specifically check the
@@ -5478,7 +5143,8 @@ xfs_bunmapi(
int rsvd; /* OK to allocate reserved blocks */
xfs_fsblock_t sum;
- xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
+ trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
+
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
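
Aside for readers of the xfs_bmap.c hunks above: every conversion follows one pattern. The per-function MASK()/STATE_SET()/SWITCH_STATE macros and their local enums are removed, and each contiguity test sets a shared BMAP_* bit directly, so the same state word can be passed on to the new tracepoints and to xfs_iext_insert()/xfs_iext_remove(). The stand-alone sketch below illustrates that open-coded flag style; the BMAP_LEFT_*/BMAP_RIGHT_* names and values are the ones this patch adds to xfs_bmap.h, but the small demo program wrapped around them is hypothetical, not kernel code.

/* Hypothetical stand-alone demo of the open-coded flag style used above. */
#include <stdio.h>

#define BMAP_LEFT_CONTIG	(1 << 0)	/* values match the new xfs_bmap.h */
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_LEFT_FILLING	(1 << 2)
#define BMAP_RIGHT_FILLING	(1 << 3)

int main(void)
{
	int state = 0;

	/* Each condition sets its named bit, replacing STATE_SET(). */
	state |= BMAP_LEFT_FILLING;
	state |= BMAP_LEFT_CONTIG;

	/* Replaces SWITCH_STATE: mask down to the bits the switch cares about. */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		printf("filling first part, left neighbor contiguous\n");
		break;
	default:
		printf("some other combination\n");
		break;
	}
	return 0;
}
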
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 56f62d2edc35..419dafb9d87d 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -95,6 +95,21 @@ typedef struct xfs_bmap_free
/* need write cache flushing and no */
/* additional allocation alignments */
+#define XFS_BMAPI_FLAGS \
+ { XFS_BMAPI_WRITE, "WRITE" }, \
+ { XFS_BMAPI_DELAY, "DELAY" }, \
+ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
+ { XFS_BMAPI_METADATA, "METADATA" }, \
+ { XFS_BMAPI_EXACT, "EXACT" }, \
+ { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
+ { XFS_BMAPI_ASYNC, "ASYNC" }, \
+ { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
+ { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
+ { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
+ { XFS_BMAPI_CONTIG, "CONTIG" }, \
+ { XFS_BMAPI_CONVERT, "CONVERT" }
+
+
static inline int xfs_bmapi_aflag(int w)
{
return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
@@ -135,36 +150,43 @@ typedef struct xfs_bmalloca {
char conv; /* overwriting unwritten extents */
} xfs_bmalloca_t;
-#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE)
/*
- * Trace operations for bmap extent tracing
+ * Flags for xfs_bmap_add_extent*.
*/
-#define XFS_BMAP_KTRACE_DELETE 1
-#define XFS_BMAP_KTRACE_INSERT 2
-#define XFS_BMAP_KTRACE_PRE_UP 3
-#define XFS_BMAP_KTRACE_POST_UP 4
-
-#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */
-#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */
-extern ktrace_t *xfs_bmap_trace_buf;
+#define BMAP_LEFT_CONTIG (1 << 0)
+#define BMAP_RIGHT_CONTIG (1 << 1)
+#define BMAP_LEFT_FILLING (1 << 2)
+#define BMAP_RIGHT_FILLING (1 << 3)
+#define BMAP_LEFT_DELAY (1 << 4)
+#define BMAP_RIGHT_DELAY (1 << 5)
+#define BMAP_LEFT_VALID (1 << 6)
+#define BMAP_RIGHT_VALID (1 << 7)
+#define BMAP_ATTRFORK (1 << 8)
+
+#define XFS_BMAP_EXT_FLAGS \
+ { BMAP_LEFT_CONTIG, "LC" }, \
+ { BMAP_RIGHT_CONTIG, "RC" }, \
+ { BMAP_LEFT_FILLING, "LF" }, \
+ { BMAP_RIGHT_FILLING, "RF" }, \
+ { BMAP_ATTRFORK, "ATTR" }
/*
* Add bmap trace insert entries for all the contents of the extent list.
+ *
+ * Quite excessive tracing. Only do this for debug builds.
*/
+#if defined(__KERNEL__) && defined(DEBUG)
void
xfs_bmap_trace_exlist(
- const char *fname, /* function name */
struct xfs_inode *ip, /* incore inode pointer */
xfs_extnum_t cnt, /* count of entries in list */
- int whichfork); /* data or attr fork */
+ int whichfork, /* data or attr fork */
+ unsigned long caller_ip);
#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
- xfs_bmap_trace_exlist(__func__,ip,c,w)
-
-#else /* __KERNEL__ && XFS_BMAP_TRACE */
-
+ xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
+#else
#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
-
-#endif /* __KERNEL__ && XFS_BMAP_TRACE */
+#endif
/*
* Convert inode from non-attributed to attributed.
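
The { value, "NAME" } tables introduced in this header (XFS_BMAPI_FLAGS, XFS_BMAP_EXT_FLAGS) and in the headers further down (XFS_BLI_FLAGS, XFS_DA_OP_FLAGS) exist so the new tracepoints can print flag words symbolically; in the kernel's tracing macros such tables are typically handed to __print_flags() inside TP_printk(). The sketch below is a hedged userspace analogue of that decoding step: the flag names, values, and "LC"/"RC"/"ATTR" strings come from XFS_BMAP_EXT_FLAGS above, while the decode_flags() helper and the sample value are invented for illustration.

/* Hypothetical userspace analogue of decoding a flag word via a name table. */
#include <stdio.h>

#define BMAP_LEFT_CONTIG	(1 << 0)
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_ATTRFORK		(1 << 8)

static const struct { int flag; const char *name; } bmap_ext_flags[] = {
	{ BMAP_LEFT_CONTIG,  "LC" },
	{ BMAP_RIGHT_CONTIG, "RC" },
	{ BMAP_ATTRFORK,     "ATTR" },
};

/* Print every name whose bit is set, separated by '|'. */
static void decode_flags(int state)
{
	const char *sep = "";
	size_t i;

	for (i = 0; i < sizeof(bmap_ext_flags) / sizeof(bmap_ext_flags[0]); i++) {
		if (state & bmap_ext_flags[i].flag) {
			printf("%s%s", sep, bmap_ext_flags[i].name);
			sep = "|";
		}
	}
	printf("\n");
}

int main(void)
{
	decode_flags(BMAP_LEFT_CONTIG | BMAP_ATTRFORK);	/* prints "LC|ATTR" */
	return 0;
}
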
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 6f5ccede63f9..38751d5fac6f 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -768,12 +768,6 @@ xfs_bmbt_trace_enter(
(void *)a0, (void *)a1, (void *)a2, (void *)a3,
(void *)a4, (void *)a5, (void *)a6, (void *)a7,
(void *)a8, (void *)a9, (void *)a10);
- ktrace_enter(ip->i_btrace,
- (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
- (void *)func, (void *)s, (void *)ip, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
}
STATIC void
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 52b5f14d0c32..36a0992dd669 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -39,6 +39,7 @@
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Cursor allocation zone.
@@ -81,7 +82,7 @@ xfs_btree_check_lblock(
XFS_ERRTAG_BTREE_CHECK_LBLOCK,
XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
if (bp)
- xfs_buftrace("LBTREE ERROR", bp);
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
mp);
return XFS_ERROR(EFSCORRUPTED);
@@ -119,7 +120,7 @@ xfs_btree_check_sblock(
XFS_ERRTAG_BTREE_CHECK_SBLOCK,
XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
if (bp)
- xfs_buftrace("SBTREE ERROR", bp);
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
XFS_ERRLEVEL_LOW, cur->bc_mp, block);
return XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h
index b3f5eb3c3c6c..2d8a309873ea 100644
--- a/fs/xfs/xfs_btree_trace.h
+++ b/fs/xfs/xfs_btree_trace.h
@@ -58,8 +58,6 @@ void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *,
struct xfs_buf *, int, int);
void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *,
struct xfs_buf *, int, int, int);
-void xfs_btree_trace_argfffi(const char *, struct xfs_btree_cur *,
- xfs_dfiloff_t, xfs_dfsbno_t, xfs_dfilblks_t, int, int);
void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int);
void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int,
union xfs_btree_ptr, union xfs_btree_key *, int);
@@ -71,24 +69,10 @@ void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *,
union xfs_btree_rec *, int);
void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int);
-
-#define XFS_ALLOCBT_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_allocbt_trace_buf;
-
-#define XFS_INOBT_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_inobt_trace_buf;
-
-#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
-#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
-extern ktrace_t *xfs_bmbt_trace_buf;
-
-
#define XFS_BTREE_TRACE_ARGBI(c, b, i) \
xfs_btree_trace_argbi(__func__, c, b, i, __LINE__)
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \
xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__)
-#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) \
- xfs_btree_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
#define XFS_BTREE_TRACE_ARGI(c, i) \
xfs_btree_trace_argi(__func__, c, i, __LINE__)
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \
@@ -104,7 +88,6 @@ extern ktrace_t *xfs_bmbt_trace_buf;
#else
#define XFS_BTREE_TRACE_ARGBI(c, b, i)
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
-#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j)
#define XFS_BTREE_TRACE_ARGI(c, i)
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 92af4098c7e8..a30f7e9eb2b9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -29,6 +29,7 @@
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_buf_item_zone;
@@ -164,7 +165,7 @@ xfs_buf_item_size(
* is the buf log format structure with the
* cancel flag in it.
*/
- xfs_buf_item_trace("SIZE STALE", bip);
+ trace_xfs_buf_item_size_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
return 1;
}
@@ -206,7 +207,7 @@ xfs_buf_item_size(
}
}
- xfs_buf_item_trace("SIZE NORM", bip);
+ trace_xfs_buf_item_size(bip);
return nvecs;
}
@@ -259,7 +260,7 @@ xfs_buf_item_format(
* is the buf log format structure with the
* cancel flag in it.
*/
- xfs_buf_item_trace("FORMAT STALE", bip);
+ trace_xfs_buf_item_format_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
bip->bli_format.blf_size = nvecs;
return;
@@ -335,7 +336,7 @@ xfs_buf_item_format(
/*
* Check to make sure everything is consistent.
*/
- xfs_buf_item_trace("FORMAT NORM", bip);
+ trace_xfs_buf_item_format(bip);
xfs_buf_item_log_check(bip);
}
@@ -355,8 +356,7 @@ xfs_buf_item_pin(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("PIN", bip);
- xfs_buftrace("XFS_PIN", bp);
+ trace_xfs_buf_item_pin(bip);
xfs_bpin(bp);
}
@@ -383,8 +383,7 @@ xfs_buf_item_unpin(
ASSERT(bp != NULL);
ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- xfs_buf_item_trace("UNPIN", bip);
- xfs_buftrace("XFS_UNPIN", bp);
+ trace_xfs_buf_item_unpin(bip);
freed = atomic_dec_and_test(&bip->bli_refcount);
ailp = bip->bli_item.li_ailp;
@@ -395,8 +394,8 @@ xfs_buf_item_unpin(
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
- xfs_buf_item_trace("UNPIN STALE", bip);
- xfs_buftrace("XFS_UNPIN STALE", bp);
+ trace_xfs_buf_item_unpin_stale(bip);
+
/*
* If we get called here because of an IO error, we may
* or may not have the item on the AIL. xfs_trans_ail_delete()
@@ -440,8 +439,8 @@ xfs_buf_item_unpin_remove(
if ((atomic_read(&bip->bli_refcount) == 1) &&
(bip->bli_flags & XFS_BLI_STALE)) {
ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
- xfs_buf_item_trace("UNPIN REMOVE", bip);
- xfs_buftrace("XFS_UNPIN_REMOVE", bp);
+ trace_xfs_buf_item_unpin_stale(bip);
+
/*
* yes -- clear the xaction descriptor in-use flag
* and free the chunk if required. We can safely
@@ -495,7 +494,7 @@ xfs_buf_item_trylock(
XFS_BUF_HOLD(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("TRYLOCK SUCCESS", bip);
+ trace_xfs_buf_item_trylock(bip);
return XFS_ITEM_SUCCESS;
}
@@ -524,7 +523,6 @@ xfs_buf_item_unlock(
uint hold;
bp = bip->bli_buf;
- xfs_buftrace("XFS_UNLOCK", bp);
/*
* Clear the buffer's association with this transaction.
@@ -547,7 +545,7 @@ xfs_buf_item_unlock(
*/
if (bip->bli_flags & XFS_BLI_STALE) {
bip->bli_flags &= ~XFS_BLI_LOGGED;
- xfs_buf_item_trace("UNLOCK STALE", bip);
+ trace_xfs_buf_item_unlock_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
if (!aborted)
return;
@@ -574,7 +572,7 @@ xfs_buf_item_unlock(
* release the buffer at the end of this routine.
*/
hold = bip->bli_flags & XFS_BLI_HOLD;
- xfs_buf_item_trace("UNLOCK", bip);
+ trace_xfs_buf_item_unlock(bip);
/*
* If the buf item isn't tracking any data, free it.
@@ -618,7 +616,8 @@ xfs_buf_item_committed(
xfs_buf_log_item_t *bip,
xfs_lsn_t lsn)
{
- xfs_buf_item_trace("COMMITTED", bip);
+ trace_xfs_buf_item_committed(bip);
+
if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
(bip->bli_item.li_lsn != 0)) {
return bip->bli_item.li_lsn;
@@ -640,7 +639,7 @@ xfs_buf_item_push(
xfs_buf_t *bp;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("PUSH", bip);
+ trace_xfs_buf_item_push(bip);
bp = bip->bli_buf;
@@ -738,9 +737,6 @@ xfs_buf_item_init(
bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
bip->bli_format.blf_map_size = map_size;
-#ifdef XFS_BLI_TRACE
- bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
-#endif
#ifdef XFS_TRANS_DEBUG
/*
@@ -878,9 +874,6 @@ xfs_buf_item_free(
kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */
-#ifdef XFS_BLI_TRACE
- ktrace_free(bip->bli_trace);
-#endif
kmem_zone_free(xfs_buf_item_zone, bip);
}
@@ -897,7 +890,8 @@ xfs_buf_item_relse(
{
xfs_buf_log_item_t *bip;
- xfs_buftrace("XFS_RELSE", bp);
+ trace_xfs_buf_item_relse(bp, _RET_IP_);
+
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
@@ -994,7 +988,7 @@ xfs_buf_iodone_callbacks(
if (XFS_FORCED_SHUTDOWN(mp)) {
ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
XFS_BUF_SUPER_STALE(bp);
- xfs_buftrace("BUF_IODONE_CB", bp);
+ trace_xfs_buf_item_iodone(bp, _RET_IP_);
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1030,7 +1024,7 @@ xfs_buf_iodone_callbacks(
XFS_BUF_SET_START(bp);
}
ASSERT(XFS_BUF_IODONE_FUNC(bp));
- xfs_buftrace("BUF_IODONE ASYNC", bp);
+ trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
xfs_buf_relse(bp);
} else {
/*
@@ -1053,9 +1047,7 @@ xfs_buf_iodone_callbacks(
}
return;
}
-#ifdef XFSERRORDEBUG
- xfs_buftrace("XFS BUFCB NOERR", bp);
-#endif
+
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1081,7 +1073,9 @@ xfs_buf_error_relse(
XFS_BUF_DONE(bp);
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_ERROR(bp,0);
- xfs_buftrace("BUF_ERROR_RELSE", bp);
+
+ trace_xfs_buf_error_relse(bp, _RET_IP_);
+
if (! XFS_FORCED_SHUTDOWN(mp))
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
/*
@@ -1128,34 +1122,3 @@ xfs_buf_iodone(
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
xfs_buf_item_free(bip);
}
-
-#if defined(XFS_BLI_TRACE)
-void
-xfs_buf_item_trace(
- char *id,
- xfs_buf_log_item_t *bip)
-{
- xfs_buf_t *bp;
- ASSERT(bip->bli_trace != NULL);
-
- bp = bip->bli_buf;
- ktrace_enter(bip->bli_trace,
- (void *)id,
- (void *)bip->bli_buf,
- (void *)((unsigned long)bip->bli_flags),
- (void *)((unsigned long)bip->bli_recur),
- (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
- (void *)((unsigned long)
- (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
- (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
- (void *)((unsigned long)XFS_BUF_COUNT(bp)),
- (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
- XFS_BUF_FSPRIVATE(bp, void *),
- XFS_BUF_FSPRIVATE2(bp, void *),
- (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
- (void *)XFS_BUF_IODONE_FUNC(bp),
- (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
- (void *)bip->bli_item.li_desc,
- (void *)((unsigned long)bip->bli_item.li_flags));
-}
-#endif /* XFS_BLI_TRACE */
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 5a41c348bb1c..217f34af00cb 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -70,22 +70,21 @@ typedef struct xfs_buf_log_format_t {
#define XFS_BLI_INODE_ALLOC_BUF 0x10
#define XFS_BLI_STALE_INODE 0x20
+#define XFS_BLI_FLAGS \
+ { XFS_BLI_HOLD, "HOLD" }, \
+ { XFS_BLI_DIRTY, "DIRTY" }, \
+ { XFS_BLI_STALE, "STALE" }, \
+ { XFS_BLI_LOGGED, "LOGGED" }, \
+ { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \
+ { XFS_BLI_STALE_INODE, "STALE_INODE" }
+
#ifdef __KERNEL__
struct xfs_buf;
-struct ktrace;
struct xfs_mount;
struct xfs_buf_log_item;
-#if defined(XFS_BLI_TRACE)
-#define XFS_BLI_TRACE_SIZE 32
-
-void xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
-#else
-#define xfs_buf_item_trace(id, bip)
-#endif
-
/*
* This is the in core log item structure used to track information
* needed to log buffers. It tracks how many times the lock has been
@@ -97,9 +96,6 @@ typedef struct xfs_buf_log_item {
unsigned int bli_flags; /* misc flags */
unsigned int bli_recur; /* lock recursion count */
atomic_t bli_refcount; /* cnt of tp refs */
-#ifdef XFS_BLI_TRACE
- struct ktrace *bli_trace; /* event trace buf */
-#endif
#ifdef XFS_TRANS_DEBUG
char *bli_orig; /* original buffer copy */
char *bli_logged; /* bytes logged (bitmap) */
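
The XFS_BLI_FLAGS table added above pairs each bli_flags bit with a printable name, which is the shape consumed by __print_flags() in a tracepoint's TP_printk() format. A minimal sketch of an event that would use it follows; the event name and field layout are illustrative assumptions, not the actual definitions in xfs_trace.h:

/*
 * Sketch only: a tracepoint that records a buf log item and decodes
 * bli_flags through the XFS_BLI_FLAGS name table. Such an event would
 * normally live in the trace header pulled in via #include "xfs_trace.h".
 */
TRACE_EVENT(xfs_buf_item_example,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(unsigned int, bli_flags)
		__field(unsigned int, bli_recur)
	),
	TP_fast_assign(
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
	),
	TP_printk("flags %s recur %u",
		  __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		  __entry->bli_recur)
);
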
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 2847bbc1c534..c0c8869115b1 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -46,6 +46,7 @@
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* xfs_da_btree.c
@@ -2107,7 +2108,7 @@ xfs_da_do_buf(
(be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
- xfs_buftrace("DA READ ERROR", rbp->bps[0]);
+ trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 8c536167bf75..30cd08f56a3a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -125,6 +125,13 @@ typedef struct xfs_da_args {
#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
+#define XFS_DA_OP_FLAGS \
+ { XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
+ { XFS_DA_OP_RENAME, "RENAME" }, \
+ { XFS_DA_OP_ADDNAME, "ADDNAME" }, \
+ { XFS_DA_OP_OKNOENT, "OKNOENT" }, \
+ { XFS_DA_OP_CILOOKUP, "CILOOKUP" }
+
/*
* Structure to describe buffer(s) for a block.
* This is needed in the directory version 2 format case, when
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index ab89a7e94a0f..d1483a4f71b8 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -43,6 +43,7 @@
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
/*
* Syssgi interface for swapext
@@ -168,7 +169,6 @@ xfs_swap_extents(
}
if (VN_CACHED(VFS_I(tip)) != 0) {
- xfs_inval_cached_trace(tip, 0, -1, 0, -1);
error = xfs_flushinval_pages(tip, 0, -1,
FI_REMAPF_LOCKED);
if (error)
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index bb1d58eb3982..93634a7e90e9 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -40,9 +40,9 @@
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
struct xfs_name xfs_name_dotdot = {"..", 2};
@@ -525,7 +525,8 @@ xfs_dir2_grow_inode(
xfs_trans_t *tp;
xfs_drfsbno_t nblks;
- xfs_dir2_trace_args_s("grow_inode", args, space);
+ trace_xfs_dir2_grow_inode(args, space);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -703,7 +704,8 @@ xfs_dir2_shrink_inode(
xfs_mount_t *mp;
xfs_trans_t *tp;
- xfs_dir2_trace_args_db("shrink_inode", args, db, bp);
+ trace_xfs_dir2_shrink_inode(args, db);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index ab52e9e1c1ee..ddc4ecc7807f 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -36,8 +36,8 @@
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Local function prototypes.
@@ -94,7 +94,8 @@ xfs_dir2_block_addname(
__be16 *tagp; /* pointer to tag value */
xfs_trans_t *tp; /* transaction structure */
- xfs_dir2_trace_args("block_addname", args);
+ trace_xfs_dir2_block_addname(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -590,7 +591,8 @@ xfs_dir2_block_lookup(
int error; /* error return value */
xfs_mount_t *mp; /* filesystem mount point */
- xfs_dir2_trace_args("block_lookup", args);
+ trace_xfs_dir2_block_lookup(args);
+
/*
* Get the buffer, look up the entry.
* If not found (ENOENT) then return, have no buffer.
@@ -747,7 +749,8 @@ xfs_dir2_block_removename(
int size; /* shortform size */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("block_removename", args);
+ trace_xfs_dir2_block_removename(args);
+
/*
* Look up the entry in the block. Gets the buffer and entry index.
* It will always be there, the vnodeops level does a lookup first.
@@ -823,7 +826,8 @@ xfs_dir2_block_replace(
int error; /* error return value */
xfs_mount_t *mp; /* filesystem mount point */
- xfs_dir2_trace_args("block_replace", args);
+ trace_xfs_dir2_block_replace(args);
+
/*
* Lookup the entry in the directory. Get buffer and entry index.
* This will always succeed since the caller has already done a lookup.
@@ -897,7 +901,8 @@ xfs_dir2_leaf_to_block(
int to; /* block/leaf to index */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp);
+ trace_xfs_dir2_leaf_to_block(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -1044,7 +1049,8 @@ xfs_dir2_sf_to_block(
xfs_trans_t *tp; /* transaction pointer */
struct xfs_name name;
- xfs_dir2_trace_args("sf_to_block", args);
+ trace_xfs_dir2_sf_to_block(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 41ad537c49e9..29f484c11b3a 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -38,8 +38,8 @@
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Local function declarations.
@@ -80,7 +80,8 @@ xfs_dir2_block_to_leaf(
int needscan; /* need to rescan bestfree */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_b("block_to_leaf", args, dbp);
+ trace_xfs_dir2_block_to_leaf(args);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
@@ -188,7 +189,8 @@ xfs_dir2_leaf_addname(
xfs_trans_t *tp; /* transaction pointer */
xfs_dir2_db_t use_block; /* data block number */
- xfs_dir2_trace_args("leaf_addname", args);
+ trace_xfs_dir2_leaf_addname(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -1266,7 +1268,8 @@ xfs_dir2_leaf_lookup(
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("leaf_lookup", args);
+ trace_xfs_dir2_leaf_lookup(args);
+
/*
* Look up name in the leaf block, returning both buffers and index.
*/
@@ -1454,7 +1457,8 @@ xfs_dir2_leaf_removename(
xfs_dir2_data_off_t oldbest; /* old value of best free */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("leaf_removename", args);
+ trace_xfs_dir2_leaf_removename(args);
+
/*
* Lookup the leaf entry, get the leaf and data blocks read in.
*/
@@ -1586,7 +1590,8 @@ xfs_dir2_leaf_replace(
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("leaf_replace", args);
+ trace_xfs_dir2_leaf_replace(args);
+
/*
* Look up the entry.
*/
@@ -1766,7 +1771,9 @@ xfs_dir2_node_to_leaf(
if (state->path.active > 1)
return 0;
args = state->args;
- xfs_dir2_trace_args("node_to_leaf", args);
+
+ trace_xfs_dir2_node_to_leaf(args);
+
mp = state->mp;
dp = args->dp;
tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 5a81ccd1045b..ce6e355199b5 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -37,8 +37,8 @@
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Function declarations.
@@ -123,7 +123,8 @@ xfs_dir2_leaf_to_node(
__be16 *to; /* pointer to freespace entry */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_b("leaf_to_node", args, lbp);
+ trace_xfs_dir2_leaf_to_node(args);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
@@ -196,7 +197,8 @@ xfs_dir2_leafn_add(
xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_sb("leafn_add", args, index, bp);
+ trace_xfs_dir2_leafn_add(args, index);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
@@ -711,8 +713,8 @@ xfs_dir2_leafn_moveents(
int stale; /* count stale leaves copied */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d,
- start_d, count);
+ trace_xfs_dir2_leafn_moveents(args, start_s, start_d, count);
+
/*
* Silently return if nothing to do.
*/
@@ -933,7 +935,8 @@ xfs_dir2_leafn_remove(
int needscan; /* need to rescan data frees */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_sb("leafn_remove", args, index, bp);
+ trace_xfs_dir2_leafn_remove(args, index);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -1363,7 +1366,8 @@ xfs_dir2_node_addname(
int rval; /* sub-return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_addname", args);
+ trace_xfs_dir2_node_addname(args);
+
/*
* Allocate and initialize the state (btree cursor).
*/
@@ -1822,7 +1826,8 @@ xfs_dir2_node_lookup(
int rval; /* operation return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_lookup", args);
+ trace_xfs_dir2_node_lookup(args);
+
/*
* Allocate and initialize the btree cursor.
*/
@@ -1875,7 +1880,8 @@ xfs_dir2_node_removename(
int rval; /* operation return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_removename", args);
+ trace_xfs_dir2_node_removename(args);
+
/*
* Allocate and initialize the btree cursor.
*/
@@ -1944,7 +1950,8 @@ xfs_dir2_node_replace(
int rval; /* internal return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_replace", args);
+ trace_xfs_dir2_node_replace(args);
+
/*
* Allocate and initialize the btree cursor.
*/
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index e89734e84646..9d4f17a69676 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -37,7 +37,7 @@
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
-#include "xfs_dir2_trace.h"
+#include "xfs_trace.h"
/*
* Prototypes for internal functions.
@@ -169,7 +169,8 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_ino_t temp;
- xfs_dir2_trace_args_sb("block_to_sf", args, size, bp);
+ trace_xfs_dir2_block_to_sf(args);
+
dp = args->dp;
mp = dp->i_mount;
@@ -281,7 +282,8 @@ xfs_dir2_sf_addname(
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
- xfs_dir2_trace_args("sf_addname", args);
+ trace_xfs_dir2_sf_addname(args);
+
ASSERT(xfs_dir2_sf_lookup(args) == ENOENT);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -654,7 +656,8 @@ xfs_dir2_sf_create(
xfs_dir2_sf_t *sfp; /* shortform structure */
int size; /* directory size */
- xfs_dir2_trace_args_i("sf_create", args, pino);
+ trace_xfs_dir2_sf_create(args);
+
dp = args->dp;
ASSERT(dp != NULL);
@@ -808,7 +811,8 @@ xfs_dir2_sf_lookup(
enum xfs_dacmp cmp; /* comparison result */
xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
- xfs_dir2_trace_args("sf_lookup", args);
+ trace_xfs_dir2_sf_lookup(args);
+
xfs_dir2_sf_check(args);
dp = args->dp;
@@ -891,7 +895,8 @@ xfs_dir2_sf_removename(
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
- xfs_dir2_trace_args("sf_removename", args);
+ trace_xfs_dir2_sf_removename(args);
+
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -982,7 +987,8 @@ xfs_dir2_sf_replace(
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
- xfs_dir2_trace_args("sf_replace", args);
+ trace_xfs_dir2_sf_replace(args);
+
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -1125,7 +1131,8 @@ xfs_dir2_sf_toino4(
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_t *sfp; /* new sf directory */
- xfs_dir2_trace_args("sf_toino4", args);
+ trace_xfs_dir2_sf_toino4(args);
+
dp = args->dp;
/*
@@ -1202,7 +1209,8 @@ xfs_dir2_sf_toino8(
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_t *sfp; /* new sf directory */
- xfs_dir2_trace_args("sf_toino8", args);
+ trace_xfs_dir2_sf_toino8(args);
+
dp = args->dp;
/*
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
deleted file mode 100644
index 6cc7c0c681ac..000000000000
--- a/fs/xfs/xfs_dir2_trace.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_inum.h"
-#include "xfs_dir2.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_dir2_trace.h"
-
-#ifdef XFS_DIR2_TRACE
-ktrace_t *xfs_dir2_trace_buf;
-
-/*
- * Enter something in the trace buffers.
- */
-static void
-xfs_dir2_trace_enter(
- xfs_inode_t *dp,
- int type,
- char *where,
- char *name,
- int namelen,
- void *a0,
- void *a1,
- void *a2,
- void *a3,
- void *a4,
- void *a5,
- void *a6,
- void *a7)
-{
- void *n[5];
-
- ASSERT(xfs_dir2_trace_buf);
- ASSERT(dp->i_dir_trace);
- if (name)
- memcpy(n, name, min((int)sizeof(n), namelen));
- else
- memset((char *)n, 0, sizeof(n));
- ktrace_enter(xfs_dir2_trace_buf,
- (void *)(long)type, (void *)where,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)(long)namelen,
- (void *)n[0], (void *)n[1], (void *)n[2],
- (void *)n[3], (void *)n[4]);
- ktrace_enter(dp->i_dir_trace,
- (void *)(long)type, (void *)where,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)(long)namelen,
- (void *)n[0], (void *)n[1], (void *)n[2],
- (void *)n[3], (void *)n[4]);
-}
-
-void
-xfs_dir2_trace_args(
- char *where,
- xfs_da_args_t *args)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- NULL, NULL);
-}
-
-void
-xfs_dir2_trace_args_b(
- char *where,
- xfs_da_args_t *args,
- xfs_dabuf_t *bp)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(bp ? bp->bps[0] : NULL), NULL);
-}
-
-void
-xfs_dir2_trace_args_bb(
- char *where,
- xfs_da_args_t *args,
- xfs_dabuf_t *lbp,
- xfs_dabuf_t *dbp)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(lbp ? lbp->bps[0] : NULL),
- (void *)(dbp ? dbp->bps[0] : NULL));
-}
-
-void
-xfs_dir2_trace_args_bibii(
- char *where,
- xfs_da_args_t *args,
- xfs_dabuf_t *bs,
- int ss,
- xfs_dabuf_t *bd,
- int sd,
- int c)
-{
- xfs_buf_t *bpbs = bs ? bs->bps[0] : NULL;
- xfs_buf_t *bpbd = bd ? bd->bps[0] : NULL;
-
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where,
- (char *)args->name, (int)args->namelen,
- (void *)args->dp, (void *)args->trans,
- (void *)bpbs, (void *)(long)ss, (void *)bpbd, (void *)(long)sd,
- (void *)(long)c, NULL);
-}
-
-void
-xfs_dir2_trace_args_db(
- char *where,
- xfs_da_args_t *args,
- xfs_dir2_db_t db,
- xfs_dabuf_t *bp)
-{
- xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
-
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(long)db, (void *)dbp);
-}
-
-void
-xfs_dir2_trace_args_i(
- char *where,
- xfs_da_args_t *args,
- xfs_ino_t i)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)((unsigned long)(i >> 32)),
- (void *)((unsigned long)(i & 0xFFFFFFFF)));
-}
-
-void
-xfs_dir2_trace_args_s(
- char *where,
- xfs_da_args_t *args,
- int s)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(long)s, NULL);
-}
-
-void
-xfs_dir2_trace_args_sb(
- char *where,
- xfs_da_args_t *args,
- int s,
- xfs_dabuf_t *bp)
-{
- xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
-
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(long)s, (void *)dbp);
-}
-#endif /* XFS_DIR2_TRACE */
diff --git a/fs/xfs/xfs_dir2_trace.h b/fs/xfs/xfs_dir2_trace.h
deleted file mode 100644
index ca3c754f4822..000000000000
--- a/fs/xfs/xfs_dir2_trace.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_TRACE_H__
-#define __XFS_DIR2_TRACE_H__
-
-/*
- * Tracing for xfs v2 directories.
- */
-
-#if defined(XFS_DIR2_TRACE)
-
-struct ktrace;
-struct xfs_dabuf;
-struct xfs_da_args;
-
-#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */
-#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */
-extern struct ktrace *xfs_dir2_trace_buf;
-
-#define XFS_DIR2_KTRACE_ARGS 1 /* args only */
-#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */
-#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */
-#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */
-#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */
-#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */
-#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */
-#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */
-
-void xfs_dir2_trace_args(char *where, struct xfs_da_args *args);
-void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args,
- struct xfs_dabuf *bp);
-void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args,
- struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
-void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args,
- struct xfs_dabuf *bs, int ss,
- struct xfs_dabuf *bd, int sd, int c);
-void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args,
- xfs_dir2_db_t db, struct xfs_dabuf *bp);
-void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i);
-void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s);
-void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s,
- struct xfs_dabuf *bp);
-
-#else /* XFS_DIR2_TRACE */
-
-#define xfs_dir2_trace_args(where, args)
-#define xfs_dir2_trace_args_b(where, args, bp)
-#define xfs_dir2_trace_args_bb(where, args, lbp, dbp)
-#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c)
-#define xfs_dir2_trace_args_db(where, args, db, bp)
-#define xfs_dir2_trace_args_i(where, args, i)
-#define xfs_dir2_trace_args_s(where, args, s)
-#define xfs_dir2_trace_args_sb(where, args, s, bp)
-
-#endif /* XFS_DIR2_TRACE */
-
-#endif /* __XFS_DIR2_TRACE_H__ */
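
The deleted xfs_dir2_trace_args*() helpers above each re-packed the same xfs_da_args fields by hand into ktrace slots; the trace_xfs_dir2_*() calls that replace them take only the args structure and let the tracepoint record what it needs. A rough sketch of what such an event could look like (the event name and recorded fields are assumptions for illustration; the xfs_da_args members referenced do exist):

/*
 * Sketch: one tracepoint per directory operation, recording the common
 * xfs_da_args fields instead of hand-packed ktrace entries.
 */
TRACE_EVENT(xfs_dir2_op_example,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(xfs_ino_t, ino)
		__field(int, namelen)
		__field(xfs_dahash_t, hashval)
		__field(int, op_flags)
	),
	TP_fast_assign(
		__entry->ino = args->dp->i_ino;
		__entry->namelen = args->namelen;
		__entry->hashval = args->hashval;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("ino 0x%llx namelen %d hashval 0x%x op_flags %s",
		  (unsigned long long)__entry->ino,
		  __entry->namelen,
		  __entry->hashval,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
);
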
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index edf8bdf4141f..a631e1451abb 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -34,6 +34,7 @@
#include "xfs_utils.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
+#include "xfs_trace.h"
#ifdef XFS_FILESTREAMS_TRACE
@@ -394,9 +395,7 @@ xfs_filestream_init(void)
item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
if (!item_zone)
return -ENOMEM;
-#ifdef XFS_FILESTREAMS_TRACE
- xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
-#endif
+
return 0;
}
@@ -407,9 +406,6 @@ xfs_filestream_init(void)
void
xfs_filestream_uninit(void)
{
-#ifdef XFS_FILESTREAMS_TRACE
- ktrace_free(xfs_filestreams_trace_buf);
-#endif
kmem_zone_destroy(item_zone);
}
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 36079aa91344..a13919a6a364 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -45,6 +45,7 @@
#include "xfs_rtalloc.h"
#include "xfs_rw.h"
#include "xfs_filestream.h"
+#include "xfs_trace.h"
/*
* File system operations
@@ -347,6 +348,7 @@ xfs_growfs_data_private(
be32_add_cpu(&agf->agf_length, new);
ASSERT(be32_to_cpu(agf->agf_length) ==
be32_to_cpu(agi->agi_length));
+
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
/*
* Free the new space.
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 073bb4a26b19..f5c904a10c11 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,7 +43,7 @@
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
-#include "xfs_dir2_trace.h"
+#include "xfs_trace.h"
/*
@@ -90,28 +90,6 @@ xfs_inode_alloc(
ip->i_size = 0;
ip->i_new_size = 0;
- /*
- * Initialize inode's trace buffers.
- */
-#ifdef XFS_INODE_TRACE
- ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_BMAP_TRACE
- ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_BTREE_TRACE
- ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_RW_TRACE
- ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_ILOCK_TRACE
- ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_DIR2_TRACE
- ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
-#endif
-
/* prevent anyone from using this yet */
VFS_I(ip)->i_state = I_NEW|I_LOCK;
@@ -133,25 +111,6 @@ xfs_inode_free(
if (ip->i_afp)
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-#ifdef XFS_INODE_TRACE
- ktrace_free(ip->i_trace);
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(ip->i_xtrace);
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(ip->i_btrace);
-#endif
-#ifdef XFS_RW_TRACE
- ktrace_free(ip->i_rwtrace);
-#endif
-#ifdef XFS_ILOCK_TRACE
- ktrace_free(ip->i_lock_trace);
-#endif
-#ifdef XFS_DIR2_TRACE
- ktrace_free(ip->i_dir_trace);
-#endif
-
if (ip->i_itemp) {
/*
* Only if we are shutting down the fs will we see an
@@ -210,6 +169,7 @@ xfs_iget_cache_hit(
* instead of polling for it.
*/
if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
+ trace_xfs_iget_skip(ip);
XFS_STATS_INC(xs_ig_frecycle);
error = EAGAIN;
goto out_error;
@@ -228,7 +188,7 @@ xfs_iget_cache_hit(
* Need to carefully get it back into useable state.
*/
if (ip->i_flags & XFS_IRECLAIMABLE) {
- xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
+ trace_xfs_iget_reclaim(ip);
/*
* We need to set XFS_INEW atomically with clearing the
@@ -254,6 +214,7 @@ xfs_iget_cache_hit(
ip->i_flags &= ~XFS_INEW;
ip->i_flags |= XFS_IRECLAIMABLE;
__xfs_inode_set_reclaim_tag(pag, ip);
+ trace_xfs_iget_reclaim(ip);
goto out_error;
}
inode->i_state = I_LOCK|I_NEW;
@@ -273,8 +234,9 @@ xfs_iget_cache_hit(
xfs_ilock(ip, lock_flags);
xfs_iflags_clear(ip, XFS_ISTALE);
- xfs_itrace_exit_tag(ip, "xfs_iget.found");
XFS_STATS_INC(xs_ig_found);
+
+ trace_xfs_iget_found(ip);
return 0;
out_error:
@@ -308,7 +270,7 @@ xfs_iget_cache_miss(
if (error)
goto out_destroy;
- xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
+ xfs_itrace_entry(ip);
if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
error = ENOENT;
@@ -353,6 +315,8 @@ xfs_iget_cache_miss(
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
+
+ trace_xfs_iget_alloc(ip);
*ipp = ip;
return 0;
@@ -639,7 +603,7 @@ xfs_ilock(
else if (lock_flags & XFS_ILOCK_SHARED)
mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
- xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
+ trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
/*
@@ -684,7 +648,7 @@ xfs_ilock_nowait(
if (!mrtryaccess(&ip->i_lock))
goto out_undo_iolock;
}
- xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
+ trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
return 1;
out_undo_iolock:
@@ -746,7 +710,7 @@ xfs_iunlock(
xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
(xfs_log_item_t*)(ip->i_itemp));
}
- xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
+ trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
@@ -765,6 +729,8 @@ xfs_ilock_demote(
mrdemote(&ip->i_lock);
if (lock_flags & XFS_IOLOCK_EXCL)
mrdemote(&ip->i_iolock);
+
+ trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
#ifdef DEBUG
@@ -795,52 +761,3 @@ xfs_isilocked(
return 1;
}
#endif
-
-#ifdef XFS_INODE_TRACE
-
-#define KTRACE_ENTER(ip, vk, s, line, ra) \
- ktrace_enter((ip)->i_trace, \
-/* 0 */ (void *)(__psint_t)(vk), \
-/* 1 */ (void *)(s), \
-/* 2 */ (void *)(__psint_t) line, \
-/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
-/* 4 */ (void *)(ra), \
-/* 5 */ NULL, \
-/* 6 */ (void *)(__psint_t)current_cpu(), \
-/* 7 */ (void *)(__psint_t)current_pid(), \
-/* 8 */ (void *)__return_address, \
-/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
-
-/*
- * Vnode tracing code.
- */
-void
-_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
-}
-
-void
-_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
-}
-
-void
-xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
-}
-
-void
-_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
-}
-
-void
-xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
-}
-#endif /* XFS_INODE_TRACE */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b92a4fa2a0a1..ce278b3ae7fc 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -47,10 +47,10 @@
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
-#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
@@ -1291,42 +1291,6 @@ xfs_file_last_byte(
return last_byte;
}
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_itrunc_trace(
- int tag,
- xfs_inode_t *ip,
- int flag,
- xfs_fsize_t new_size,
- xfs_off_t toss_start,
- xfs_off_t toss_finish)
-{
- if (ip->i_rwtrace == NULL) {
- return;
- }
-
- ktrace_enter(ip->i_rwtrace,
- (void*)((long)tag),
- (void*)ip,
- (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
- (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
- (void*)((long)flag),
- (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
- (void*)(unsigned long)(new_size & 0xffffffff),
- (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
- (void*)(unsigned long)(toss_start & 0xffffffff),
- (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
- (void*)(unsigned long)(toss_finish & 0xffffffff),
- (void*)(unsigned long)current_cpu(),
- (void*)(unsigned long)current_pid(),
- (void*)NULL,
- (void*)NULL,
- (void*)NULL);
-}
-#else
-#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
-#endif
-
/*
* Start the truncation of the file to new_size. The new size
* must be smaller than the current size. This routine will
@@ -1409,8 +1373,7 @@ xfs_itruncate_start(
return 0;
}
last_byte = xfs_file_last_byte(ip);
- xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
- last_byte);
+ trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
if (last_byte > toss_start) {
if (flags & XFS_ITRUNC_DEFINITE) {
xfs_tosspages(ip, toss_start,
@@ -1514,7 +1477,8 @@ xfs_itruncate_finish(
new_size = 0LL;
}
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
+ trace_xfs_itruncate_finish_start(ip, new_size);
+
/*
* The first thing we do is set the size to new_size permanently
* on disk. This way we don't have to worry about anyone ever
@@ -1731,7 +1695,7 @@ xfs_itruncate_finish(
ASSERT((new_size != 0) ||
(fork == XFS_ATTR_FORK) ||
(ip->i_d.di_nextents == 0));
- xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
+ trace_xfs_itruncate_finish_end(ip, new_size);
return 0;
}
@@ -3252,23 +3216,6 @@ corrupt_out:
return XFS_ERROR(EFSCORRUPTED);
}
-
-
-#ifdef XFS_ILOCK_TRACE
-void
-xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
-{
- ktrace_enter(ip->i_lock_trace,
- (void *)ip,
- (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
- (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
- (void *)ra, /* caller of ilock */
- (void *)(unsigned long)current_cpu(),
- (void *)(unsigned long)current_pid(),
- NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
-}
-#endif
-
/*
* Return a pointer to the extent record at file index idx.
*/
@@ -3300,13 +3247,17 @@ xfs_iext_get_ext(
*/
void
xfs_iext_insert(
- xfs_ifork_t *ifp, /* inode fork pointer */
+ xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t idx, /* starting index of new items */
xfs_extnum_t count, /* number of inserted items */
- xfs_bmbt_irec_t *new) /* items to insert */
+ xfs_bmbt_irec_t *new, /* items to insert */
+ int state) /* type of extent conversion */
{
+ xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
xfs_extnum_t i; /* extent record index */
+ trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
+
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
xfs_iext_add(ifp, idx, count);
for (i = idx; i < idx + count; i++, new++)
@@ -3549,13 +3500,17 @@ xfs_iext_add_indirect_multi(
*/
void
xfs_iext_remove(
- xfs_ifork_t *ifp, /* inode fork pointer */
+ xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t idx, /* index to begin removing exts */
- int ext_diff) /* number of extents to remove */
+ int ext_diff, /* number of extents to remove */
+ int state) /* type of extent conversion */
{
+ xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
xfs_extnum_t nextents; /* number of extents in file */
int new_size; /* size of extents after removal */
+ trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
+
ASSERT(ext_diff > 0);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 41555de1d1db..ec1f28c4fc4f 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -213,7 +213,6 @@ typedef struct xfs_icdinode {
struct bhv_desc;
struct cred;
-struct ktrace;
struct xfs_buf;
struct xfs_bmap_free;
struct xfs_bmbt_irec;
@@ -222,13 +221,6 @@ struct xfs_mount;
struct xfs_trans;
struct xfs_dquot;
-#if defined(XFS_ILOCK_TRACE)
-#define XFS_ILOCK_KTRACE_SIZE 32
-extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *);
-#else
-#define xfs_ilock_trace(i,n,f,ra)
-#endif
-
typedef struct dm_attrs_s {
__uint32_t da_dmevmask; /* DMIG event mask */
__uint16_t da_dmstate; /* DMIG state info */
@@ -271,26 +263,6 @@ typedef struct xfs_inode {
/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */
-
- /* Trace buffers per inode. */
-#ifdef XFS_INODE_TRACE
- struct ktrace *i_trace; /* general inode trace */
-#endif
-#ifdef XFS_BMAP_TRACE
- struct ktrace *i_xtrace; /* inode extent list trace */
-#endif
-#ifdef XFS_BTREE_TRACE
- struct ktrace *i_btrace; /* inode bmap btree trace */
-#endif
-#ifdef XFS_RW_TRACE
- struct ktrace *i_rwtrace; /* inode read/write trace */
-#endif
-#ifdef XFS_ILOCK_TRACE
- struct ktrace *i_lock_trace; /* inode lock/unlock trace */
-#endif
-#ifdef XFS_DIR2_TRACE
- struct ktrace *i_dir_trace; /* inode directory trace */
-#endif
} xfs_inode_t;
#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
@@ -406,6 +378,14 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
+#define XFS_LOCK_FLAGS \
+ { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \
+ { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \
+ { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \
+ { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \
+ { XFS_IUNLOCK_NONOTIFY, "IUNLOCK_NONOTIFY" }
+
+
/*
* Flags for lockdep annotations.
*
@@ -455,6 +435,10 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_ITRUNC_DEFINITE 0x1
#define XFS_ITRUNC_MAYBE 0x2
+#define XFS_ITRUNC_FLAGS \
+ { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \
+ { XFS_ITRUNC_MAYBE, "MAYBE" }
+
/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
@@ -507,48 +491,16 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
void xfs_synchronize_times(xfs_inode_t *);
void xfs_mark_inode_dirty_sync(xfs_inode_t *);
-#if defined(XFS_INODE_TRACE)
-
-#define INODE_TRACE_SIZE 16 /* number of trace entries */
-#define INODE_KTRACE_ENTRY 1
-#define INODE_KTRACE_EXIT 2
-#define INODE_KTRACE_HOLD 3
-#define INODE_KTRACE_REF 4
-#define INODE_KTRACE_RELE 5
-
-extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *);
-extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *);
-extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *);
-extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
-extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
-#define xfs_itrace_entry(ip) \
- _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
-#define xfs_itrace_exit(ip) \
- _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
-#define xfs_itrace_exit_tag(ip, tag) \
- _xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
-#define xfs_itrace_ref(ip) \
- _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address)
-
-#else
-#define xfs_itrace_entry(a)
-#define xfs_itrace_exit(a)
-#define xfs_itrace_exit_tag(a, b)
-#define xfs_itrace_hold(a, b, c, d)
-#define xfs_itrace_ref(a)
-#define xfs_itrace_rele(a, b, c, d)
-#endif
-
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
atomic_inc(&(VFS_I(ip)->i_count)); \
- xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+ trace_xfs_ihold(ip, _THIS_IP_); \
} while (0)
#define IRELE(ip) \
do { \
- xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+ trace_xfs_irele(ip, _THIS_IP_); \
iput(VFS_I(ip)); \
} while (0)
@@ -577,11 +529,11 @@ int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int);
xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
-void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t,
- xfs_bmbt_irec_t *);
+void xfs_iext_insert(xfs_inode_t *, xfs_extnum_t, xfs_extnum_t,
+ xfs_bmbt_irec_t *, int);
void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
-void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_remove(xfs_inode_t *, xfs_extnum_t, int, int);
void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
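
IHOLD and IRELE now pass _THIS_IP_ (and the locking paths pass _RET_IP_), both standard kernel helpers, so the trace record can show which caller took or dropped the reference or lock. A hedged sketch of an event that consumes such a caller address (names and fields are assumptions, not the xfs_trace.h definitions):

/*
 * Sketch: record the inode number, the current VFS reference count and
 * the address of the caller that did the IHOLD, resolved to a symbol
 * by %pS at print time.
 */
TRACE_EVENT(xfs_ihold_example,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(xfs_ino_t, ino)
		__field(int, count)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->ino = ip->i_ino;
		__entry->count = atomic_read(&VFS_I(ip)->i_count);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("ino 0x%llx count %d caller %pS",
		  (unsigned long long)__entry->ino,
		  __entry->count,
		  (void *)__entry->caller_ip)
);
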
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9794b876d6ff..f38855d21ea5 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -41,6 +41,7 @@
#include "xfs_ialloc.h"
#include "xfs_rw.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
@@ -800,7 +801,9 @@ xfs_inode_item_pushbuf(
!completion_done(&ip->i_flush));
iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- xfs_buftrace("INODE ITEM PUSH", bp);
+
+ trace_xfs_inode_item_push(bp, _RET_IP_);
+
if (XFS_BUF_ISPINNED(bp)) {
xfs_log_force(mp, (xfs_lsn_t)0,
XFS_LOG_FORCE);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7294abce6ef2..0b65039951a0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,72 +47,8 @@
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
+#include "xfs_trace.h"
-#if defined(XFS_RW_TRACE)
-void
-xfs_iomap_enter_trace(
- int tag,
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count)
-{
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)count),
- (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-
-void
-xfs_iomap_map_trace(
- int tag,
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count,
- xfs_iomap_t *iomapp,
- xfs_bmbt_irec_t *imapp,
- int flags)
-{
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)count),
- (void *)((unsigned long)flags),
- (void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)),
- (void *)((unsigned long)(iomapp->iomap_delta)),
- (void *)((unsigned long)(iomapp->iomap_bsize)),
- (void *)((unsigned long)(iomapp->iomap_bn)),
- (void *)(__psint_t)(imapp->br_startoff),
- (void *)((unsigned long)(imapp->br_blockcount)),
- (void *)(__psint_t)(imapp->br_startblock));
-}
-#else
-#define xfs_iomap_enter_trace(tag, io, offset, count)
-#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags)
-#endif
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
@@ -187,21 +123,20 @@ xfs_iomap(
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
+ trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
+
switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
case BMAPI_READ:
- xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count);
lockmode = xfs_ilock_map_shared(ip);
bmapi_flags = XFS_BMAPI_ENTIRE;
break;
case BMAPI_WRITE:
- xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count);
lockmode = XFS_ILOCK_EXCL;
if (flags & BMAPI_IGNSTATE)
bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
xfs_ilock(ip, lockmode);
break;
case BMAPI_ALLOCATE:
- xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count);
lockmode = XFS_ILOCK_SHARED;
bmapi_flags = XFS_BMAPI_ENTIRE;
@@ -237,8 +172,7 @@ xfs_iomap(
if (nimaps &&
(imap.br_startblock != HOLESTARTBLOCK) &&
(imap.br_startblock != DELAYSTARTBLOCK)) {
- xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip,
- offset, count, iomapp, &imap, flags);
+ trace_xfs_iomap_found(ip, offset, count, flags, &imap);
break;
}
@@ -250,8 +184,7 @@ xfs_iomap(
&imap, &nimaps);
}
if (!error) {
- xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip,
- offset, count, iomapp, &imap, flags);
+ trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
}
iomap_flags = IOMAP_NEW;
break;
@@ -261,8 +194,7 @@ xfs_iomap(
lockmode = 0;
if (nimaps && !isnullstartblock(imap.br_startblock)) {
- xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip,
- offset, count, iomapp, &imap, flags);
+ trace_xfs_iomap_found(ip, offset, count, flags, &imap);
break;
}
@@ -623,8 +555,7 @@ retry:
* delalloc blocks and retry without EOF preallocation.
*/
if (nimaps == 0) {
- xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
- ip, offset, count);
+ trace_xfs_delalloc_enospc(ip, offset, count);
if (flushed)
return XFS_ERROR(ENOSPC);
@@ -837,7 +768,7 @@ xfs_iomap_write_unwritten(
int committed;
int error;
- xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count);
+ trace_xfs_unwritten_convert(ip, offset, count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index fdcf7b82747f..174f29990991 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -43,6 +43,14 @@ typedef enum {
BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
} bmapi_flags_t;
+#define BMAPI_FLAGS \
+ { BMAPI_READ, "READ" }, \
+ { BMAPI_WRITE, "WRITE" }, \
+ { BMAPI_ALLOCATE, "ALLOCATE" }, \
+ { BMAPI_IGNSTATE, "IGNSTATE" }, \
+ { BMAPI_DIRECT, "DIRECT" }, \
+ { BMAPI_MMAP, "MMAP" }, \
+ { BMAPI_TRYLOCK, "TRYLOCK" }
/*
* xfs_iomap_t: File system I/O map
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9dbdff3ea484..4cb1792040e3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -40,6 +40,7 @@
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_rw.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_log_ticket_zone;
@@ -122,85 +123,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
STATIC int xlog_iclogs_empty(xlog_t *log);
-#if defined(XFS_LOG_TRACE)
-
-#define XLOG_TRACE_LOGGRANT_SIZE 2048
-#define XLOG_TRACE_ICLOG_SIZE 256
-
-void
-xlog_trace_loggrant_alloc(xlog_t *log)
-{
- log->l_grant_trace = ktrace_alloc(XLOG_TRACE_LOGGRANT_SIZE, KM_NOFS);
-}
-
-void
-xlog_trace_loggrant_dealloc(xlog_t *log)
-{
- ktrace_free(log->l_grant_trace);
-}
-
-void
-xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
-{
- unsigned long cnts;
-
- /* ticket counts are 1 byte each */
- cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
-
- ktrace_enter(log->l_grant_trace,
- (void *)tic,
- (void *)log->l_reserve_headq,
- (void *)log->l_write_headq,
- (void *)((unsigned long)log->l_grant_reserve_cycle),
- (void *)((unsigned long)log->l_grant_reserve_bytes),
- (void *)((unsigned long)log->l_grant_write_cycle),
- (void *)((unsigned long)log->l_grant_write_bytes),
- (void *)((unsigned long)log->l_curr_cycle),
- (void *)((unsigned long)log->l_curr_block),
- (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
- (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
- (void *)string,
- (void *)((unsigned long)tic->t_trans_type),
- (void *)cnts,
- (void *)((unsigned long)tic->t_curr_res),
- (void *)((unsigned long)tic->t_unit_res));
-}
-
-void
-xlog_trace_iclog_alloc(xlog_in_core_t *iclog)
-{
- iclog->ic_trace = ktrace_alloc(XLOG_TRACE_ICLOG_SIZE, KM_NOFS);
-}
-
-void
-xlog_trace_iclog_dealloc(xlog_in_core_t *iclog)
-{
- ktrace_free(iclog->ic_trace);
-}
-
-void
-xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
-{
- ktrace_enter(iclog->ic_trace,
- (void *)((unsigned long)state),
- (void *)((unsigned long)current_pid()),
- (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
- (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
- (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
- (void *)NULL, (void *)NULL);
-}
-#else
-
-#define xlog_trace_loggrant_alloc(log)
-#define xlog_trace_loggrant_dealloc(log)
-#define xlog_trace_loggrant(log,tic,string)
-
-#define xlog_trace_iclog_alloc(iclog)
-#define xlog_trace_iclog_dealloc(iclog)
-#define xlog_trace_iclog(iclog,state)
-
-#endif /* XFS_LOG_TRACE */
-
static void
xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
@@ -353,15 +275,17 @@ xfs_log_done(xfs_mount_t *mp,
if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
(flags & XFS_LOG_REL_PERM_RESERV)) {
+ trace_xfs_log_done_nonperm(log, ticket);
+
/*
* Release ticket if not permanent reservation or a specific
* request has been made to release a permanent reservation.
*/
- xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
xlog_ungrant_log_space(log, ticket);
xfs_log_ticket_put(ticket);
} else {
- xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
+ trace_xfs_log_done_perm(log, ticket);
+
xlog_regrant_reserve_log_space(log, ticket);
/* If this ticket was a permanent reservation and we aren't
* trying to release it, reset the inited flags; so next time
@@ -505,10 +429,13 @@ xfs_log_reserve(xfs_mount_t *mp,
XFS_STATS_INC(xs_try_logspace);
+
if (*ticket != NULL) {
ASSERT(flags & XFS_LOG_PERM_RESERV);
internal_ticket = (xlog_ticket_t *)*ticket;
- xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)");
+
+ trace_xfs_log_reserve(log, internal_ticket);
+
xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
retval = xlog_regrant_write_log_space(log, internal_ticket);
} else {
@@ -519,10 +446,9 @@ xfs_log_reserve(xfs_mount_t *mp,
return XFS_ERROR(ENOMEM);
internal_ticket->t_trans_type = t_type;
*ticket = internal_ticket;
- xlog_trace_loggrant(log, internal_ticket,
- (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ?
- "xfs_log_reserve: create new ticket (permanent trans)" :
- "xfs_log_reserve: create new ticket");
+
+ trace_xfs_log_reserve(log, internal_ticket);
+
xlog_grant_push_ail(mp,
(internal_ticket->t_unit_res *
internal_ticket->t_cnt));
@@ -734,7 +660,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
spin_unlock(&log->l_icloglock);
}
if (tic) {
- xlog_trace_loggrant(log, tic, "unmount rec");
+ trace_xfs_log_umount_write(log, tic);
xlog_ungrant_log_space(log, tic);
xfs_log_ticket_put(tic);
}
@@ -1030,7 +956,6 @@ xlog_iodone(xfs_buf_t *bp)
xfs_fs_cmn_err(CE_WARN, l->l_mp,
"xlog_iodone: Barriers are no longer supported"
" by device. Disabling barriers\n");
- xfs_buftrace("XLOG_IODONE BARRIERS OFF", bp);
}
/*
@@ -1085,13 +1010,10 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
return 0;
}
- xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
XFS_BUF_ERROR(bp, EIO);
XFS_BUF_STALE(bp);
xfs_biodone(bp);
return XFS_ERROR(EIO);
-
-
}
/*
@@ -1246,7 +1168,6 @@ xlog_alloc_log(xfs_mount_t *mp,
spin_lock_init(&log->l_grant_lock);
sv_init(&log->l_flush_wait, 0, "flush_wait");
- xlog_trace_loggrant_alloc(log);
/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1305,8 +1226,6 @@ xlog_alloc_log(xfs_mount_t *mp,
sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
- xlog_trace_iclog_alloc(iclog);
-
iclogp = &iclog->ic_next;
}
*iclogp = log->l_iclog; /* complete ring */
@@ -1321,13 +1240,11 @@ out_free_iclog:
sv_destroy(&iclog->ic_force_wait);
sv_destroy(&iclog->ic_write_wait);
xfs_buf_free(iclog->ic_bp);
- xlog_trace_iclog_dealloc(iclog);
}
kmem_free(iclog);
}
spinlock_destroy(&log->l_icloglock);
spinlock_destroy(&log->l_grant_lock);
- xlog_trace_loggrant_dealloc(log);
xfs_buf_free(log->l_xbuf);
out_free_log:
kmem_free(log);
@@ -1607,7 +1524,6 @@ xlog_dealloc_log(xlog_t *log)
sv_destroy(&iclog->ic_force_wait);
sv_destroy(&iclog->ic_write_wait);
xfs_buf_free(iclog->ic_bp);
- xlog_trace_iclog_dealloc(iclog);
next_iclog = iclog->ic_next;
kmem_free(iclog);
iclog = next_iclog;
@@ -1616,7 +1532,6 @@ xlog_dealloc_log(xlog_t *log)
spinlock_destroy(&log->l_grant_lock);
xfs_buf_free(log->l_xbuf);
- xlog_trace_loggrant_dealloc(log);
log->l_mp->m_log = NULL;
kmem_free(log);
} /* xlog_dealloc_log */
@@ -2414,7 +2329,6 @@ restart:
iclog = log->l_iclog;
if (iclog->ic_state != XLOG_STATE_ACTIVE) {
- xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
XFS_STATS_INC(xs_log_noiclogs);
/* Wait for log writes to have flushed */
@@ -2520,13 +2434,15 @@ xlog_grant_log_space(xlog_t *log,
/* Is there space or do we need to sleep? */
spin_lock(&log->l_grant_lock);
- xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter");
+
+ trace_xfs_log_grant_enter(log, tic);
/* something is already sleeping; insert new transaction at end */
if (log->l_reserve_headq) {
xlog_ins_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: sleep 1");
+
+ trace_xfs_log_grant_sleep1(log, tic);
+
/*
* Gotta check this before going to sleep, while we're
* holding the grant lock.
@@ -2540,8 +2456,7 @@ xlog_grant_log_space(xlog_t *log,
* If we got an error, and the filesystem is shutting down,
* we'll catch it down below. So just continue...
*/
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: wake 1");
+ trace_xfs_log_grant_wake1(log, tic);
spin_lock(&log->l_grant_lock);
}
if (tic->t_flags & XFS_LOG_PERM_RESERV)
@@ -2558,8 +2473,9 @@ redo:
if (free_bytes < need_bytes) {
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
xlog_ins_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: sleep 2");
+
+ trace_xfs_log_grant_sleep2(log, tic);
+
spin_unlock(&log->l_grant_lock);
xlog_grant_push_ail(log->l_mp, need_bytes);
spin_lock(&log->l_grant_lock);
@@ -2571,8 +2487,8 @@ redo:
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: wake 2");
+ trace_xfs_log_grant_wake2(log, tic);
+
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
@@ -2592,7 +2508,7 @@ redo:
ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
}
#endif
- xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit");
+ trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_head(log, 1);
spin_unlock(&log->l_grant_lock);
return 0;
@@ -2600,7 +2516,9 @@ redo:
error_return:
if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
+
+ trace_xfs_log_grant_error(log, tic);
+
/*
* If we are failing, make sure the ticket doesn't have any
* current reservations. We don't want to add this back when
@@ -2640,7 +2558,8 @@ xlog_regrant_write_log_space(xlog_t *log,
#endif
spin_lock(&log->l_grant_lock);
- xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter");
+
+ trace_xfs_log_regrant_write_enter(log, tic);
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
@@ -2669,8 +2588,8 @@ xlog_regrant_write_log_space(xlog_t *log,
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
xlog_ins_ticketq(&log->l_write_headq, tic);
- xlog_trace_loggrant(log, tic,
- "xlog_regrant_write_log_space: sleep 1");
+ trace_xfs_log_regrant_write_sleep1(log, tic);
+
spin_unlock(&log->l_grant_lock);
xlog_grant_push_ail(log->l_mp, need_bytes);
spin_lock(&log->l_grant_lock);
@@ -2685,8 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log,
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- xlog_trace_loggrant(log, tic,
- "xlog_regrant_write_log_space: wake 1");
+ trace_xfs_log_regrant_write_wake1(log, tic);
}
}
@@ -2704,6 +2622,8 @@ redo:
spin_lock(&log->l_grant_lock);
XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep2(log, tic);
+
sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
/* If we're shutting down, this tic is already off the queue */
@@ -2711,8 +2631,7 @@ redo:
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- xlog_trace_loggrant(log, tic,
- "xlog_regrant_write_log_space: wake 2");
+ trace_xfs_log_regrant_write_wake2(log, tic);
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_write_headq, tic);
@@ -2727,7 +2646,8 @@ redo:
}
#endif
- xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit");
+ trace_xfs_log_regrant_write_exit(log, tic);
+
xlog_verify_grant_head(log, 1);
spin_unlock(&log->l_grant_lock);
return 0;
@@ -2736,7 +2656,9 @@ redo:
error_return:
if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
+
+ trace_xfs_log_regrant_write_error(log, tic);
+
/*
* If we are failing, make sure the ticket doesn't have any
* current reservations. We don't want to add this back when
@@ -2760,8 +2682,8 @@ STATIC void
xlog_regrant_reserve_log_space(xlog_t *log,
xlog_ticket_t *ticket)
{
- xlog_trace_loggrant(log, ticket,
- "xlog_regrant_reserve_log_space: enter");
+ trace_xfs_log_regrant_reserve_enter(log, ticket);
+
if (ticket->t_cnt > 0)
ticket->t_cnt--;
@@ -2769,8 +2691,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
xlog_grant_sub_space(log, ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
xlog_tic_reset_res(ticket);
- xlog_trace_loggrant(log, ticket,
- "xlog_regrant_reserve_log_space: sub current res");
+
+ trace_xfs_log_regrant_reserve_sub(log, ticket);
+
xlog_verify_grant_head(log, 1);
/* just return if we still have some of the pre-reserved space */
@@ -2780,8 +2703,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
}
xlog_grant_add_space_reserve(log, ticket->t_unit_res);
- xlog_trace_loggrant(log, ticket,
- "xlog_regrant_reserve_log_space: exit");
+
+ trace_xfs_log_regrant_reserve_exit(log, ticket);
+
xlog_verify_grant_head(log, 0);
spin_unlock(&log->l_grant_lock);
ticket->t_curr_res = ticket->t_unit_res;
@@ -2811,11 +2735,11 @@ xlog_ungrant_log_space(xlog_t *log,
ticket->t_cnt--;
spin_lock(&log->l_grant_lock);
- xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
+ trace_xfs_log_ungrant_enter(log, ticket);
xlog_grant_sub_space(log, ticket->t_curr_res);
- xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
+ trace_xfs_log_ungrant_sub(log, ticket);
/* If this is a permanent reservation ticket, we may be able to free
* up more space based on the remaining count.
@@ -2825,7 +2749,8 @@ xlog_ungrant_log_space(xlog_t *log,
xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
}
- xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
+ trace_xfs_log_ungrant_exit(log, ticket);
+
xlog_verify_grant_head(log, 1);
spin_unlock(&log->l_grant_lock);
xfs_log_move_tail(log->l_mp, 1);
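
For context, the trace_xfs_log_* calls substituted above are standard event tracepoints; the XLOG_TIC_FLAGS and XFS_TRANS_TYPES string tables added further down in this patch exist so such events can pretty-print ticket state. A rough sketch of what one event definition could look like (illustrative only, not the actual fs/xfs/xfs_trace.h contents):

TRACE_EVENT(xfs_log_ungrant_enter,
	TP_PROTO(struct log *log, struct xlog_ticket *tic),
	TP_ARGS(log, tic),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(char, flags)
		__field(int, curr_res)
		__field(int, unit_res)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->flags = tic->t_flags;
		__entry->curr_res = tic->t_curr_res;
		__entry->unit_res = tic->t_unit_res;
	),
	TP_printk("dev %d:%d flags %s curr_res %d unit_res %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
		  __entry->curr_res, __entry->unit_res)
);
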
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 679c7c4926a2..d55662db7077 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,6 @@
#define __XFS_LOG_PRIV_H__
struct xfs_buf;
-struct ktrace;
struct log;
struct xlog_ticket;
struct xfs_buf_cancel;
@@ -135,6 +134,12 @@ static inline uint xlog_get_client_id(__be32 i)
#define XLOG_TIC_INITED 0x1 /* has been initialized */
#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */
#define XLOG_TIC_IN_Q 0x4
+
+#define XLOG_TIC_FLAGS \
+ { XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \
+ { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \
+ { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" }
+
#endif /* __KERNEL__ */
#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
@@ -361,9 +366,6 @@ typedef struct xlog_in_core {
int ic_bwritecnt;
unsigned short ic_state;
char *ic_datap; /* pointer to iclog data */
-#ifdef XFS_LOG_TRACE
- struct ktrace *ic_trace;
-#endif
/* Callback structures need their own cacheline */
spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
@@ -429,10 +431,6 @@ typedef struct log {
int l_grant_write_cycle;
int l_grant_write_bytes;
-#ifdef XFS_LOG_TRACE
- struct ktrace *l_grant_trace;
-#endif
-
/* The following field are used for debugging; need to hold icloglock */
#ifdef DEBUG
char *l_iclog_bak[XLOG_MAX_ICLOGS];
@@ -456,12 +454,6 @@ extern void xlog_put_bp(struct xfs_buf *);
extern kmem_zone_t *xfs_log_ticket_zone;
-/* iclog tracing */
-#define XLOG_TRACE_GRAB_FLUSH 1
-#define XLOG_TRACE_REL_FLUSH 2
-#define XLOG_TRACE_SLEEP_FLUSH 3
-#define XLOG_TRACE_WAKE_FLUSH 4
-
/*
* Unmount record type is used as a pseudo transaction type for the ticket.
 * Its value must be outside the range of XFS_TRANS_* values.
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1ec98ed914d4..69ac2e5ef20c 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -46,6 +46,7 @@
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
+#include "xfs_trace.h"
STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
@@ -225,16 +226,10 @@ xlog_header_check_dump(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
- int b;
-
- cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__);
- for (b = 0; b < 16; b++)
- cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
- cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
- cmn_err(CE_DEBUG, " log : uuid = ");
- for (b = 0; b < 16; b++)
- cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
- cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
+ cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n",
+ __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
+ cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n",
+ &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 66a888a9ad6f..eb403b40e120 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -44,6 +44,8 @@
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
+#include "xfs_trace.h"
+
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
@@ -2389,12 +2391,12 @@ xfs_icsb_modify_counters(
{
xfs_icsb_cnts_t *icsbp;
long long lcounter; /* long counter for 64 bit fields */
- int cpu, ret = 0;
+ int ret = 0;
might_sleep();
again:
- cpu = get_cpu();
- icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+ preempt_disable();
+ icsbp = this_cpu_ptr(mp->m_sb_cnts);
/*
* if the counter is disabled, go to slow path
@@ -2438,11 +2440,11 @@ again:
break;
}
xfs_icsb_unlock_cntr(icsbp);
- put_cpu();
+ preempt_enable();
return 0;
slow_path:
- put_cpu();
+ preempt_enable();
/*
* serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2492,7 @@ slow_path:
balance_counter:
xfs_icsb_unlock_cntr(icsbp);
- put_cpu();
+ preempt_enable();
/*
* We may have multiple threads here if multiple per-cpu
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 3ec91ac74c2a..91bfd60f4c74 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -92,6 +92,14 @@ typedef struct xfs_dqblk {
#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
+#define XFS_DQ_FLAGS \
+ { XFS_DQ_USER, "USER" }, \
+ { XFS_DQ_PROJ, "PROJ" }, \
+ { XFS_DQ_GROUP, "GROUP" }, \
+ { XFS_DQ_DIRTY, "DIRTY" }, \
+ { XFS_DQ_WANT, "WANT" }, \
+ { XFS_DQ_INACTIVE, "INACTIVE" }
+
/*
* In the worst case, when both user and group quotas are on,
* we can have a max of three dquots changing in a single transaction.
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index b81deea0ce19..fc1cda23b817 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -39,6 +39,7 @@
#include "xfs_utils.h"
#include "xfs_trans_space.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 385f6dceba5d..9e15a1185362 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -45,6 +45,7 @@
#include "xfs_inode_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
+#include "xfs_trace.h"
/*
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 4c199d18f850..5aa07caea5f1 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -44,6 +44,7 @@
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_rw.h"
+#include "xfs_trace.h"
/*
* This is a subroutine for xfs_write() and other writers (xfs_ioctl)
@@ -171,7 +172,6 @@ xfs_bioerror(
* No need to wait until the buffer is unpinned.
* We aren't flushing it.
*/
- xfs_buftrace("XFS IOERROR", bp);
XFS_BUF_ERROR(bp, EIO);
/*
* We're calling biodone, so delete B_DONE flag. Either way
@@ -205,7 +205,6 @@ xfs_bioerror_relse(
ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
- xfs_buftrace("XFS IOERRELSE", bp);
fl = XFS_BUF_BFLAGS(bp);
/*
* No need to wait until the buffer is unpinned.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a0574f593f52..ca64f33c63a3 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -100,6 +100,49 @@ typedef struct xfs_trans_header {
#define XFS_TRANS_TYPE_MAX 41
/* new transaction types need to be reflected in xfs_logprint(8) */
+#define XFS_TRANS_TYPES \
+ { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
+ { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
+ { XFS_TRANS_INACTIVE, "INACTIVE" }, \
+ { XFS_TRANS_CREATE, "CREATE" }, \
+ { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
+ { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
+ { XFS_TRANS_REMOVE, "REMOVE" }, \
+ { XFS_TRANS_LINK, "LINK" }, \
+ { XFS_TRANS_RENAME, "RENAME" }, \
+ { XFS_TRANS_MKDIR, "MKDIR" }, \
+ { XFS_TRANS_RMDIR, "RMDIR" }, \
+ { XFS_TRANS_SYMLINK, "SYMLINK" }, \
+ { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
+ { XFS_TRANS_GROWFS, "GROWFS" }, \
+ { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
+ { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
+ { XFS_TRANS_WRITEID, "WRITEID" }, \
+ { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
+ { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
+ { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
+ { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
+ { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
+ { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
+ { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
+ { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
+ { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
+ { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
+ { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
+ { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
+ { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
+ { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
+ { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
+ { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
+ { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
+ { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
+ { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
+ { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
+ { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
+ { XFS_TRANS_DUMMY1, "DUMMY1" }, \
+ { XFS_TRANS_DUMMY2, "DUMMY2" }, \
+ { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
+
/*
* This structure is used to track log items associated with
* a transaction. It points to the log item and keeps some
@@ -782,6 +825,10 @@ typedef struct xfs_log_item {
#define XFS_LI_IN_AIL 0x1
#define XFS_LI_ABORTED 0x2
+#define XFS_LI_FLAGS \
+ { XFS_LI_IN_AIL, "IN_AIL" }, \
+ { XFS_LI_ABORTED, "ABORTED" }
+
typedef struct xfs_item_ops {
uint (*iop_size)(xfs_log_item_t *);
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 03a1f701fea8..49130628d5ef 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -38,6 +38,7 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_rw.h"
+#include "xfs_trace.h"
STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
@@ -95,26 +96,23 @@ xfs_trans_get_buf(xfs_trans_t *tp,
}
if (bp != NULL) {
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
- if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
- xfs_buftrace("TRANS GET RECUR SHUT", bp);
+ if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
XFS_BUF_SUPER_STALE(bp);
- }
+
/*
* If the buffer is stale then it was binval'ed
* since last read. This doesn't matter since the
* caller isn't allowed to use the data anyway.
*/
- else if (XFS_BUF_ISSTALE(bp)) {
- xfs_buftrace("TRANS GET RECUR STALE", bp);
+ else if (XFS_BUF_ISSTALE(bp))
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
- }
+
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
- xfs_buftrace("TRANS GET RECUR", bp);
- xfs_buf_item_trace("GET RECUR", bip);
+ trace_xfs_trans_get_buf_recur(bip);
return (bp);
}
@@ -166,8 +164,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buftrace("TRANS GET", bp);
- xfs_buf_item_trace("GET", bip);
+ trace_xfs_trans_get_buf(bip);
return (bp);
}
@@ -207,7 +204,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
- xfs_buf_item_trace("GETSB RECUR", bip);
+ trace_xfs_trans_getsb_recur(bip);
return (bp);
}
@@ -249,7 +246,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buf_item_trace("GETSB", bip);
+ trace_xfs_trans_getsb(bip);
return (bp);
}
@@ -347,7 +344,7 @@ xfs_trans_read_buf(
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
ASSERT((XFS_BUF_ISERROR(bp)) == 0);
if (!(XFS_BUF_ISDONE(bp))) {
- xfs_buftrace("READ_BUF_INCORE !DONE", bp);
+ trace_xfs_trans_read_buf_io(bp, _RET_IP_);
ASSERT(!XFS_BUF_ISASYNC(bp));
XFS_BUF_READ(bp);
xfsbdstrat(tp->t_mountp, bp);
@@ -372,7 +369,7 @@ xfs_trans_read_buf(
* brelse it either. Just get out.
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
+ trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
*bpp = NULL;
return XFS_ERROR(EIO);
}
@@ -382,7 +379,7 @@ xfs_trans_read_buf(
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- xfs_buf_item_trace("READ RECUR", bip);
+ trace_xfs_trans_read_buf_recur(bip);
*bpp = bp;
return 0;
}
@@ -402,7 +399,6 @@ xfs_trans_read_buf(
}
if (XFS_BUF_GETERROR(bp) != 0) {
XFS_BUF_SUPER_STALE(bp);
- xfs_buftrace("READ ERROR", bp);
error = XFS_BUF_GETERROR(bp);
xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -461,8 +457,7 @@ xfs_trans_read_buf(
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buftrace("TRANS READ", bp);
- xfs_buf_item_trace("READ", bip);
+ trace_xfs_trans_read_buf(bip);
*bpp = bp;
return 0;
@@ -480,7 +475,7 @@ shutdown_abort:
ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
(XFS_B_STALE|XFS_B_DELWRI));
- xfs_buftrace("READ_BUF XFSSHUTDN", bp);
+ trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
xfs_buf_relse(bp);
*bpp = NULL;
return XFS_ERROR(EIO);
@@ -546,13 +541,14 @@ xfs_trans_brelse(xfs_trans_t *tp,
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
ASSERT(lidp != NULL);
+ trace_xfs_trans_brelse(bip);
+
/*
* If the release is just for a recursive lock,
* then decrement the count and return.
*/
if (bip->bli_recur > 0) {
bip->bli_recur--;
- xfs_buf_item_trace("RELSE RECUR", bip);
return;
}
@@ -560,10 +556,8 @@ xfs_trans_brelse(xfs_trans_t *tp,
* If the buffer is dirty within this transaction, we can't
* release it until we commit.
*/
- if (lidp->lid_flags & XFS_LID_DIRTY) {
- xfs_buf_item_trace("RELSE DIRTY", bip);
+ if (lidp->lid_flags & XFS_LID_DIRTY)
return;
- }
/*
* If the buffer has been invalidated, then we can't release
@@ -571,13 +565,10 @@ xfs_trans_brelse(xfs_trans_t *tp,
* as part of this transaction. This prevents us from pulling
* the item from the AIL before we should.
*/
- if (bip->bli_flags & XFS_BLI_STALE) {
- xfs_buf_item_trace("RELSE STALE", bip);
+ if (bip->bli_flags & XFS_BLI_STALE)
return;
- }
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
- xfs_buf_item_trace("RELSE", bip);
/*
* Free up the log item descriptor tracking the released item.
@@ -674,7 +665,7 @@ xfs_trans_bjoin(xfs_trans_t *tp,
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buf_item_trace("BJOIN", bip);
+ trace_xfs_trans_bjoin(bip);
}
/*
@@ -698,7 +689,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_HOLD;
- xfs_buf_item_trace("BHOLD", bip);
+ trace_xfs_trans_bhold(bip);
}
/*
@@ -721,7 +712,8 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT(bip->bli_flags & XFS_BLI_HOLD);
bip->bli_flags &= ~XFS_BLI_HOLD;
- xfs_buf_item_trace("BHOLD RELEASE", bip);
+
+ trace_xfs_trans_bhold_release(bip);
}
/*
@@ -767,6 +759,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
+ trace_xfs_trans_log_buf(bip);
+
/*
* If we invalidated the buffer within this transaction, then
* cancel the invalidation now that we're dirtying the buffer
@@ -774,7 +768,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
* because we have a reference to the buffer this entire time.
*/
if (bip->bli_flags & XFS_BLI_STALE) {
- xfs_buf_item_trace("BLOG UNSTALE", bip);
bip->bli_flags &= ~XFS_BLI_STALE;
ASSERT(XFS_BUF_ISSTALE(bp));
XFS_BUF_UNSTALE(bp);
@@ -789,7 +782,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
lidp->lid_flags &= ~XFS_LID_BUF_STALE;
bip->bli_flags |= XFS_BLI_LOGGED;
xfs_buf_item_log(bip, first, last);
- xfs_buf_item_trace("BLOG", bip);
}
@@ -828,6 +820,8 @@ xfs_trans_binval(
ASSERT(lidp != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ trace_xfs_trans_binval(bip);
+
if (bip->bli_flags & XFS_BLI_STALE) {
/*
* If the buffer is already invalidated, then
@@ -840,8 +834,6 @@ xfs_trans_binval(
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
- xfs_buftrace("XFS_BINVAL RECUR", bp);
- xfs_buf_item_trace("BINVAL RECUR", bip);
return;
}
@@ -875,8 +867,6 @@ xfs_trans_binval(
(bip->bli_format.blf_map_size * sizeof(uint)));
lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
tp->t_flags |= XFS_TRANS_DIRTY;
- xfs_buftrace("XFS_BINVAL", bp);
- xfs_buf_item_trace("BINVAL", bip);
}
/*
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 578f3f59b789..6558ffd8d140 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -53,6 +53,7 @@
#include "xfs_log_priv.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
int
xfs_setattr(
@@ -1397,7 +1398,6 @@ xfs_lookup(
if (error)
goto out_free_name;
- xfs_itrace_ref(*ipp);
return 0;
out_free_name:
@@ -1543,7 +1543,6 @@ xfs_create(
* At this point, we've gotten a newly allocated inode.
* It is locked (and joined to the transaction).
*/
- xfs_itrace_ref(ip);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
/*
@@ -2003,9 +2002,6 @@ xfs_remove(
if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
xfs_filestream_deassociate(ip);
- xfs_itrace_exit(ip);
- xfs_itrace_exit(dp);
-
std_return:
if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
@@ -2302,7 +2298,6 @@ xfs_symlink(
goto error_return;
goto error1;
}
- xfs_itrace_ref(ip);
/*
* An error after we've joined dp to the transaction will result in the
@@ -2845,7 +2840,6 @@ xfs_free_file_space(
ioffset = offset & ~(rounding - 1);
if (VN_CACHED(VFS_I(ip)) != 0) {
- xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
if (error)
goto out_unlock_iolock;
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63a..ecc44a8e2b44 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4b6755984d24..18c435d7c082 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line);
#endif
#define WARN_ON_ONCE(condition) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 5ee13b2fd223..20111265afd8 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -19,6 +19,11 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
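
MAP_UNINITIALIZED only has an effect on no-MMU kernels built with CONFIG_MMAP_ALLOW_UNINITIALIZED; elsewhere it expands to 0. A userspace sketch of opting out of anonymous-page clearing (illustrative, not part of this patch):

#include <sys/mman.h>

/* On MMU kernels MAP_UNINITIALIZED is 0 and changes nothing. */
void *buf = mmap(NULL, 1 << 16, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
/* buf contents are unspecified; never rely on zero-filled memory here */
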
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1c..8087b90d4673 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
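
The new this_cpu_ptr() helpers are what the xfs_icsb_modify_counters() hunk above switches to; a minimal sketch of the pattern (hypothetical counter structure, not from this patch):

struct my_stats { long events; };
static struct my_stats *stats;		/* allocated with alloc_percpu(struct my_stats) */

static void my_stats_inc(void)
{
	struct my_stats *s;

	preempt_disable();		/* stay on this CPU while touching its copy */
	s = this_cpu_ptr(stats);	/* replaces per_cpu_ptr(stats, get_cpu()) */
	s->events++;
	preempt_enable();
}
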
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c87..3e09b345f4d6 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
#define ATMEL_MCI_MAX_NR_SLOTS 2
-#include <linux/dw_dmac.h>
-
/**
* struct mci_slot_pdata - board-specific per-slot configuration
* @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
* @slot: Per-slot configuration data.
*/
struct mci_platform_data {
- struct dw_dma_slave dma_slave;
+ struct mci_dma_data *dma_slave;
struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
};
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 000000000000..d5a1d4810b80
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
+
+#define MSR_LBAR_SMB 0x5140000B
+#define MSR_LBAR_GPIO 0x5140000C
+#define MSR_LBAR_MFGPT 0x5140000D
+#define MSR_LBAR_ACPI 0x5140000E
+#define MSR_LBAR_PMS 0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
+#define MSR_PIC_YSEL_LOW 0x51400020
+#define MSR_PIC_YSEL_HIGH 0x51400021
+#define MSR_PIC_ZSEL_LOW 0x51400022
+#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
+
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE 0xFF
+#define LBAR_MFGPT_SIZE 0x40
+#define LBAR_ACPI_SIZE 0x40
+#define LBAR_PMS_SIZE 0x80
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG 0x534d /* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+ static int has_vsa2 = -1;
+
+ if (has_vsa2 == -1) {
+ uint16_t val;
+
+ /*
+ * The VSA has virtual registers that we can query for a
+ * signature.
+ */
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ val = inw(VSA_VRC_DATA);
+ has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+ }
+
+ return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL 0x00
+#define GPIO_OUTPUT_ENABLE 0x04
+#define GPIO_OUTPUT_OPEN_DRAIN 0x08
+#define GPIO_OUTPUT_INVERT 0x0C
+#define GPIO_OUTPUT_AUX1 0x10
+#define GPIO_OUTPUT_AUX2 0x14
+#define GPIO_PULL_UP 0x18
+#define GPIO_PULL_DOWN 0x1C
+#define GPIO_INPUT_ENABLE 0x20
+#define GPIO_INPUT_INVERT 0x24
+#define GPIO_INPUT_FILTER 0x28
+#define GPIO_INPUT_EVENT_COUNT 0x2C
+#define GPIO_READ_BACK 0x30
+#define GPIO_INPUT_AUX1 0x34
+#define GPIO_EVENTS_ENABLE 0x38
+#define GPIO_LOCK_ENABLE 0x3C
+#define GPIO_POSITIVE_EDGE_EN 0x40
+#define GPIO_NEGATIVE_EDGE_EN 0x44
+#define GPIO_POSITIVE_EDGE_STS 0x48
+#define GPIO_NEGATIVE_EDGE_STS 0x4C
+
+#define GPIO_MAP_X 0xE0
+#define GPIO_MAP_Y 0xE4
+#define GPIO_MAP_Z 0xE8
+#define GPIO_MAP_W 0xEC
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS 8
+#define MFGPT_TIMER_ANY (-1)
+
+#define MFGPT_DOMAIN_WORKING 1
+#define MFGPT_DOMAIN_STANDBY 2
+#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1 0
+#define MFGPT_CMP2 1
+
+#define MFGPT_EVENT_IRQ 0
+#define MFGPT_EVENT_NMI 1
+#define MFGPT_EVENT_RESET 3
+
+#define MFGPT_REG_CMP1 0
+#define MFGPT_REG_CMP2 2
+#define MFGPT_REG_COUNTER 4
+#define MFGPT_REG_SETUP 6
+
+#define MFGPT_SETUP_CNTEN (1 << 15)
+#define MFGPT_SETUP_CMP2 (1 << 14)
+#define MFGPT_SETUP_CMP1 (1 << 13)
+#define MFGPT_SETUP_SETUP (1 << 12)
+#define MFGPT_SETUP_STOPEN (1 << 11)
+#define MFGPT_SETUP_EXTEN (1 << 10)
+#define MFGPT_SETUP_REVEN (1 << 5)
+#define MFGPT_SETUP_CLKSEL (1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+ uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+ int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+ int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
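
A usage sketch for the MFGPT interface declared above (hypothetical caller; timer choice, IRQ number and error values are made up for illustration):

static struct cs5535_mfgpt_timer *example_timer;

static int example_mfgpt_start(void)
{
	int irq = 7;	/* hypothetical IRQ */

	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	if (cs5535_mfgpt_setup_irq(example_timer, MFGPT_CMP2, &irq)) {
		cs5535_mfgpt_free_timer(example_timer);
		return -EIO;
	}

	/* fire when the 16-bit counter reaches the CMP2 value */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0xFFFF);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}
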
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297a..a3d6ee0044f9 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce8..d4c9c0b88adc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
-struct dm_table *dm_get_table(struct mapped_device *md);
+struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
/*
* The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
*/
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+struct dm_table *dm_swap_table(struct mapped_device *md,
+ struct dm_table *t);
/*
* A wrapper around vmalloc.
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6f..7084503c3405 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
struct dm_dirty_log {
struct dm_dirty_log_type *type;
+ int (*flush_callback_fn)(struct dm_target *ti);
void *context;
};
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
* type->constructor/destructor() directly.
*/
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned argc, char **argv);
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31a..aa95508d2f95 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@@ -266,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 15
+#define DM_VERSION_MINOR 16
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2009-04-01)"
+#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
+/*
+ * If set, any table information returned will relate to the inactive
+ * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
+ * is set before using the data returned.
+ */
+#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a41373..9e2a7a401df5 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error);
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
/*
* Region recovery control.
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a1569..f8c2e1767500 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
- ##__VA_ARGS__); \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- dev_printk(KERN_DEBUG, dev, \
- KBUILD_MODNAME ": " fmt, \
- ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
return 0;
}
-#define dynamic_pr_debug(fmt, ...) do { } while (0)
-#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
#endif
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08b..fb737bc19a8c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
static inline char *
efi_guid_unparse(efi_guid_t *guid, char *out)
{
- sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- guid->b[3], guid->b[2], guid->b[1], guid->b[0],
- guid->b[5], guid->b[4], guid->b[7], guid->b[6],
- guid->b[8], guid->b[9], guid->b[10], guid->b[11],
- guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
+ sprintf(out, "%pUl", guid->b);
return out;
}
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf3..1b12642636c7 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+static inline long IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
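
IS_ERR_OR_NULL() is a convenience for interfaces that report failure as either NULL or an ERR_PTR() value; a minimal sketch with a hypothetical lookup helper:

void *obj = example_lookup(name);	/* hypothetical: NULL or ERR_PTR() on failure */

if (IS_ERR_OR_NULL(obj))
	return obj ? PTR_ERR(obj) : -ENOENT;
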
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index af634e95871d..5d86fb2309d2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -169,7 +169,7 @@ struct hrtimer_clock_base {
* @max_hang_time: Maximum time spent in hrtimer_interrupt
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70fa..78b4bc64c006 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 69f07a9f1277..41235c93e4e9 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -93,7 +93,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
struct task_struct *tsk) { return NULL; }
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr) { return NULL; }
+ struct perf_event_attr *attr) { return -ENOSYS; }
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b266..02fc617782ef 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
- * @address_data: The I2C addresses to probe (for detect)
+ * @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
- const struct i2c_client_address_data *address_data;
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ const unsigned short *address_list;
struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
-/* i2c_client_address_data is the struct for holding default client
- * addresses for a driver and for the parameters supplied on the
- * command line
- */
-struct i2c_client_address_data {
- const unsigned short *normal_i2c;
-};
-
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -576,82 +568,4 @@ union i2c_smbus_data {
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
#define I2C_SMBUS_I2C_BLOCK_DATA 8
-
-#ifdef __KERNEL__
-
-/* These defines are used for probing i2c client addresses */
-/* The length of the option lists */
-#define I2C_CLIENT_MAX_OPTS 48
-
-/* Default fill of many variables */
-#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
-
-/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
- module header */
-
-#define I2C_CLIENT_MODULE_PARM(var,desc) \
- static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
- static unsigned int var##_num; \
- module_param_array(var, short, &var##_num, 0); \
- MODULE_PARM_DESC(var, desc)
-
-#define I2C_CLIENT_INSMOD_COMMON \
-static const struct i2c_client_address_data addr_data = { \
- .normal_i2c = normal_i2c, \
-}
-
-/* These are the ones you want to use in your own drivers. Pick the one
- which matches the number of devices the driver differenciates between. */
-#define I2C_CLIENT_INSMOD \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_1(chip1) \
-enum chips { any_chip, chip1 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
-enum chips { any_chip, chip1, chip2 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
-enum chips { any_chip, chip1, chip2, chip3 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
-enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7, chip8 }; \
-I2C_CLIENT_INSMOD_COMMON
-#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_H */
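
With the I2C_CLIENT_INSMOD machinery gone, a detecting driver now supplies a plain address list and the two-argument detect callback; a conversion sketch (driver name, class and addresses are made up):

static const unsigned short example_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static int example_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	/* probe the chip here; on success report its type */
	strlcpy(info->type, "example", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.class		= I2C_CLASS_HWMON,	/* hwmon-style probing class */
	.detect		= example_detect,
	.address_list	= example_addrs,
	/* .probe, .remove and .id_table omitted for brevity */
};
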
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
index 918c5354d9b8..08aa92278d71 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/i2c/tps65010.h
@@ -72,6 +72,21 @@
#define TPS_VDCDC1 0x0c
# define TPS_ENABLE_LP (1 << 3)
#define TPS_VDCDC2 0x0d
+# define TPS_LP_COREOFF (1 << 7)
+# define TPS_VCORE_1_8V (7 << 4)
+# define TPS_VCORE_1_5V (6 << 4)
+# define TPS_VCORE_1_4V (5 << 4)
+# define TPS_VCORE_1_3V (4 << 4)
+# define TPS_VCORE_1_2V (3 << 4)
+# define TPS_VCORE_1_1V (2 << 4)
+# define TPS_VCORE_1_0V (1 << 4)
+# define TPS_VCORE_0_85V (0 << 4)
+# define TPS_VCORE_LP_1_2V (3 << 2)
+# define TPS_VCORE_LP_1_1V (2 << 2)
+# define TPS_VCORE_LP_1_0V (1 << 2)
+# define TPS_VCORE_LP_0_85V (0 << 2)
+# define TPS_VIB (1 << 1)
+# define TPS_VCORE_DISCH (1 << 0)
#define TPS_VREGS1 0x0e
# define TPS_LDO2_ENABLE (1 << 7)
# define TPS_LDO2_OFF (1 << 6)
@@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value);
*/
extern int tps65013_set_low_pwr(unsigned mode);
+/* tps65010_config_vdcdc2
+ * value to be written to VDCDC2
+ */
+extern int tps65010_config_vdcdc2(unsigned value);
struct i2c_client;
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl.h
index 5306a759cbde..bf1c5be1f5b6 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __TWL4030_H_
-#define __TWL4030_H_
+#ifndef __TWL_H_
+#define __TWL_H_
#include <linux/types.h>
#include <linux/input/matrix_keypad.h>
@@ -61,28 +61,112 @@
#define TWL4030_MODULE_PWMA 0x0E
#define TWL4030_MODULE_PWMB 0x0F
+#define TWL5031_MODULE_ACCESSORY 0x10
+#define TWL5031_MODULE_INTERRUPTS 0x11
+
/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP 0x10
-#define TWL4030_MODULE_INT 0x11
-#define TWL4030_MODULE_PM_MASTER 0x12
-#define TWL4030_MODULE_PM_RECEIVER 0x13
-#define TWL4030_MODULE_RTC 0x14
-#define TWL4030_MODULE_SECURED_REG 0x15
+#define TWL4030_MODULE_BACKUP 0x12
+#define TWL4030_MODULE_INT 0x13
+#define TWL4030_MODULE_PM_MASTER 0x14
+#define TWL4030_MODULE_PM_RECEIVER 0x15
+#define TWL4030_MODULE_RTC 0x16
+#define TWL4030_MODULE_SECURED_REG 0x17
+
+#define TWL_MODULE_USB TWL4030_MODULE_USB
+#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
+#define TWL_MODULE_PIH TWL4030_MODULE_PIH
+#define TWL_MODULE_MADC TWL4030_MODULE_MADC
+#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
+#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
+#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define TWL_MODULE_RTC TWL4030_MODULE_RTC
+
+#define GPIO_INTR_OFFSET 0
+#define KEYPAD_INTR_OFFSET 1
+#define BCI_INTR_OFFSET 2
+#define MADC_INTR_OFFSET 3
+#define USB_INTR_OFFSET 4
+#define BCI_PRES_INTR_OFFSET 9
+#define USB_PRES_INTR_OFFSET 10
+#define RTC_INTR_OFFSET 11
+
+/*
+ * Offset from TWL6030_IRQ_BASE / pdata->irq_base
+ */
+#define PWR_INTR_OFFSET 0
+#define HOTDIE_INTR_OFFSET 12
+#define SMPSLDO_INTR_OFFSET 13
+#define BATDETECT_INTR_OFFSET 14
+#define SIMDETECT_INTR_OFFSET 15
+#define MMCDETECT_INTR_OFFSET 16
+#define GASGAUGE_INTR_OFFSET 17
+#define USBOTG_INTR_OFFSET 4
+#define CHARGER_INTR_OFFSET 2
+#define RSV_INTR_OFFSET 0
+
+/* INT register offsets */
+#define REG_INT_STS_A 0x00
+#define REG_INT_STS_B 0x01
+#define REG_INT_STS_C 0x02
+
+#define REG_INT_MSK_LINE_A 0x03
+#define REG_INT_MSK_LINE_B 0x04
+#define REG_INT_MSK_LINE_C 0x05
+
+#define REG_INT_MSK_STS_A 0x06
+#define REG_INT_MSK_STS_B 0x07
+#define REG_INT_MSK_STS_C 0x08
+
+/* MASK INT REG GROUP A */
+#define TWL6030_PWR_INT_MASK 0x07
+#define TWL6030_RTC_INT_MASK 0x18
+#define TWL6030_HOTDIE_INT_MASK 0x20
+#define TWL6030_SMPSLDOA_INT_MASK 0xC0
+
+/* MASK INT REG GROUP B */
+#define TWL6030_SMPSLDOB_INT_MASK 0x01
+#define TWL6030_BATDETECT_INT_MASK 0x02
+#define TWL6030_SIMDETECT_INT_MASK 0x04
+#define TWL6030_MMCDETECT_INT_MASK 0x08
+#define TWL6030_GPADC_INT_MASK 0x60
+#define TWL6030_GASGAUGE_INT_MASK 0x80
+
+/* MASK INT REG GROUP C */
+#define TWL6030_USBOTG_INT_MASK 0x0F
+#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
+#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
+
+
+#define TWL4030_CLASS_ID 0x4030
+#define TWL6030_CLASS_ID 0x6030
+unsigned int twl_rev(void);
+#define GET_TWL_REV (twl_rev())
+#define TWL_CLASS_IS(class, id) \
+static inline int twl_class_is_ ##class(void) \
+{ \
+ return ((id) == (GET_TWL_REV)) ? 1 : 0; \
+}
+
+TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
+TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
/*
* Read and write single 8-bit registers
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
-int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
/*
* Read and write several 8-bit registers at once.
*
- * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
+ * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
* for the value, and populate your data starting at offset 1.
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
/*----------------------------------------------------------------------*/
@@ -221,6 +305,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
+/*
+ * Accessory Interrupts
+ */
+#define TWL5031_ACIIMR_LSB 0x05
+#define TWL5031_ACIIMR_MSB 0x06
+#define TWL5031_ACIIDR_LSB 0x07
+#define TWL5031_ACIIDR_MSB 0x08
+#define TWL5031_ACCISR1 0x0F
+#define TWL5031_ACCIMR1 0x10
+#define TWL5031_ACCISR2 0x11
+#define TWL5031_ACCIMR2 0x12
+#define TWL5031_ACCSIR 0x13
+#define TWL5031_ACCEDR1 0x14
+#define TWL5031_ACCSIHCTRL 0x15
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery Charger Controller
+ */
+
+#define TWL5031_INTERRUPTS_BCIISR1 0x0
+#define TWL5031_INTERRUPTS_BCIIMR1 0x1
+#define TWL5031_INTERRUPTS_BCIISR2 0x2
+#define TWL5031_INTERRUPTS_BCIIMR2 0x3
+#define TWL5031_INTERRUPTS_BCISIR 0x4
+#define TWL5031_INTERRUPTS_BCIEDR1 0x5
+#define TWL5031_INTERRUPTS_BCIEDR2 0x6
+#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
/* Power bus message definitions */
/* The TWL4030/5030 splits its power-management resources (the various
@@ -250,6 +366,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define RES_TYPE_ALL 0x7
+/* Resource states */
#define RES_STATE_WRST 0xF
#define RES_STATE_ACTIVE 0xE
#define RES_STATE_SLEEP 0x8
@@ -310,8 +427,18 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define MSG_SINGULAR(devgrp, id, state) \
((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+#define MSG_BROADCAST_ALL(devgrp, state) \
+ ((devgrp) << 5 | (state))
+
+#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
+#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
+#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
/*----------------------------------------------------------------------*/
+struct twl4030_clock_init_data {
+ bool ck32k_lowpwr_enable;
+};
+
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
@@ -391,12 +518,15 @@ struct twl4030_resconfig {
u8 devgroup; /* Processor group that Power resource belongs to */
u8 type; /* Power resource addressed, 6 / broadcast message */
u8 type2; /* Power resource addressed, 3 / broadcast message */
+ u8 remap_off; /* off state remapping */
+ u8 remap_sleep; /* sleep state remapping */
};
struct twl4030_power_data {
struct twl4030_script **scripts;
unsigned num;
struct twl4030_resconfig *resource_config;
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
};
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
@@ -421,6 +551,7 @@ struct twl4030_codec_data {
struct twl4030_platform_data {
unsigned irq_base, irq_end;
+ struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
struct twl4030_madc_platform_data *madc;
@@ -429,19 +560,31 @@ struct twl4030_platform_data {
struct twl4030_power_data *power;
struct twl4030_codec_data *codec;
- /* LDO regulators */
+ /* Common LDO regulators for TWL4030/TWL6030 */
struct regulator_init_data *vdac;
+ struct regulator_init_data *vaux1;
+ struct regulator_init_data *vaux2;
+ struct regulator_init_data *vaux3;
+ /* TWL4030 LDO regulators */
struct regulator_init_data *vpll1;
struct regulator_init_data *vpll2;
struct regulator_init_data *vmmc1;
struct regulator_init_data *vmmc2;
struct regulator_init_data *vsim;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
struct regulator_init_data *vaux4;
-
- /* REVISIT more to come ... _nothing_ should be hard-wired */
+ struct regulator_init_data *vio;
+ struct regulator_init_data *vdd1;
+ struct regulator_init_data *vdd2;
+ struct regulator_init_data *vintana1;
+ struct regulator_init_data *vintana2;
+ struct regulator_init_data *vintdig;
+ /* TWL6030 LDO regulators */
+ struct regulator_init_data *vmmc;
+ struct regulator_init_data *vpp;
+ struct regulator_init_data *vusim;
+ struct regulator_init_data *vana;
+ struct regulator_init_data *vcxio;
+ struct regulator_init_data *vusb;
};
/*----------------------------------------------------------------------*/
@@ -473,6 +616,7 @@ int twl4030_sih_setup(int module);
* VIO is generally fixed.
*/
+/* TWL4030 SMPS/LDO's */
/* EXTERNAL dc-to-dc buck converters */
#define TWL4030_REG_VDD1 0
#define TWL4030_REG_VDD2 1
@@ -499,4 +643,31 @@ int twl4030_sih_setup(int module);
#define TWL4030_REG_VUSB1V8 18
#define TWL4030_REG_VUSB3V1 19
+/* TWL6030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck converter controllable via SR */
+#define TWL6030_REG_VDD1 30
+#define TWL6030_REG_VDD2 31
+#define TWL6030_REG_VDD3 32
+
+/* Non SR compliant dc-to-dc buck converters */
+#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_V2V1 34
+#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V8 36
+
+/* EXTERNAL LDOs */
+#define TWL6030_REG_VAUX1_6030 37
+#define TWL6030_REG_VAUX2_6030 38
+#define TWL6030_REG_VAUX3_6030 39
+#define TWL6030_REG_VMMC 40
+#define TWL6030_REG_VPP 41
+#define TWL6030_REG_VUSIM 42
+#define TWL6030_REG_VANA 43
+#define TWL6030_REG_VCXIO 44
+#define TWL6030_REG_VDAC 45
+#define TWL6030_REG_VUSB 46
+
+/* INTERNAL LDOs */
+#define TWL6030_REG_VRTC 47
+
+#endif /* End of __TWL_H_ */
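
TWL_CLASS_IS() generates twl_class_is_4030()/twl_class_is_6030(), so shared code can branch on the chip family; for instance, masking the charger-control interrupt on a 6030-class chip might look like this (illustrative caller, not from this patch):

static void example_mask_charger_irq(void)
{
	if (twl_class_is_6030())
		twl6030_interrupt_mask(TWL6030_CHARGER_CTRL_INT_MASK,
				       REG_INT_MSK_LINE_C);
	/* TWL4030-class chips route these through the SIH modules instead */
}
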
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c9..5ed8b9c50355 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,6 +111,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#ifdef CONFIG_FS_JOURNAL_INFO
+#define INIT_JOURNAL_INFO .journal_info = NULL,
+#else
+#define INIT_JOURNAL_INFO
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,10 +168,9 @@ extern struct cred init_cred;
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
@@ -173,6 +178,7 @@ extern struct cred init_cred;
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
+ INIT_JOURNAL_INFO \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a6..451481c082b5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2b..d8e9b3d1c23c 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
__builtin_extract_return_addr((void *)addr));
}
-/*
- * Pretty-print a function pointer. This function is deprecated.
- * Please use the "%pF" vsprintf format instead.
- */
-static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- addr = *(void **)addr;
-#endif
- print_symbol(fmt, (unsigned long)addr);
-}
-
static inline void print_ip_sym(unsigned long ip)
{
printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf12..4d9c916d06d9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#define printk_once(x...) ({ \
- static bool __print_once = true; \
+ static bool __print_once; \
\
- if (__print_once) { \
- __print_once = false; \
+ if (!__print_once) { \
+ __print_once = true; \
printk(x); \
} \
})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#elif defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) do { \
- dynamic_pr_debug(fmt, ##__VA_ARGS__); \
- } while (0)
+#define pr_debug(fmt, ...) \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+ */
+#ifdef CONFIG_PRINTK
+#define printk_ratelimited(fmt, ...) ({ \
+ static struct ratelimit_state _rs = { \
+ .interval = DEFAULT_RATELIMIT_INTERVAL, \
+ .burst = DEFAULT_RATELIMIT_BURST, \
+ }; \
+ \
+ if (!__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+#else
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_ratelimited printk
+#endif
+
+#define pr_emerg_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_ratelimited, don't do that... */
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_ratelimited(fmt, ...) \
+ ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
+ ##__VA_ARGS__); 0; })
+#endif
+
+/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
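
A minimal use of the new ratelimited printk wrappers (function and message are illustrative):

static void example_report_error(int error, const char *name)
{
	/* emits at most DEFAULT_RATELIMIT_BURST messages per interval */
	pr_err_ratelimited("example: I/O error %d on %s\n", error, name);
}
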
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5d..bed5f16ba827 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
#include <linux/bitops.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,23 +38,60 @@ static inline void ksm_exit(struct mm_struct *mm)
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
*/
static inline int PageKsm(struct page *page)
{
- return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
/*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
*/
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- page->mapping = (void *) PAGE_MAPPING_ANON;
- __inc_zone_page_state(page, NR_ANON_PAGES);
- }
+ struct anon_vma *anon_vma = page_anon_vma(page);
+
+ if (!anon_vma ||
+ (anon_vma == vma->anon_vma &&
+ page->index == linear_page_index(vma, address)))
+ return page;
+
+ return ksm_does_need_to_copy(page, vma, address);
}
+
+int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
#else /* !CONFIG_KSM */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -73,7 +114,32 @@ static inline int PageKsm(struct page *page)
return 0;
}
-/* No stub required for page_add_ksm_rmap(page) */
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags)
+{
+ return 0;
+}
+
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+{
+ return 0;
+}
+
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ return 0;
+}
+
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
#endif /* !CONFIG_KSM */
-#endif
+#endif /* __LINUX_KSM_H */
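For orientation, a hedged sketch of how the swap-in path is expected to use the new helper; the wrapper name below is made up, the real caller is do_swap_page() in mm/memory.c:

#include <linux/ksm.h>
#include <linux/mm.h>

/* Before mapping a page just read back from swap into 'vma', let KSM hand
 * back a private copy if it is a shared KSM page that can no longer be
 * attached to this vma's anon_vma. */
static struct page *example_swapin_fixup(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long address)
{
	struct page *new = ksm_might_need_to_copy(page, vma, address);

	/* 'new' is either the original page or a freshly allocated copy;
	 * ksm_does_need_to_copy() is only reached in the copy case. */
	return new;
}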
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4c..f1ca0dcc1628 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
#define LIS3_WAKEUP_Z_HI (1 << 5)
unsigned char wakeup_flags;
unsigned char wakeup_thresh;
+#define LIS3_NO_MAP 0
+#define LIS3_DEV_X 1
+#define LIS3_DEV_Y 2
+#define LIS3_DEV_Z 3
+#define LIS3_INV_DEV_X -1
+#define LIS3_INV_DEV_Y -2
+#define LIS3_INV_DEV_Z -3
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+ /* Limits for selftest are specified in chip data sheet */
+ s16 st_min_limits[3]; /* min pass limit x, y, z */
+ s16 st_max_limits[3]; /* max pass limit x, y, z */
};
#endif /* __LIS3LV02D_H_ */
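A hypothetical board file fragment showing the new axis-remap and selftest-limit fields; the limit numbers are placeholders, the real values come from the part's data sheet:

#include <linux/lis3lv02d.h>

/* Sensor mounted rotated 90 degrees around Z: board X is the chip's -Y,
 * board Y is the chip's +X. */
static struct lis3lv02d_platform_data example_lis3_pdata = {
	.wakeup_flags	= LIS3_WAKEUP_Z_HI,
	.wakeup_thresh	= 8,
	.axis_x		= LIS3_INV_DEV_Y,
	.axis_y		= LIS3_DEV_X,
	.axis_z		= LIS3_DEV_Z,
	.st_min_limits	= { 120, 120, 140 },	/* placeholder limits */
	.st_max_limits	= { 550, 550, 750 },
};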
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a41..35b07b773e6c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
-extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f1..1cc966cd3e5f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
return node_zonelist(0, gfp_flags);
}
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
+
static inline int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes,
const nodemask_t *to_nodes, int flags)
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
new file mode 100644
index 000000000000..f41b428d2cec
--- /dev/null
+++ b/include/linux/mfd/88pm8607.h
@@ -0,0 +1,217 @@
+/*
+ * Marvell 88PM8607 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM8607_H
+#define __LINUX_MFD_88PM8607_H
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO14,
+
+ PM8607_ID_RG_MAX,
+};
+
+#define CHIP_ID (0x40)
+#define CHIP_ID_MASK (0xF8)
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_MISC1 (0x40)
+
+/* bit definitions for PM8607 events */
+#define PM8607_EVENT_ONKEY (1 << 0)
+#define PM8607_EVENT_EXTON (1 << 1)
+#define PM8607_EVENT_CHG (1 << 2)
+#define PM8607_EVENT_BAT (1 << 3)
+#define PM8607_EVENT_RTC (1 << 4)
+#define PM8607_EVENT_CC (1 << 5)
+#define PM8607_EVENT_VBAT (1 << 8)
+#define PM8607_EVENT_VCHG (1 << 9)
+#define PM8607_EVENT_VSYS (1 << 10)
+#define PM8607_EVENT_TINT (1 << 11)
+#define PM8607_EVENT_GPADC0 (1 << 12)
+#define PM8607_EVENT_GPADC1 (1 << 13)
+#define PM8607_EVENT_GPADC2 (1 << 14)
+#define PM8607_EVENT_GPADC3 (1 << 15)
+#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
+#define PM8607_EVENT_PEN (1 << 17)
+#define PM8607_EVENT_HEADSET (1 << 18)
+#define PM8607_EVENT_HOOK (1 << 19)
+#define PM8607_EVENT_MICIN (1 << 20)
+#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
+#define PM8607_EVENT_CHG_DONE (1 << 22)
+#define PM8607_EVENT_CHG_FAULT (1 << 23)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_MISC1_PI2C (1 << 0)
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY = 0,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_VBAT = 8,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT = 16,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+
+struct pm8607_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *client;
+
+ int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
+ int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
+
+ int buck3_double; /* DVC ramp slope double */
+ unsigned char chip_id;
+
+};
+
+#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm8607_platform_data {
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
+};
+
+extern int pm8607_reg_read(struct pm8607_chip *, int);
+extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
+extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
+ unsigned char);
+#endif /* __LINUX_MFD_88PM8607_H */
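An illustrative helper built only from the accessors declared above, assuming pm8607_set_bits() takes (chip, reg, mask, data) in that order:

#include <linux/mfd/88pm8607.h>

/* Switch BUCK3 into its voltage-doubler mode; 'chip' is assumed to come
 * from the core driver's probe(). */
static int pm8607_enable_buck3_double(struct pm8607_chip *chip)
{
	return pm8607_set_bits(chip, PM8607_BUCK3,
			       PM8607_BUCK3_DOUBLE, PM8607_BUCK3_DOUBLE);
}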
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
new file mode 100644
index 000000000000..a42a7033ae53
--- /dev/null
+++ b/include/linux/mfd/ab4500.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * AB4500 device core functions, for client access
+ */
+#ifndef MFD_AB4500_H
+#define MFD_AB4500_H
+
+#include <linux/device.h>
+
+/*
+ * AB4500 bank addresses
+ */
+#define AB4500_SYS_CTRL1_BLOCK 0x1
+#define AB4500_SYS_CTRL2_BLOCK 0x2
+#define AB4500_REGU_CTRL1 0x3
+#define AB4500_REGU_CTRL2 0x4
+#define AB4500_USB 0x5
+#define AB4500_TVOUT 0x6
+#define AB4500_DBI 0x7
+#define AB4500_ECI_AV_ACC 0x8
+#define AB4500_RESERVED 0x9
+#define AB4500_GPADC 0xA
+#define AB4500_CHARGER 0xB
+#define AB4500_GAS_GAUGE 0xC
+#define AB4500_AUDIO 0xD
+#define AB4500_INTERRUPT 0xE
+#define AB4500_RTC 0xF
+#define AB4500_MISC 0x10
+#define AB4500_DEBUG 0x12
+#define AB4500_PROD_TEST 0x13
+#define AB4500_OTP_EMUL 0x15
+
+/*
+ * System control 1 register offsets.
+ * Bank = 0x01
+ */
+#define AB4500_TURNON_STAT_REG 0x0100
+#define AB4500_RESET_STAT_REG 0x0101
+#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
+
+#define AB4500_FSM_STAT1_REG 0x0140
+#define AB4500_FSM_STAT2_REG 0x0141
+#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
+#define AB4500_USB_STAT1_REG 0x0143
+#define AB4500_USB_STAT2_REG 0x0144
+#define AB4500_STATUS_SPARE1_REG 0x0145
+#define AB4500_STATUS_SPARE2_REG 0x0146
+
+#define AB4500_CTRL1_REG 0x0180
+#define AB4500_CTRL2_REG 0x0181
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB4500_CTRL3_REG 0x0200
+#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
+#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
+#define AB4500_LOW_BAT_REG 0x0203
+#define AB4500_BATT_OK_REG 0x0204
+#define AB4500_SYSCLK_TIMER_REG 0x0205
+#define AB4500_SMPSCLK_CTRL_REG 0x0206
+#define AB4500_SMPSCLK_SEL1_REG 0x0207
+#define AB4500_SMPSCLK_SEL2_REG 0x0208
+#define AB4500_SMPSCLK_SEL3_REG 0x0209
+#define AB4500_SYSULPCLK_CONF_REG 0x020A
+#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
+#define AB4500_SYSCLK_CTRL_REG 0x020C
+#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
+#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
+#define AB4500_SYSCTRL_SPARE_REG 0x020F
+#define AB4500_PAD_CONF_REG 0x0210
+
+/*
+ * Regu control1 register offsets
+ * Bank = 0x03
+ */
+#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
+#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
+#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
+#define AB4500_REGU_REQ_CTRL1_REG 0x0303
+#define AB4500_REGU_REQ_CTRL2_REG 0x0304
+#define AB4500_REGU_REQ_CTRL3_REG 0x0305
+#define AB4500_REGU_REQ_CTRL4_REG 0x0306
+#define AB4500_REGU_MISC1_REG 0x0380
+#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
+#define AB4500_REGU_VUSB_CTRL_REG 0x0382
+#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
+#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
+
+/*
+ * Regu control2 Vmod register offsets
+ */
+#define AB4500_REGU_VMOD_REGU_REG 0x0440
+#define AB4500_REGU_VMOD_SEL1_REG 0x0441
+#define AB4500_REGU_VMOD_SEL2_REG 0x0442
+#define AB4500_REGU_CTRL_DISCH_REG 0x0443
+#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
+
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB4500_USB_LINE_STAT_REG 0x0580
+#define AB4500_USB_LINE_CTRL1_REG 0x0581
+#define AB4500_USB_LINE_CTRL2_REG 0x0582
+#define AB4500_USB_LINE_CTRL3_REG 0x0583
+#define AB4500_USB_LINE_CTRL4_REG 0x0584
+#define AB4500_USB_LINE_CTRL5_REG 0x0585
+#define AB4500_USB_OTG_CTRL_REG 0x0587
+#define AB4500_USB_OTG_STAT_REG 0x0588
+#define AB4500_USB_CTRL_SPARE_REG 0x0589
+#define AB4500_USB_PHY_CTRL_REG 0x058A
+
+/*
+ * TVOUT / CTRL register offsets
+ * Bank : 0x06
+ */
+#define AB4500_TVOUT_CTRL_REG 0x0680
+
+/*
+ * DBI register offsets
+ * Bank : 0x07
+ */
+#define AB4500_DBI_REG1_REG 0x0700
+#define AB4500_DBI_REG2_REG 0x0701
+
+/*
+ * ECI register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ECI_CTRL_REG 0x0800
+#define AB4500_ECI_HOOKLEVEL_REG 0x0801
+#define AB4500_ECI_DATAOUT_REG 0x0802
+#define AB4500_ECI_DATAIN_REG 0x0803
+
+/*
+ * AV Connector register offsets
+ * Bank : 0x08
+ */
+#define AB4500_AV_CONN_REG 0x0840
+
+/*
+ * Accessory detection register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ACC_DET_DB1_REG 0x0880
+#define AB4500_ACC_DET_DB2_REG 0x0881
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB4500_GPADC_CTRL1_REG 0x0A00
+#define AB4500_GPADC_CTRL2_REG 0x0A01
+#define AB4500_GPADC_CTRL3_REG 0x0A02
+#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
+#define AB4500_GPADC_STAT_REG 0x0A04
+#define AB4500_GPADC_MANDATAL_REG 0x0A05
+#define AB4500_GPADC_MANDATAH_REG 0x0A06
+#define AB4500_GPADC_AUTODATAL_REG 0x0A07
+#define AB4500_GPADC_AUTODATAH_REG 0x0A08
+#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_STATUS1_REG 0x0B00
+#define AB4500_CH_STATUS2_REG 0x0B01
+#define AB4500_CH_USBCH_STAT1_REG 0x0B02
+#define AB4500_CH_USBCH_STAT2_REG 0x0B03
+#define AB4500_CH_FSM_STAT_REG 0x0B04
+#define AB4500_CH_STAT_REG 0x0B05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_VOLT_LVL_REG 0x0B40
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_MCH_CTRL1 0x0B80
+#define AB4500_MCH_CTRL2 0x0B81
+#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
+#define AB4500_CH_WD_REG 0x0B83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_USBCH_CTRL1_REG 0x0BC0
+#define AB4500_USBCH_CTRL2_REG 0x0BC1
+#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
+
+/*
+ * RTC bank register offsets
+ * Bank : 0xF
+ */
+#define AB4500_RTC_SOFF_STAT_REG 0x0F00
+#define AB4500_RTC_CC_CONF_REG 0x0F01
+#define AB4500_RTC_READ_REQ_REG 0x0F02
+#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
+#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
+#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
+#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
+#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
+#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
+#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
+#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
+#define AB4500_RTC_STAT_REG 0x0F0B
+#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
+#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
+#define AB4500_RTC_CALIB_REG 0x0F0E
+#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB4500_PWM_OUT_CTRL1_REG 0x1060
+#define AB4500_PWM_OUT_CTRL2_REG 0x1061
+#define AB4500_PWM_OUT_CTRL3_REG 0x1062
+#define AB4500_PWM_OUT_CTRL4_REG 0x1063
+#define AB4500_PWM_OUT_CTRL5_REG 0x1064
+#define AB4500_PWM_OUT_CTRL6_REG 0x1065
+#define AB4500_PWM_OUT_CTRL7_REG 0x1066
+
+#define AB4500_I2C_PAD_CTRL_REG 0x1067
+#define AB4500_REV_REG 0x1080
+
+/**
+ * struct ab4500
+ * @spi: spi device structure
+ * @tx_buf: transmit buffer
+ * @rx_buf: receive buffer
+ * @lock: sync primitive
+ */
+struct ab4500 {
+ struct spi_device *spi;
+ unsigned long tx_buf[4];
+ unsigned long rx_buf[4];
+ struct mutex lock;
+};
+
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data);
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr);
+
+#endif /* MFD_AB4500_H */
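A hedged sketch of a register read through the SPI accessors above; it assumes ab4500_read() takes the bank and the in-bank offset separately, the offset being the low byte of the register macros (the core driver defines the actual convention):

#include <linux/mfd/ab4500.h>

static int ab4500_get_revision(struct ab4500 *ab)
{
	/* AB4500_REV_REG (0x1080) encodes bank 0x10 (MISC) and offset 0x80 */
	return ab4500_read(ab, AB4500_MISC, AB4500_REV_REG & 0xff);
}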
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 000000000000..ac37558a4673
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef __LINUX_MFD_ADP5520_H
+#define __LINUX_MFD_ADP5520_H
+
+#define ID_ADP5520 5520
+#define ID_ADP5501 5501
+
+/*
+ * ADP5520/ADP5501 Register Map
+ */
+
+#define ADP5520_MODE_STATUS 0x00
+#define ADP5520_INTERRUPT_ENABLE 0x01
+#define ADP5520_BL_CONTROL 0x02
+#define ADP5520_BL_TIME 0x03
+#define ADP5520_BL_FADE 0x04
+#define ADP5520_DAYLIGHT_MAX 0x05
+#define ADP5520_DAYLIGHT_DIM 0x06
+#define ADP5520_OFFICE_MAX 0x07
+#define ADP5520_OFFICE_DIM 0x08
+#define ADP5520_DARK_MAX 0x09
+#define ADP5520_DARK_DIM 0x0A
+#define ADP5520_BL_VALUE 0x0B
+#define ADP5520_ALS_CMPR_CFG 0x0C
+#define ADP5520_L2_TRIP 0x0D
+#define ADP5520_L2_HYS 0x0E
+#define ADP5520_L3_TRIP 0x0F
+#define ADP5520_L3_HYS 0x10
+#define ADP5520_LED_CONTROL 0x11
+#define ADP5520_LED_TIME 0x12
+#define ADP5520_LED_FADE 0x13
+#define ADP5520_LED1_CURRENT 0x14
+#define ADP5520_LED2_CURRENT 0x15
+#define ADP5520_LED3_CURRENT 0x16
+
+/*
+ * ADP5520 Register Map
+ */
+
+#define ADP5520_GPIO_CFG_1 0x17
+#define ADP5520_GPIO_CFG_2 0x18
+#define ADP5520_GPIO_IN 0x19
+#define ADP5520_GPIO_OUT 0x1A
+#define ADP5520_GPIO_INT_EN 0x1B
+#define ADP5520_GPIO_INT_STAT 0x1C
+#define ADP5520_GPIO_INT_LVL 0x1D
+#define ADP5520_GPIO_DEBOUNCE 0x1E
+#define ADP5520_GPIO_PULLUP 0x1F
+#define ADP5520_KP_INT_STAT_1 0x20
+#define ADP5520_KP_INT_STAT_2 0x21
+#define ADP5520_KR_INT_STAT_1 0x22
+#define ADP5520_KR_INT_STAT_2 0x23
+#define ADP5520_KEY_STAT_1 0x24
+#define ADP5520_KEY_STAT_2 0x25
+
+/*
+ * MODE_STATUS bits
+ */
+
+#define ADP5520_nSTNBY (1 << 7)
+#define ADP5520_BL_EN (1 << 6)
+#define ADP5520_DIM_EN (1 << 5)
+#define ADP5520_OVP_INT (1 << 4)
+#define ADP5520_CMPR_INT (1 << 3)
+#define ADP5520_GPI_INT (1 << 2)
+#define ADP5520_KR_INT (1 << 1)
+#define ADP5520_KP_INT (1 << 0)
+
+/*
+ * INTERRUPT_ENABLE bits
+ */
+
+#define ADP5520_AUTO_LD_EN (1 << 4)
+#define ADP5520_CMPR_IEN (1 << 3)
+#define ADP5520_OVP_IEN (1 << 2)
+#define ADP5520_KR_IEN (1 << 1)
+#define ADP5520_KP_IEN (1 << 0)
+
+/*
+ * BL_CONTROL bits
+ */
+
+#define ADP5520_BL_LVL(x) ((x) << 5)
+#define ADP5520_BL_LAW(x) ((x) << 4)
+#define ADP5520_BL_AUTO_ADJ (1 << 3)
+#define ADP5520_OVP_EN (1 << 2)
+#define ADP5520_FOVR (1 << 1)
+#define ADP5520_KP_BL_EN (1 << 0)
+
+/*
+ * ALS_CMPR_CFG bits
+ */
+
+#define ADP5520_L3_OUT (1 << 3)
+#define ADP5520_L2_OUT (1 << 2)
+#define ADP5520_L3_EN (1 << 1)
+
+#define ADP5020_MAX_BRIGHTNESS 0x7F
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
+#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en)
+
+/*
+ * LEDs subdevice bits and masks
+ */
+
+#define ADP5520_01_MAXLEDS 3
+
+#define ADP5520_FLAG_LED_MASK 0x3
+#define ADP5520_FLAG_OFFT_SHIFT 8
+#define ADP5520_FLAG_OFFT_MASK 0x3
+
+#define ADP5520_R3_MODE (1 << 5)
+#define ADP5520_C3_MODE (1 << 4)
+#define ADP5520_LED_LAW (1 << 3)
+#define ADP5520_LED3_EN (1 << 2)
+#define ADP5520_LED2_EN (1 << 1)
+#define ADP5520_LED1_EN (1 << 0)
+
+/*
+ * GPIO subdevice bits and masks
+ */
+
+#define ADP5520_MAXGPIOS 8
+
+#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_GPIO_C2 (1 << 6)
+#define ADP5520_GPIO_C1 (1 << 5)
+#define ADP5520_GPIO_C0 (1 << 4)
+#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_GPIO_R2 (1 << 2)
+#define ADP5520_GPIO_R1 (1 << 1)
+#define ADP5520_GPIO_R0 (1 << 0)
+
+struct adp5520_gpio_platform_data {
+ unsigned gpio_start;
+ u8 gpio_en_mask;
+ u8 gpio_pullup_mask;
+};
+
+/*
+ * Keypad subdevice bits and masks
+ */
+
+#define ADP5520_MAXKEYS 16
+
+#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_COL_C2 (1 << 6)
+#define ADP5520_COL_C1 (1 << 5)
+#define ADP5520_COL_C0 (1 << 4)
+#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_ROW_R2 (1 << 2)
+#define ADP5520_ROW_R1 (1 << 1)
+#define ADP5520_ROW_R0 (1 << 0)
+
+#define ADP5520_KEY(row, col) (col + row * 4)
+#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
+
+struct adp5520_keys_platform_data {
+ int rows_en_mask; /* Bitmask of enabled rows */
+ int cols_en_mask; /* Bitmask of enabled columns */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+};
+
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
+#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
+#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
+
+#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
+
+#define ADP5520_LED_ONT_200ms 0
+#define ADP5520_LED_ONT_600ms 1
+#define ADP5520_LED_ONT_800ms 2
+#define ADP5520_LED_ONT_1200ms 3
+
+struct adp5520_leds_platform_data {
+ int num_leds;
+ struct led_info *leds;
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 led_on_time;
+};
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP5520_FADE_T_600ms 2
+#define ADP5520_FADE_T_900ms 3
+#define ADP5520_FADE_T_1200ms 4
+#define ADP5520_FADE_T_1500ms 5
+#define ADP5520_FADE_T_1800ms 6
+#define ADP5520_FADE_T_2100ms 7
+#define ADP5520_FADE_T_2400ms 8
+#define ADP5520_FADE_T_2700ms 9
+#define ADP5520_FADE_T_3000ms 10
+#define ADP5520_FADE_T_3500ms 11
+#define ADP5520_FADE_T_4000ms 12
+#define ADP5520_FADE_T_4500ms 13
+#define ADP5520_FADE_T_5000ms 14
+#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP5520_BL_LAW_LINEAR 0
+#define ADP5520_BL_LAW_SQUARE 1
+#define ADP5520_BL_LAW_CUBIC1 2
+#define ADP5520_BL_LAW_CUBIC2 3
+
+#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP5520_BL_AMBL_FILT_160ms 1
+#define ADP5520_BL_AMBL_FILT_320ms 2
+#define ADP5520_BL_AMBL_FILT_640ms 3
+#define ADP5520_BL_AMBL_FILT_1280ms 4
+#define ADP5520_BL_AMBL_FILT_2560ms 5
+#define ADP5520_BL_AMBL_FILT_5120ms 6
+#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+ /*
+ * Backlight current 0..30mA
+ */
+#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30)
+
+ /*
+ * L2 comparator current 0..1000uA
+ */
+#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000)
+
+ /*
+ * L3 comparator current 0..127uA
+ */
+#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127)
+
+struct adp5520_backlight_platform_data {
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+};
+
+/*
+ * MFD chip platform data
+ */
+
+struct adp5520_platform_data {
+ struct adp5520_keys_platform_data *keys;
+ struct adp5520_gpio_platform_data *gpio;
+ struct adp5520_leds_platform_data *leds;
+ struct adp5520_backlight_platform_data *backlight;
+};
+
+/*
+ * MFD chip functions
+ */
+
+extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
+extern int adp5520_write(struct device *dev, int reg, u8 val);
+extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+
+extern int adp5520_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+extern int adp5520_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+#endif /* __LINUX_MFD_ADP5520_H */
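A hypothetical board-level backlight platform data block built from the helper macros above; all current and fade values are illustrative, not recommendations:

#include <linux/mfd/adp5520.h>

static struct adp5520_backlight_platform_data example_adp5520_bl = {
	.fade_in	 = ADP5520_FADE_T_600ms,
	.fade_out	 = ADP5520_FADE_T_600ms,
	.fade_led_law	 = ADP5520_BL_LAW_LINEAR,
	.en_ambl_sens	 = 1,
	.abml_filt	 = ADP5520_BL_AMBL_FILT_640ms,
	.l1_daylight_max = ADP5520_BL_CUR_mA(20),	/* 20 mA in daylight */
	.l1_daylight_dim = ADP5520_BL_CUR_mA(0),
	.l2_office_max	 = ADP5520_BL_CUR_mA(10),
	.l2_office_dim	 = ADP5520_BL_CUR_mA(0),
	.l3_dark_max	 = ADP5520_BL_CUR_mA(4),
	.l3_dark_dim	 = ADP5520_BL_CUR_mA(0),
	.l2_trip	 = ADP5520_L2_COMP_CURR_uA(700),
	.l2_hyst	 = ADP5520_L2_COMP_CURR_uA(50),
	.l3_trip	 = ADP5520_L3_COMP_CURR_uA(60),
	.l3_hyst	 = ADP5520_L3_COMP_CURR_uA(10),
};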
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 3402042ddc31..40c372165f3e 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_LED_4MA 1
#define PCAP_LED_5MA 2
#define PCAP_LED_9MA 3
-#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff
-#define PCAP_LED_GPIO_EN 0x01000000
-#define PCAP_LED_GPIO_INVERT 0x02000000
#define PCAP_LED_T_MASK 0xf
#define PCAP_LED_C_MASK 0x3
#define PCAP_BL_MASK 0x1f
diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h
index 47e698cb0f16..95cf9360553f 100644
--- a/include/linux/mfd/mc13783-private.h
+++ b/include/linux/mfd/mc13783-private.h
@@ -24,52 +24,23 @@
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-
-struct mc13783_irq {
- void (*handler)(int, void *);
- void *data;
-};
-
-#define MC13783_NUM_IRQ 2
-#define MC13783_IRQ_TS 0
-#define MC13783_IRQ_REGULATOR 1
-
-#define MC13783_ADC_MODE_TS 1
-#define MC13783_ADC_MODE_SINGLE_CHAN 2
-#define MC13783_ADC_MODE_MULT_CHAN 3
+#include <linux/interrupt.h>
struct mc13783 {
- int revision;
- struct device *dev;
- struct spi_device *spi_device;
-
- int (*read_dev)(void *data, char reg, int count, u32 *dst);
- int (*write_dev)(void *data, char reg, int count, const u32 *src);
-
- struct mutex io_lock;
- void *io_data;
+ struct spi_device *spidev;
+ struct mutex lock;
int irq;
- unsigned int flags;
+ int flags;
- struct mc13783_irq irq_handler[MC13783_NUM_IRQ];
- struct work_struct work;
- struct completion adc_done;
- unsigned int ts_active;
- struct mutex adc_conv_lock;
+ irq_handler_t irqhandler[MC13783_NUM_IRQ];
+ void *irqdata[MC13783_NUM_IRQ];
+ /* XXX these should go as platformdata to the regulator subdevice */
struct mc13783_regulator_init_data *regulators;
int num_regulators;
};
-int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *);
-int mc13783_reg_write(struct mc13783 *, int, u32);
-int mc13783_set_bits(struct mc13783 *, int, u32, u32);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data);
-
#define MC13783_REG_INTERRUPT_STATUS_0 0
#define MC13783_REG_INTERRUPT_MASK_0 1
#define MC13783_REG_INTERRUPT_SENSE_0 2
@@ -136,55 +107,6 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_REG_TEST_3 63
#define MC13783_REG_NB 64
-
-/*
- * Interrupt Status
- */
-#define MC13783_INT_STAT_ADCDONEI (1 << 0)
-#define MC13783_INT_STAT_ADCBISDONEI (1 << 1)
-#define MC13783_INT_STAT_TSI (1 << 2)
-#define MC13783_INT_STAT_WHIGHI (1 << 3)
-#define MC13783_INT_STAT_WLOWI (1 << 4)
-#define MC13783_INT_STAT_CHGDETI (1 << 6)
-#define MC13783_INT_STAT_CHGOVI (1 << 7)
-#define MC13783_INT_STAT_CHGREVI (1 << 8)
-#define MC13783_INT_STAT_CHGSHORTI (1 << 9)
-#define MC13783_INT_STAT_CCCVI (1 << 10)
-#define MC13783_INT_STAT_CHGCURRI (1 << 11)
-#define MC13783_INT_STAT_BPONI (1 << 12)
-#define MC13783_INT_STAT_LOBATLI (1 << 13)
-#define MC13783_INT_STAT_LOBATHI (1 << 14)
-#define MC13783_INT_STAT_UDPI (1 << 15)
-#define MC13783_INT_STAT_USBI (1 << 16)
-#define MC13783_INT_STAT_IDI (1 << 19)
-#define MC13783_INT_STAT_Unused (1 << 20)
-#define MC13783_INT_STAT_SE1I (1 << 21)
-#define MC13783_INT_STAT_CKDETI (1 << 22)
-#define MC13783_INT_STAT_UDMI (1 << 23)
-
-/*
- * Interrupt Mask
- */
-#define MC13783_INT_MASK_ADCDONEM (1 << 0)
-#define MC13783_INT_MASK_ADCBISDONEM (1 << 1)
-#define MC13783_INT_MASK_TSM (1 << 2)
-#define MC13783_INT_MASK_WHIGHM (1 << 3)
-#define MC13783_INT_MASK_WLOWM (1 << 4)
-#define MC13783_INT_MASK_CHGDETM (1 << 6)
-#define MC13783_INT_MASK_CHGOVM (1 << 7)
-#define MC13783_INT_MASK_CHGREVM (1 << 8)
-#define MC13783_INT_MASK_CHGSHORTM (1 << 9)
-#define MC13783_INT_MASK_CCCVM (1 << 10)
-#define MC13783_INT_MASK_CHGCURRM (1 << 11)
-#define MC13783_INT_MASK_BPONM (1 << 12)
-#define MC13783_INT_MASK_LOBATLM (1 << 13)
-#define MC13783_INT_MASK_LOBATHM (1 << 14)
-#define MC13783_INT_MASK_UDPM (1 << 15)
-#define MC13783_INT_MASK_USBM (1 << 16)
-#define MC13783_INT_MASK_IDM (1 << 19)
-#define MC13783_INT_MASK_SE1M (1 << 21)
-#define MC13783_INT_MASK_CKDETM (1 << 22)
-
/*
* Reg Regulator Mode 0
*/
@@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_SWCTRL_SW3_STBY (1 << 21)
#define MC13783_SWCTRL_SW3_MODE (1 << 22)
-/*
- * ADC/Touch
- */
-#define MC13783_ADC0_LICELLCON (1 << 0)
-#define MC13783_ADC0_CHRGICON (1 << 1)
-#define MC13783_ADC0_BATICON (1 << 2)
-#define MC13783_ADC0_RTHEN (1 << 3)
-#define MC13783_ADC0_DTHEN (1 << 4)
-#define MC13783_ADC0_UIDEN (1 << 5)
-#define MC13783_ADC0_ADOUTEN (1 << 6)
-#define MC13783_ADC0_ADOUTPER (1 << 7)
-#define MC13783_ADC0_ADREFEN (1 << 10)
-#define MC13783_ADC0_ADREFMODE (1 << 11)
-#define MC13783_ADC0_TSMOD0 (1 << 12)
-#define MC13783_ADC0_TSMOD1 (1 << 13)
-#define MC13783_ADC0_TSMOD2 (1 << 14)
-#define MC13783_ADC0_CHRGRAWDIV (1 << 15)
-#define MC13783_ADC0_ADINC1 (1 << 16)
-#define MC13783_ADC0_ADINC2 (1 << 17)
-#define MC13783_ADC0_WCOMP (1 << 18)
-#define MC13783_ADC0_ADCBIS0 (1 << 23)
-
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_TRIGMASK (1 << 4)
-#define MC13783_ADC1_ADA10 (1 << 5)
-#define MC13783_ADC1_ADA11 (1 << 6)
-#define MC13783_ADC1_ADA12 (1 << 7)
-#define MC13783_ADC1_ADA20 (1 << 8)
-#define MC13783_ADC1_ADA21 (1 << 9)
-#define MC13783_ADC1_ADA22 (1 << 10)
-#define MC13783_ADC1_ATO0 (1 << 11)
-#define MC13783_ADC1_ATO1 (1 << 12)
-#define MC13783_ADC1_ATO2 (1 << 13)
-#define MC13783_ADC1_ATO3 (1 << 14)
-#define MC13783_ADC1_ATO4 (1 << 15)
-#define MC13783_ADC1_ATO5 (1 << 16)
-#define MC13783_ADC1_ATO6 (1 << 17)
-#define MC13783_ADC1_ATO7 (1 << 18)
-#define MC13783_ADC1_ATOX (1 << 19)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
-#define MC13783_ADC1_ADONESHOT (1 << 22)
-#define MC13783_ADC1_ADCBIS1 (1 << 23)
-
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
-
-#define MC13783_ADC2_ADD10 (1 << 2)
-#define MC13783_ADC2_ADD11 (1 << 3)
-#define MC13783_ADC2_ADD12 (1 << 4)
-#define MC13783_ADC2_ADD13 (1 << 5)
-#define MC13783_ADC2_ADD14 (1 << 6)
-#define MC13783_ADC2_ADD15 (1 << 7)
-#define MC13783_ADC2_ADD16 (1 << 8)
-#define MC13783_ADC2_ADD17 (1 << 9)
-#define MC13783_ADC2_ADD18 (1 << 10)
-#define MC13783_ADC2_ADD19 (1 << 11)
-#define MC13783_ADC2_ADD20 (1 << 14)
-#define MC13783_ADC2_ADD21 (1 << 15)
-#define MC13783_ADC2_ADD22 (1 << 16)
-#define MC13783_ADC2_ADD23 (1 << 17)
-#define MC13783_ADC2_ADD24 (1 << 18)
-#define MC13783_ADC2_ADD25 (1 << 19)
-#define MC13783_ADC2_ADD26 (1 << 20)
-#define MC13783_ADC2_ADD27 (1 << 21)
-#define MC13783_ADC2_ADD28 (1 << 22)
-#define MC13783_ADC2_ADD29 (1 << 23)
+static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val)
+{
+ int ret;
+ mc13783_lock(mc13783);
+ ret = mc13783_reg_rmw(mc13783, offset, mask, val);
+ mc13783_unlock(mc13783);
-#define MC13783_ADC3_WHIGH0 (1 << 0)
-#define MC13783_ADC3_WHIGH1 (1 << 1)
-#define MC13783_ADC3_WHIGH2 (1 << 2)
-#define MC13783_ADC3_WHIGH3 (1 << 3)
-#define MC13783_ADC3_WHIGH4 (1 << 4)
-#define MC13783_ADC3_WHIGH5 (1 << 5)
-#define MC13783_ADC3_ICID0 (1 << 6)
-#define MC13783_ADC3_ICID1 (1 << 7)
-#define MC13783_ADC3_ICID2 (1 << 8)
-#define MC13783_ADC3_WLOW0 (1 << 9)
-#define MC13783_ADC3_WLOW1 (1 << 10)
-#define MC13783_ADC3_WLOW2 (1 << 11)
-#define MC13783_ADC3_WLOW3 (1 << 12)
-#define MC13783_ADC3_WLOW4 (1 << 13)
-#define MC13783_ADC3_WLOW5 (1 << 14)
-#define MC13783_ADC3_ADCBIS2 (1 << 23)
-
-#define MC13783_ADC4_ADDBIS10 (1 << 2)
-#define MC13783_ADC4_ADDBIS11 (1 << 3)
-#define MC13783_ADC4_ADDBIS12 (1 << 4)
-#define MC13783_ADC4_ADDBIS13 (1 << 5)
-#define MC13783_ADC4_ADDBIS14 (1 << 6)
-#define MC13783_ADC4_ADDBIS15 (1 << 7)
-#define MC13783_ADC4_ADDBIS16 (1 << 8)
-#define MC13783_ADC4_ADDBIS17 (1 << 9)
-#define MC13783_ADC4_ADDBIS18 (1 << 10)
-#define MC13783_ADC4_ADDBIS19 (1 << 11)
-#define MC13783_ADC4_ADDBIS20 (1 << 14)
-#define MC13783_ADC4_ADDBIS21 (1 << 15)
-#define MC13783_ADC4_ADDBIS22 (1 << 16)
-#define MC13783_ADC4_ADDBIS23 (1 << 17)
-#define MC13783_ADC4_ADDBIS24 (1 << 18)
-#define MC13783_ADC4_ADDBIS25 (1 << 19)
-#define MC13783_ADC4_ADDBIS26 (1 << 20)
-#define MC13783_ADC4_ADDBIS27 (1 << 21)
-#define MC13783_ADC4_ADDBIS28 (1 << 22)
-#define MC13783_ADC4_ADDBIS29 (1 << 23)
+ return ret;
+}
#endif /* __LINUX_MFD_MC13783_PRIV_H */
-
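The inline above is just a lock/rmw/unlock convenience; a caller that needs several accesses done atomically is expected to take the lock once itself, roughly as in this sketch (register choice and values are illustrative):

#include <linux/mfd/mc13783.h>

static int example_batch_update(struct mc13783 *mc13783)
{
	int ret;

	mc13783_lock(mc13783);
	ret = mc13783_reg_rmw(mc13783, MC13783_ADC0,
			      MC13783_ADC0_ADREFEN, MC13783_ADC0_ADREFEN);
	if (!ret)
		ret = mc13783_reg_rmw(mc13783, MC13783_ADC0,
				      MC13783_ADC0_TSMOD_MASK,
				      MC13783_ADC0_TSMOD0);
	mc13783_unlock(mc13783);

	return ret;
}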
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b3a2a7243573..35680409b8cf 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,28 +1,50 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
+#ifndef __LINUX_MFD_MC13783_H
+#define __LINUX_MFD_MC13783_H
-#ifndef __INCLUDE_LINUX_MFD_MC13783_H
-#define __INCLUDE_LINUX_MFD_MC13783_H
+#include <linux/interrupt.h>
struct mc13783;
+
+void mc13783_lock(struct mc13783 *mc13783);
+void mc13783_unlock(struct mc13783 *mc13783);
+
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val);
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val);
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val);
+
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev);
+int mc13783_ackirq(struct mc13783 *mc13783, int irq);
+
+int mc13783_mask(struct mc13783 *mc13783, int irq);
+int mc13783_unmask(struct mc13783 *mc13783, int irq);
+
+#define MC13783_ADC0 43
+#define MC13783_ADC0_ADREFEN (1 << 10)
+#define MC13783_ADC0_ADREFMODE (1 << 11)
+#define MC13783_ADC0_TSMOD0 (1 << 12)
+#define MC13783_ADC0_TSMOD1 (1 << 13)
+#define MC13783_ADC0_TSMOD2 (1 << 14)
+#define MC13783_ADC0_ADINC1 (1 << 16)
+#define MC13783_ADC0_ADINC2 (1 << 17)
+
+#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \
+ MC13783_ADC0_TSMOD1 | \
+ MC13783_ADC0_TSMOD2)
+
+/* to be cleaned up */
struct regulator_init_data;
struct mc13783_regulator_init_data {
@@ -30,23 +52,30 @@ struct mc13783_regulator_init_data {
struct regulator_init_data *init_data;
};
-struct mc13783_platform_data {
- struct mc13783_regulator_init_data *regulators;
+struct mc13783_regulator_platform_data {
int num_regulators;
- unsigned int flags;
+ struct mc13783_regulator_init_data *regulators;
};
-/* mc13783_platform_data flags */
+struct mc13783_platform_data {
+ int num_regulators;
+ struct mc13783_regulator_init_data *regulators;
+
#define MC13783_USE_TOUCHSCREEN (1 << 0)
#define MC13783_USE_CODEC (1 << 1)
#define MC13783_USE_ADC (1 << 2)
#define MC13783_USE_RTC (1 << 3)
#define MC13783_USE_REGULATOR (1 << 4)
+ unsigned int flags;
+};
+
+#define MC13783_ADC_MODE_TS 1
+#define MC13783_ADC_MODE_SINGLE_CHAN 2
+#define MC13783_ADC_MODE_MULT_CHAN 3
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_SW_SW1A 0
#define MC13783_SW_SW1B 1
@@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
-#endif /* __INCLUDE_LINUX_MFD_MC13783_H */
+#define MC13783_IRQ_ADCDONE 0
+#define MC13783_IRQ_ADCBISDONE 1
+#define MC13783_IRQ_TS 2
+#define MC13783_IRQ_WHIGH 3
+#define MC13783_IRQ_WLOW 4
+#define MC13783_IRQ_CHGDET 6
+#define MC13783_IRQ_CHGOV 7
+#define MC13783_IRQ_CHGREV 8
+#define MC13783_IRQ_CHGSHORT 9
+#define MC13783_IRQ_CCCV 10
+#define MC13783_IRQ_CHGCURR 11
+#define MC13783_IRQ_BPON 12
+#define MC13783_IRQ_LOBATL 13
+#define MC13783_IRQ_LOBATH 14
+#define MC13783_IRQ_UDP 15
+#define MC13783_IRQ_USB 16
+#define MC13783_IRQ_ID 19
+#define MC13783_IRQ_SE1 21
+#define MC13783_IRQ_CKDET 22
+#define MC13783_IRQ_UDM 23
+#define MC13783_IRQ_1HZ 24
+#define MC13783_IRQ_TODA 25
+#define MC13783_IRQ_ONOFD1 27
+#define MC13783_IRQ_ONOFD2 28
+#define MC13783_IRQ_ONOFD3 29
+#define MC13783_IRQ_SYSRST 30
+#define MC13783_IRQ_RTCRST 31
+#define MC13783_IRQ_PC 32
+#define MC13783_IRQ_WARM 33
+#define MC13783_IRQ_MEMHLD 34
+#define MC13783_IRQ_PWRRDY 35
+#define MC13783_IRQ_THWARNL 36
+#define MC13783_IRQ_THWARNH 37
+#define MC13783_IRQ_CLK 38
+#define MC13783_IRQ_SEMAF 39
+#define MC13783_IRQ_MC2B 41
+#define MC13783_IRQ_HSDET 42
+#define MC13783_IRQ_HSL 43
+#define MC13783_IRQ_ALSPTH 44
+#define MC13783_IRQ_AHSSHORT 45
+#define MC13783_NUM_IRQ 46
+#endif /* __LINUX_MFD_MC13783_H */
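A hedged sketch of a subdevice driver using the new IRQ API declared above; the handler and name strings are hypothetical:

#include <linux/interrupt.h>
#include <linux/mfd/mc13783.h>

static irqreturn_t example_adcdone_irq(int irq, void *data)
{
	struct mc13783 *mc13783 = data;

	mc13783_ackirq(mc13783, irq);
	/* ... read out the conversion result here ... */
	return IRQ_HANDLED;
}

static int example_adc_init(struct mc13783 *mc13783)
{
	return mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
				   example_adcdone_irq, "example-adc",
				   mc13783);
}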
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 9aba7b779fbc..3398bd9aab11 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
char **batteries;
int num_batteries;
- int charging_restart_interval;
+ /*
+ * Should be set according to the reference resistor used, see
+ * I_{ch(ref)} charger reference current in the pcf50633 User
+ * Manual.
+ */
+ int charger_reference_current_ma;
/* Callbacks */
void (*probe_done)(struct pcf50633 *);
@@ -40,10 +45,6 @@ struct pcf50633_platform_data {
u8 resumers[5];
};
-struct pcf50633_subdev_pdata {
- struct pcf50633 *pcf;
-};
-
struct pcf50633_irq {
void (*handler) (int, void *);
void *data;
@@ -217,5 +218,9 @@ enum pcf50633_reg_int5 {
#define PCF50633_REG_LEDCTL 0x2a
#define PCF50633_REG_LEDDIM 0x2b
-#endif
+static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+#endif
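With pcf50633_subdev_pdata gone, a subdevice is expected to reach the core chip through its parent device's drvdata via the new helper; a minimal, hypothetical probe sketch:

#include <linux/platform_device.h>
#include <linux/mfd/pcf50633/core.h>

static int example_subdev_probe(struct platform_device *pdev)
{
	struct pcf50633 *pcf = dev_to_pcf50633(pdev->dev.parent);

	platform_set_drvdata(pdev, pcf);
	return 0;
}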
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2c..df4f5fa88de3 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
int pcf50633_mbc_get_status(struct pcf50633 *);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
#endif
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 91eb493bf14c..5184b79c700b 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -16,7 +16,6 @@
#define __MFD_WM831X_CORE_H__
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
/*
* Register values.
@@ -117,6 +116,7 @@
#define WM831X_DC3_SLEEP_CONTROL 0x4063
#define WM831X_DC4_CONTROL 0x4064
#define WM831X_DC4_SLEEP_CONTROL 0x4065
+#define WM832X_DC4_SLEEP_CONTROL 0x4067
#define WM831X_EPE1_CONTROL 0x4066
#define WM831X_EPE2_CONTROL 0x4067
#define WM831X_LDO1_CONTROL 0x4068
@@ -235,6 +235,8 @@
struct regulator_dev;
+#define WM831X_NUM_IRQ_REGS 5
+
struct wm831x {
struct mutex io_lock;
@@ -248,10 +250,11 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- struct workqueue_struct *irq_wq;
- struct work_struct irq_work;
unsigned int irq_base;
- int irq_masks[5];
+ int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
+ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+
+ int num_gpio;
struct mutex auxadc_lock;
@@ -278,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
-int __must_check wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *);
-void wm831x_disable_irq(struct wm831x *wm831x, int irq);
-void wm831x_enable_irq(struct wm831x *wm831x, int irq);
+static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
+ unsigned int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name,
+ void *dev)
+{
+ return request_threaded_irq(irq, NULL, handler, flags, name, dev);
+}
+
+static inline void wm831x_free_irq(struct wm831x *wm831x,
+ unsigned int irq, void *dev)
+{
+ free_irq(irq, dev);
+}
+
+static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
+{
+ disable_irq(irq);
+}
+
+static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+{
+ enable_irq(irq);
+}
#endif
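Since the helpers above now map straight onto genirq, a subdevice simply supplies a threaded handler; a minimal sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/mfd/wm831x/core.h>

static irqreturn_t wm831x_example_irq(int irq, void *data)
{
	/* handle the event */
	return IRQ_HANDLED;
}

static int wm831x_example_init(struct wm831x *wm831x, int irq)
{
	return wm831x_request_irq(wm831x, irq, wm831x_example_irq,
				  IRQF_TRIGGER_RISING, "wm831x-example",
				  wm831x);
}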
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 90d820260aad..415c228743d5 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -91,6 +91,7 @@ struct wm831x_pdata {
/** Called after subdevices are set up */
int (*post_init)(struct wm831x *wm831x);
+ int irq_base;
int gpio_base;
struct wm831x_backlight_pdata *backlight;
struct wm831x_backup_pdata *backup;
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 1d595de6a055..43868899bf49 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[];
struct wm8350;
struct wm8350_irq {
- void (*handler) (struct wm8350 *, int, void *);
+ irq_handler_t handler;
void *data;
};
@@ -646,10 +646,12 @@ struct wm8350 {
* @init: Function called during driver initialisation. Should be
* used by the platform to configure GPIO functions and similar.
* @irq_high: Set if WM8350 IRQ is active high.
+ * @irq_base: Base IRQ for genirq (not currently used).
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
int irq_high;
+ int irq_base;
};
@@ -676,11 +678,13 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
* WM8350 internal interrupts
*/
int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data);
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data);
int wm8350_free_irq(struct wm8350 *wm8350, int irq);
int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
-
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+int wm8350_irq_exit(struct wm8350 *wm8350);
#endif
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index ed91e8f5d298..71af3d6ebe9d 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -173,6 +173,24 @@
#define WM8350_GPIO_DEBOUNCE_ON 1
/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+
+/*
* R128 (0x80) - GPIO Debounce
*/
#define WM8350_GP12_DB 0x1000
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1c..7f085c97c799 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private) { return -ENOSYS; }
-
-static inline int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest) { return 0; }
+ unsigned long private, int offlining) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4d..9d65ae4ba0e0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else
-#endif
- if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+ return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -767,6 +780,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
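A worked, userspace-runnable illustration of the mapping-pointer encoding described above (plain C with the flag values copied from this header, not kernel code):

#include <assert.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON  1
#define PAGE_MAPPING_KSM   2
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

int main(void)
{
	uintptr_t stable_node = 0x1000;	/* pretend pointer, suitably aligned */
	uintptr_t mapping = stable_node | PAGE_MAPPING_ANON | PAGE_MAPPING_KSM;

	/* PageKsm(): both low bits set */
	assert((mapping & PAGE_MAPPING_FLAGS) ==
	       (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM));
	/* page_rmapping(): mask the flag bits off to recover the pointer */
	assert((mapping & ~(uintptr_t)PAGE_MAPPING_FLAGS) == stable_node);
	return 0;
}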
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c060208109..9b8299af3741 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
#define SEQ4_STATUS_LEASE_MOVED 0x00000080
#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
+#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
+#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
#define NFS4_MAX_UINT64 (~(u64)0)
@@ -528,6 +530,7 @@ enum {
NFSPROC4_CLNT_DESTROY_SESSION,
NFSPROC4_CLNT_SEQUENCE,
NFSPROC4_CLNT_GET_LEASE_TIME,
+ NFSPROC4_CLNT_RECLAIM_COMPLETE,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3b..34fc6be5bfcf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
+ struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c8..51071b335751 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -170,8 +170,9 @@ struct nfs4_sequence_args {
struct nfs4_sequence_res {
struct nfs4_session *sr_session;
u8 sr_slotid; /* slot used to send request */
- unsigned long sr_renewal_time;
int sr_status; /* sequence operation status */
+ unsigned long sr_renewal_time;
+ u32 sr_status_flags;
};
struct nfs4_get_lease_time_args {
@@ -938,6 +939,16 @@ struct nfs41_create_session_args {
struct nfs41_create_session_res {
struct nfs_client *client;
};
+
+struct nfs41_reclaim_complete_args {
+ /* In the future extend to include curr_fh for use with migration */
+ unsigned char one_fs:1;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs41_reclaim_complete_res {
+ struct nfs4_sequence_res seq_res;
+};
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a86..06292dac3eab 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
#include <linux/sysdev.h>
#include <linux/cpumask.h>
+#include <linux/workqueue.h>
struct node {
struct sys_device sysdev;
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ struct work_struct node_work;
+#endif
};
struct memory_block;
extern struct node node_devices[];
+typedef void (*node_registration_func_t)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
+
+#ifdef CONFIG_HUGETLBFS
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
+#endif
#else
static inline int register_one_node(int nid)
{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
return 0;
}
+
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
+{
+}
#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9e..454997cccbd8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*mask);
+ node_set(node, *mask);
+}
+
#define nodemask_of_node(node) \
({ \
typeof(_unused_nodemask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(node); \
+ m.bits[0] = 1UL << (node); \
} else { \
- nodes_clear(m); \
- node_set((node), m); \
+ init_nodemask_of_node(&m, (node)); \
} \
m; \
})
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h)
+ * For nodemask scratch area.
+ * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with the
+ * specified type, name and gfp flags.
*/
-
-#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
-#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define NODEMASK_FREE(m) kfree(m)
+#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
+#define NODEMASK_ALLOC(type, name, gfp_flags) \
+ type *name = kmalloc(sizeof(*name), gfp_flags)
+#define NODEMASK_FREE(m) kfree(m)
#else
-#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
-#define NODEMASK_FREE(m)
+#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
+#define NODEMASK_FREE(m) do {} while (0)
#endif
/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
@@ -497,8 +504,10 @@ struct nodemask_scratch {
nodemask_t mask2;
};
-#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
-#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+#define NODEMASK_SCRATCH(x) \
+ NODEMASK_ALLOC(struct nodemask_scratch, x, \
+ GFP_KERNEL | __GFP_NORETRY)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
#endif /* __LINUX_NODEMASK_H */
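A hedged sketch of the reworked scratch-area helpers in use; the function is hypothetical and works with either NODEMASK_ALLOC() variant (stack or kmalloc):

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

static int example_use_scratch(int nid)
{
	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);

	if (!mask)	/* only possible in the kmalloc variant */
		return -ENOMEM;

	init_nodemask_of_node(mask, nid);
	/* ... pass *mask to an allocator or page walker here ... */

	NODEMASK_FREE(mask);
	return 0;
}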
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b159..3aaa31603a86 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define NUMA_NO_NODE (-1)
+
#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b173955..49e907bd067f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,7 +99,7 @@ enum pageflags {
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
-#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
#define __PG_MLOCKED 0
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 04771b9c3316..bf1e67080849 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1255,7 +1255,7 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
-extern u8 pci_dfl_cache_line_size;
+extern u8 __devinitdata pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd4..5a5d6ce4bd55 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..cf5efbcf716c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
- void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
kfree(p);
}
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ return __pa(addr);
+}
+
static inline void __init setup_per_cpu_areas(void) { }
static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
- __alignof__(type))
+#define alloc_percpu(type) \
+ (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
#endif
+/*
+ * Branch to one of a set of size-specific functions, chosen according to
+ * the scalar size of the object handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable) \
+({ typeof(variable) pscr_ret__; \
+ switch(sizeof(variable)) { \
+ case 1: pscr_ret__ = stem##1(variable);break; \
+ case 2: pscr_ret__ = stem##2(variable);break; \
+ case 4: pscr_ret__ = stem##4(variable);break; \
+ case 8: pscr_ret__ = stem##8(variable);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+ pscr_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+ switch(sizeof(variable)) { \
+ case 1: stem##1(variable, __VA_ARGS__);break; \
+ case 2: stem##2(variable, __VA_ARGS__);break; \
+ case 4: stem##4(variable, __VA_ARGS__);break; \
+ case 8: stem##8(variable, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+} while (0)
+
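/*
 * Editor's note (illustration, not part of the commit): for a 4-byte
 * per cpu variable the dispatch above resolves to the _4 variant, e.g.
 *
 *	DEFINE_PER_CPU(int, hypothetical_counter);
 *
 *	this_cpu_read(hypothetical_counter)
 *	  -> __pcpu_size_call_return(this_cpu_read_, hypothetical_counter)
 *	  -> this_cpu_read_4(hypothetical_counter)
 *
 * so an architecture only needs to supply this_cpu_read_4() (or override
 * this_cpu_read() entirely) to replace the generic fallback.
 */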
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ * The arch must then ensure that the various scalar formats passed
+ * are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ * sized RMW actions. If arch code does not provide operations for
+ * a scalar size then the fallback in the generic code will be
+ * used.
+ */
+
+#define _this_cpu_generic_read(pcp) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = *this_cpu_ptr(&(pcp)); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ preempt_disable(); \
+ *__this_cpu_ptr(&pcp) op val; \
+ preempt_enable(); \
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
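/*
 * Editor's illustration (not part of the commit): a hypothetical user of
 * the preempt-safe operations defined above, callable from preemptible
 * context without any extra locking.
 */
DEFINE_PER_CPU(unsigned long, example_event_count);

static void example_account_event(void)
{
	/* Safe even if this thread can be preempted or migrated. */
	this_cpu_inc(example_event_count);
}

static unsigned long example_peek_local_count(void)
{
	return this_cpu_read(example_event_count);
}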
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *__this_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
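/*
 * Editor's illustration (not part of the commit): the __this_cpu_* forms
 * above leave preemption handling to the caller.  Hypothetical example:
 */
DEFINE_PER_CPU(unsigned long, example_stat);

static void example_bump_stat(void)
{
	preempt_disable();
	__this_cpu_add(example_stat, 1);	/* no implicit preempt protection */
	preempt_enable();
}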
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processor (which one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ *__this_cpu_ptr(&(pcp)) op val; \
+ local_irq_restore(flags); \
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
#endif /* __LINUX_PERCPU_H */
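/*
 * Editor's illustration (not part of the commit): the irqsafe_cpu_* forms
 * are atomic only vs. local interrupts and preemption.  A hypothetical
 * counter touched from both interrupt and process context on one CPU:
 */
#include <linux/interrupt.h>

DEFINE_PER_CPU(unsigned long, example_irq_seen);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	irqsafe_cpu_inc(example_irq_seen);
	return IRQ_HANDLED;
}

static void example_process_context_update(void)
{
	/* Consistent vs. the local interrupt handler above. */
	irqsafe_cpu_add(example_irq_seen, 2);
}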
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 64a53f74c9a9..da7bdc23f279 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -681,7 +681,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..8227f717c70f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246af..198b8f9fe05e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-struct dev_pm_ops name = { \
+const struct dev_pm_ops name = { \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d92480f8285c..1cbbd2c11aa9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -78,6 +78,25 @@ struct raid6_calls {
/* Selected algorithm */
extern struct raid6_calls raid6_call;
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
int raid6_select_algo(void);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba7032609..b019ae64e2ab 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
+#ifdef CONFIG_KSM
+ atomic_t ksm_refcount;
+#endif
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
};
#ifdef CONFIG_MMU
+#ifdef CONFIG_KSM
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+ atomic_set(&anon_vma->ksm_refcount, 0);
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return atomic_read(&anon_vma->ksm_refcount);
+}
+#else
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return 0;
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+ if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+ PAGE_MAPPING_ANON)
+ return NULL;
+ return page_rmapping(page);
+}
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
/*
* rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+ unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
enum ttu_flags {
TTU_UNMAP = 0, /* unmap mode */
TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+ unsigned long address, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
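/*
 * Editor's illustration (not part of the commit): the shape of a
 * hypothetical rmap_walk() callback; migration's pte fix-up takes this
 * form and returns SWAP_AGAIN to keep the walk going.
 */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	/* inspect or rewrite the pte mapping @page at @address in @vma */
	return SWAP_AGAIN;
}
/* usage: rmap_walk(page, example_rmap_one, NULL); */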
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..281d8fd775e8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..71e0b00b6f2c
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
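/*
 * Editor's illustration (not part of the commit): typical use of the
 * reader/writer methods declared above.  Names are hypothetical.
 */
static DEFINE_RWLOCK(example_list_lock);

static void example_reader(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_list_lock, flags);
	/* ... walk the shared structure ... */
	read_unlock_irqrestore(&example_list_lock, flags);
}

static void example_writer(void)
{
	write_lock_bh(&example_list_lock);
	/* ... modify the shared structure ... */
	write_unlock_bh(&example_list_lock);
}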
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000000..9c9f0495d37c
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..bd31808c7d8e
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
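/*
 * Editor's illustration (not part of the commit): preferred static
 * initialization with the types above; RW_LOCK_UNLOCKED is deprecated.
 */
static DEFINE_RWLOCK(example_rwlock);					/* preferred */
static rwlock_t example_other = __RW_LOCK_UNLOCKED(example_other);	/* equivalent */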
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261f..bdfcc2527970 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->activity != 0);
-}
+extern int rwsem_is_locked(struct rw_semaphore *sem);
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 294eb2f80144..5c858f38e81a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1409,7 +1409,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
@@ -1446,8 +1446,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
+#ifdef CONFIG_FS_JOURNAL_INFO
/* journalling filesystem info */
void *journal_info;
+#endif
/* stacked block device info */
struct bio *bio_list, **bio_tail;
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500de..ca6b2b317991 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
@@ -166,7 +166,7 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5ad70a60fd74..1e14beb23f9b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h
deleted file mode 100644
index d1004cf09241..000000000000
--- a/include/linux/spi/mpc52xx_spi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#ifndef INCLUDE_MPC5200_SPI_H
-#define INCLUDE_MPC5200_SPI_H
-
-extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master,
- void (*hook)(struct spi_message *m,
- void *context),
- void *hook_context);
-
-#endif
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 000000000000..2e8db3d2d2e5
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,10 @@
+#ifndef __SPI_SH_MSIOF_H__
+#define __SPI_SH_MSIOF_H__
+
+struct sh_msiof_spi_info {
+ int tx_fifo_override;
+ int rx_fifo_override;
+ u16 num_chipselect;
+};
+
+#endif /* __SPI_SH_MSIOF_H__ */
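/*
 * Editor's illustration (not part of the commit): a board file would hand
 * this structure to the MSIOF controller as platform data.  The values
 * are made up; zero in the override fields presumably leaves the driver
 * defaults in place.
 */
static struct sh_msiof_spi_info example_msiof_info = {
	.tx_fifo_override	= 0,
	.rx_fifo_override	= 0,
	.num_chipselect		= 1,
};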
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 000000000000..6f17278810b0
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/**
+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
+ * @num_chipselect: Number of chip selects provided by the IP.
+ * @little_endian: Whether registers should be accessed little endian.
+ * @bits_per_word: Number of bits per word.
+ * @devices: Devices to add when the driver is probed.
+ * @num_devices: Number of devices in the devices array.
+ */
+struct xspi_platform_data {
+ u16 num_chipselect;
+ bool little_endian;
+ u8 bits_per_word;
+ struct spi_board_info *devices;
+ u8 num_devices;
+};
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
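/*
 * Editor's illustration (not part of the commit): a hypothetical platform
 * data instance matching the kernel-doc above (struct spi_board_info comes
 * from <linux/spi/spi.h>).
 */
static struct spi_board_info example_spi_devices[] = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data example_xspi_pdata = {
	.num_chipselect	= 1,
	.little_endian	= false,
	.bits_per_word	= 8,
	.devices	= example_spi_devices,
	.num_devices	= ARRAY_SIZE(example_spi_devices),
};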
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d88..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -75,12 +75,12 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
-# define spin_lock_init(lock) \
- do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key); \
-} while (0)
-#else
-# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+}
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
-#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
- } while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#endif
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ assert_raw_spin_locked(&lock->rlock);
+}
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock) (!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#endif /* __LINUX_SPINLOCK_H */
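
The net effect of the spinlock.h changes above: spinlock_t becomes a thin wrapper whose payload is the rlock member (a raw_spinlock_t), and every spin_*() helper forwards to the matching raw_spin_*() operation through spinlock_check()/&lock->rlock. A minimal usage sketch of the two APIs side by side; the struct and function names are illustrative, not from the patch:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_core_lock);	/* raw API, used directly by core code */

struct demo_dev {
	spinlock_t lock;			/* wrapper type; operations forward to lock.rlock */
	unsigned long events;
};

static void demo_dev_init(struct demo_dev *d)
{
	spin_lock_init(&d->lock);		/* type-checked via spinlock_check() */
}

static void demo_dev_event(struct demo_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);	/* == raw_spin_lock_irqsave(&d->lock.rlock, flags) */
	d->events++;
	spin_unlock_irqrestore(&d->lock, flags);

	raw_spin_lock(&demo_core_lock);		/* the raw variant bypasses the wrapper entirely */
	raw_spin_unlock(&demo_core_lock);
}
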
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
-
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
- __acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
/*
 * On lockdep we don't want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
+ * do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
- _raw_spin_lock_flags(lock, &flags);
+ do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
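
The SMP implementations above all follow the same shape: disable preemption (and, where needed, interrupts or bottom halves), record the acquire with lockdep, then take the lock through do_raw_spin_trylock()/do_raw_spin_lock() via LOCK_CONTENDED. From a caller's point of view the main new spelling is raw_spin_trylock(); a small sketch of the usual try-then-fall-back idiom, with hypothetical names:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/spinlock.h>

static int demo_try_fast_path(raw_spinlock_t *lock, unsigned long *counter)
{
	if (!raw_spin_trylock(lock))		/* nonzero return means we now own the lock */
		return 0;			/* contended: caller takes the slow path */

	(*counter)++;				/* short critical section */
	raw_spin_unlock(lock);
	return 1;
}
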
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable_no_resched(); local_bh_enable(); \
+ __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _read_lock(lock) __LOCK(lock)
-#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
-#define _read_lock_bh(lock) __LOCK_BH(lock)
-#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _read_lock_irq(lock) __LOCK_IRQ(lock)
-#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
-#define _read_unlock(lock) __UNLOCK(lock)
-#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
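
On UP builds without CONFIG_DEBUG_SPINLOCK there is no lock word to operate on, so the _raw_*() entry points above reduce to preemption and interrupt control, and every trylock unconditionally succeeds. A rough paraphrase of what _raw_spin_lock_irqsave() amounts to once these macros expand (not literal preprocessor output):

/* Rough paraphrase of the UP non-debug expansion -- not literal kernel code. */
#include <linux/irqflags.h>
#include <linux/preempt.h>

static inline unsigned long demo_up_lock_irqsave(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* __LOCK_IRQSAVE: interrupts off ... */
	preempt_disable();		/* ... then preemption off; no lock word is touched */
	return flags;
}
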
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
#include <linux/lockdep.h>
-typedef struct {
- raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -29,26 +29,10 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- .magic = SPINLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
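
With the type split, the initializers split as well: DEFINE_RAW_SPINLOCK()/__RAW_SPIN_LOCK_UNLOCKED() produce a raw_spinlock_t, while DEFINE_SPINLOCK()/__SPIN_LOCK_UNLOCKED() produce the wrapping spinlock_t. A short sketch of the common declaration forms; the identifiers are illustrative only:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);		/* spinlock_t: the type -rt is expected to substitute */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* raw_spinlock_t: always a spinning lock */

struct demo_table {
	spinlock_t lock;
};

static struct demo_table demo_table = {
	/* keeps a distinct lockdep class name for the embedded lock */
	.lock = __SPIN_LOCK_UNLOCKED(demo_table.lock),
};
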
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..c09b6407ae1b 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..b14f6a91e19f 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x) ((x)->slock == 0)
+#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
-#define __raw_spin_is_contended(lock) (((void)(lock), 0))
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
-#define __raw_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
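
Under the new naming an architecture's spinlock backend supplies arch_spinlock_t together with arch_spin_lock()/arch_spin_trylock()/arch_spin_unlock() (plus the rwlock equivalents), instead of the old __raw_* names. A deliberately naive test-and-set sketch of that contract for a hypothetical SMP port, using GCC's __sync builtins as stand-ins for a port's real atomics; no real architecture implements it this way:

/* Hypothetical, simplified backend -- for illustration only. */
typedef struct {
	volatile unsigned int slock;		/* 0 = unlocked, 1 = locked in this sketch */
} demo_arch_spinlock_t;

#define DEMO_ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

static inline int demo_arch_spin_trylock(demo_arch_spinlock_t *lock)
{
	/* __sync_lock_test_and_set() returns the previous value; 0 means we took the lock */
	return __sync_lock_test_and_set(&lock->slock, 1) == 0;
}

static inline void demo_arch_spin_lock(demo_arch_spinlock_t *lock)
{
	while (!demo_arch_spin_trylock(lock))
		;				/* a real port would call cpu_relax() here */
}

static inline void demo_arch_spin_unlock(demo_arch_spinlock_t *lock)
{
	__sync_lock_release(&lock->slock);	/* release-ordered store of 0 */
}
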
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5ad..651839a2a755 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,7 +62,15 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
-extern char * __must_check strstrip(char *);
+extern char * __must_check skip_spaces(const char *);
+
+extern char *strim(char *);
+
+static inline __must_check char *strstrip(char *str)
+{
+ return strim(str);
+}
+
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
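
The string.h hunk keeps strstrip() as an inline wrapper around the new strim(); strim() is expected to terminate trailing whitespace in place and return a pointer past any leading whitespace (hence the __must_check), while skip_spaces() only does the latter. A small caller sketch under that reading:

/* Illustrative sketch, assuming the strim() behaviour described above. */
#include <linux/string.h>

static int demo_parse_line(char *buf)
{
	char *s = strim(buf);		/* use the returned pointer, not buf */

	if (*s == '\0')
		return 0;		/* line was blank */
	/* ... parse s ... */
	return 1;
}
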
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc0..1906782ec86b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
+#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a4..a2602a8207a6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
+ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32
-#define SWAP_MAP_MAX 0x7ffe
-#define SWAP_MAP_BAD 0x7fff
-#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */
-#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
+#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
+#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
+#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
+#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
- unsigned long flags;
- int prio; /* swap priority */
- int next; /* next entry on swap list */
- struct file *swap_file;
- struct block_device *bdev;
- struct list_head extent_list;
- struct swap_extent *curr_swap_extent;
- unsigned short *swap_map;
- unsigned int lowest_bit;
- unsigned int highest_bit;
+ unsigned long flags; /* SWP_USED etc: see above */
+ signed short prio; /* swap priority of this type */
+ signed char type; /* strange name for an index */
+ signed char next; /* next type on the swap list */
+ unsigned int max; /* extent of the swap_map */
+ unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned int lowest_bit; /* index of first free in swap_map */
+ unsigned int highest_bit; /* index of last free in swap_map */
+ unsigned int pages; /* total of usable pages of swap */
+ unsigned int inuse_pages; /* number of those currently in use */
+ unsigned int cluster_next; /* likely index for next allocation */
+ unsigned int cluster_nr; /* countdown to next cluster search */
unsigned int lowest_alloc; /* while preparing discard cluster */
unsigned int highest_alloc; /* while preparing discard cluster */
- unsigned int cluster_next;
- unsigned int cluster_nr;
- unsigned int pages;
- unsigned int max;
- unsigned int inuse_pages;
- unsigned int old_block_size;
+ struct swap_extent *curr_swap_extent;
+ struct swap_extent first_swap_extent;
+ struct block_device *bdev; /* swap device or bdev of swap file */
+ struct file *swap_file; /* seldom referenced */
+ unsigned int old_block_size; /* seldom referenced */
};
struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
-extern void swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
+extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
+extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
#define free_swap_and_cache(swp) is_migration_entry(swp)
#define swapcache_prepare(swp) is_migration_entry(swp)
-static inline void swap_duplicate(swp_entry_t swp)
+static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
+ return 0;
+}
+
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
}
static inline void swap_free(swp_entry_t swp)
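
The swap.h hunk shrinks swap_map entries from unsigned short to unsigned char: the low six bits hold the duplication count (up to SWAP_MAP_MAX) or a marker such as SWAP_MAP_BAD, SWAP_HAS_CACHE flags a page that also sits in the swap cache, and COUNT_CONTINUED indicates the full count continues in separately allocated continuation pages (set up via add_swap_count_continuation()); SWAP_MAP_SHMEM overlays the same bits as a shmem ownership marker. A sketch of how those constants carve up one byte, as an interpretation of the definitions above rather than code from the patch:

/* Illustrative decoding helpers based on the constants above. */
static inline int demo_swap_has_cache(unsigned char ent)
{
	return ent & SWAP_HAS_CACHE;				/* 0x40 */
}

static inline int demo_swap_count_continued(unsigned char ent)
{
	return ent & COUNT_CONTINUED;				/* 0x80 */
}

static inline unsigned char demo_swap_count_bits(unsigned char ent)
{
	return ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);	/* low 6 bits: count or marker */
}
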
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 405a9035fe40..ef3a2947b102 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
extern struct ktermios tty_std_termios;
-extern int kmsg_redirect;
-
extern void console_init(void);
extern int vcs_init(void);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a8..ee03bba9c5df 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+ KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -76,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states).event[item]++;
+ __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states).event[item]++;
- put_cpu();
+ this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states).event[item] += delta;
+ __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states).event[item] += delta;
- put_cpu();
+ this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
extern void all_vm_events(unsigned long *);
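
The vmstat.h conversion above is representative of several hunks in this merge (see also neighbour.h, nf_conntrack.h and snmp.h below): open-coded get_cpu_var()/put_cpu() sequences become this_cpu_inc()/this_cpu_add(), which fold the per-CPU addressing and the update into one preempt-safe operation. The same pattern applied to a hypothetical per-CPU counter, written in the per_cpu_var() style this tree still uses:

/* Hypothetical counter, shown only to illustrate the conversion pattern. */
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, demo_events);

static inline void demo_count_event_old(void)
{
	get_cpu_var(demo_events)++;		/* disables preemption ... */
	put_cpu_var(demo_events);		/* ... and re-enables it */
}

static inline void demo_count_event_new(void)
{
	this_cpu_inc(per_cpu_var(demo_events));	/* one preempt-safe step */
}
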
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f06232..3fb9944e50a6 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,19 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
+#ifdef CONFIG_VT_CONSOLE
+
+extern int vt_kmsg_redirect(int new);
+
+#else
+
+static inline int vt_kmsg_redirect(int new)
+{
+ return 0;
+}
+
+#endif
+
+#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
+
#endif /* _LINUX_VT_H */
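
The exported kmsg_redirect variable (removed from tty.h above) is replaced by this accessor: judging by the -1 convention in vt_get_kmsg_redirect(), passing a console number installs the redirection while -1 only queries, and the previous value is returned; treat that return-value behaviour as an assumption here, since the implementation is not part of this hunk. A small caller sketch:

/* Sketch assuming vt_kmsg_redirect(new) returns the previously set value. */
#include <linux/vt.h>

static void demo_redirect_kmsg(int vt_number)
{
	int old = vt_kmsg_redirect(vt_number);	/* send kernel messages to vt_number */

	/* ... */

	vt_kmsg_redirect(old);			/* restore the previous target */
}

static int demo_query_kmsg(void)
{
	return vt_get_kmsg_redirect();		/* expands to vt_kmsg_redirect(-1) */
}
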
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0302f31a2fb7..b0173202cad9 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -88,12 +88,7 @@ struct neigh_statistics {
unsigned long unres_discards; /* number of unresolved drops */
};
-#define NEIGH_CACHE_STAT_INC(tbl, field) \
- do { \
- preempt_disable(); \
- (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
- preempt_enable(); \
- } while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
struct neighbour {
struct neighbour *next;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5cf7270e3ffc..a0904adfb8f7 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
#define NF_CT_STAT_INC(net, count) \
- (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+ __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) \
do { \
local_bh_disable(); \
- per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \
+ __this_cpu_inc((net)->ct.stat->count); \
local_bh_enable(); \
} while (0)
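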
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8c842e06bec8..f0d756f2ac99 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
#define SNMP_STAT_BHPTR(name) (name[0])
#define SNMP_STAT_USRPTR(name) (name[1])
-#define SNMP_INC_STATS_BH(mib, field) \
- (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
-#define SNMP_INC_STATS_USER(mib, field) \
- do { \
- per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
- put_cpu(); \
- } while (0)
-#define SNMP_INC_STATS(mib, field) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
- put_cpu(); \
- } while (0)
-#define SNMP_DEC_STATS(mib, field) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
- put_cpu(); \
- } while (0)
-#define SNMP_ADD_STATS(mib, field, addend) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
- put_cpu(); \
- } while (0)
-#define SNMP_ADD_STATS_BH(mib, field, addend) \
- (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend) \
- do { \
- per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
- put_cpu(); \
- } while (0)
+#define SNMP_INC_STATS_BH(mib, field) \
+ __this_cpu_inc(mib[0]->mibs[field])
+#define SNMP_INC_STATS_USER(mib, field) \
+ this_cpu_inc(mib[1]->mibs[field])
+#define SNMP_INC_STATS(mib, field) \
+ this_cpu_inc(mib[!in_softirq()]->mibs[field])
+#define SNMP_DEC_STATS(mib, field) \
+ this_cpu_dec(mib[!in_softirq()]->mibs[field])
+#define SNMP_ADD_STATS_BH(mib, field, addend) \
+ __this_cpu_add(mib[0]->mibs[field], addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend) \
+ this_cpu_add(mib[1]->mibs[field], addend)
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+ __typeof__(mib[0]) ptr; \
+ preempt_disable(); \
+ ptr = this_cpu_ptr((mib)[!in_softirq()]); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
- put_cpu(); \
+ preempt_enable(); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+ __typeof__(mib[0]) ptr = \
+ __this_cpu_ptr((mib)[!in_softirq()]); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
} while (0)
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index afc2bfb9e917..75fa3530345b 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -126,8 +126,8 @@ typedef struct irq_req_t {
#define IRQ_TYPE_TIME 0x01
#define IRQ_TYPE_DYNAMIC_SHARING 0x02
#define IRQ_FORCED_PULSE 0x04
-#define IRQ_FIRST_SHARED 0x08
-//#define IRQ_HANDLE_PRESENT 0x10
+#define IRQ_FIRST_SHARED 0x08 /* unused */
+#define IRQ_HANDLE_PRESENT 0x10 /* unused */
#define IRQ_PULSE_ALLOCATED 0x100
/* Bits in IRQInfo1 field */
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index d403c12f7978..ee148573c114 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -82,7 +82,7 @@ struct pcmcia_device {
/* the hardware "function" device; certain subdevices can
* share one hardware "function" device. */
u8 func;
- struct config_t* function_config;
+ struct config_t *function_config;
struct list_head socket_device_list;
@@ -121,14 +121,14 @@ struct pcmcia_device {
u16 manf_id;
u16 card_id;
- char * prod_id[4];
+ char *prod_id[4];
u64 dma_mask;
struct device dev;
#ifdef CONFIG_PCMCIA_IOCTL
/* device driver wanted by cardmgr */
- struct pcmcia_driver * cardmgr;
+ struct pcmcia_driver *cardmgr;
#endif
/* data private to drivers */
diff --git a/include/pcmcia/mem_op.h b/include/pcmcia/mem_op.h
index 8d19b9401a5b..0fa06e5d5376 100644
--- a/include/pcmcia/mem_op.h
+++ b/include/pcmcia/mem_op.h
@@ -15,8 +15,8 @@
#ifndef _LINUX_MEM_OP_H
#define _LINUX_MEM_OP_H
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
/*
If UNSAFE_MEMCPY is defined, we use the (optimized) system routines
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 7c23be706f12..cbfba885eb85 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -154,7 +154,7 @@ struct pcmcia_socket {
struct list_head socket_list;
struct completion socket_released;
- /* deprecated */
+ /* deprecated */
unsigned int sock; /* socket number */
@@ -164,7 +164,7 @@ struct pcmcia_socket {
u_int map_size;
u_int io_offset;
u_int pci_irq;
- struct pci_dev * cb_dev;
+ struct pci_dev *cb_dev;
/* socket setup is done so resources should be able to be allocated.
@@ -179,9 +179,9 @@ struct pcmcia_socket {
u8 reserved:5;
/* socket operations */
- struct pccard_operations * ops;
- struct pccard_resource_ops * resource_ops;
- void * resource_data;
+ struct pccard_operations *ops;
+ struct pccard_resource_ops *resource_ops;
+ void *resource_data;
 /* Zoom video behaviour is so chip specific it's not worth adding
this to _ops */
@@ -245,7 +245,7 @@ struct pcmcia_socket {
/* cardbus (32-bit) */
#ifdef CONFIG_CARDBUS
- struct resource * cb_cis_res;
+ struct resource *cb_cis_res;
void __iomem *cb_cis_virt;
#endif /* CONFIG_CARDBUS */
diff --git a/init/Kconfig b/init/Kconfig
index 54c655ce9c04..a23da9f01803 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1079,6 +1079,28 @@ config SLOB
endchoice
+config MMAP_ALLOW_UNINITIALIZED
+ bool "Allow mmapped anonymous memory to be uninitialized"
+ depends on EMBEDDED && !MMU
+ default n
+ help
+ Normally, and according to the Linux spec, anonymous memory obtained
+ from mmap() has it's contents cleared before it is passed to
+ userspace. Enabling this config option allows you to request that
+ mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
+ providing a huge performance boost. If this option is not enabled,
+ then the flag will be ignored.
+
+ This is taken advantage of by uClibc's malloc(), and also by
+ ELF-FDPIC binfmt's brk and stack allocator.
+
+ Because of the obvious security issues, this option should only be
+ enabled on embedded devices where you control what is run in
+ userspace. Since that isn't generally a problem on no-MMU systems,
+ it is normally safe to say Y here.
+
+ See Documentation/nommu-mmap.txt for more information.
+
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
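
From userspace, the new option is exercised by passing MAP_UNINITIALIZED to an anonymous mmap(); on kernels without CONFIG_MMAP_ALLOW_UNINITIALIZED (or on MMU systems) the flag is simply ignored and the mapping comes back zeroed as usual. A hedged sketch; the numeric flag value and the fallback #define are assumptions, not taken from this patch:

/* Userspace sketch -- MAP_UNINITIALIZED is only honoured on no-MMU kernels
 * built with CONFIG_MMAP_ALLOW_UNINITIALIZED; elsewhere it is ignored. */
#include <stddef.h>
#include <sys/mman.h>

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0x4000000	/* assumed value; check the uapi headers */
#endif

static void *demo_alloc_fast(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);

	return p == MAP_FAILED ? NULL : p;	/* contents may be stale data, not zeroes */
}
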
diff --git a/init/main.c b/init/main.c
index 4051d75dd2d6..c3db4a98b369 100644
--- a/init/main.c
+++ b/init/main.c
@@ -691,10 +691,10 @@ asmlinkage void __init start_kernel(void)
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
- ctor_fn_t *call = (ctor_fn_t *) __ctors_start;
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
- for (; call < (ctor_fn_t *) __ctors_end; call++)
- (*call)();
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
#endif
}
@@ -755,10 +755,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
static void __init do_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __early_initcall_end; call < __initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __early_initcall_end; fn < __initcall_end; fn++)
+ do_one_initcall(*fn);
/* Make sure there is no pending stuff from the initcall sequence */
flush_scheduled_work();
@@ -785,10 +785,10 @@ static void __init do_basic_setup(void)
static void __init do_pre_smp_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __initcall_start; call < __early_initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __initcall_start; fn < __early_initcall_end; fn++)
+ do_one_initcall(*fn);
}
static void run_init_process(char *init_filename)
diff --git a/kernel/acct.c b/kernel/acct.c
index 9a4715a2f6bf..a6605ca921b6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -536,7 +536,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
do_div(elapsed, AHZ);
ac.ac_btime = get_seconds() - elapsed;
/* we really need to bite the bullet and change layout */
- current_uid_gid(&ac.ac_uid, &ac.ac_gid);
+ ac.ac_uid = orig_cred->uid;
+ ac.ac_gid = orig_cred->gid;
#if ACCT_VERSION==2
ac.ac_ahz = AHZ;
#endif
diff --git a/kernel/exit.c b/kernel/exit.c
index 6f50ef55a6f3..5962d7ccf243 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
- spin_unlock_wait(&tsk->pi_lock);
+ raw_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
diff --git a/kernel/fork.c b/kernel/fork.c
index 1415dc4598ae..9bd91447e052 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
static void rt_mutex_init_task(struct task_struct *p)
{
- spin_lock_init(&p->pi_lock);
+ raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&p->pi_waiters, &p->pi_lock);
+ plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
diff --git a/kernel/futex.c b/kernel/futex.c
index d73ef1f3e55d..8e3c3ffe1b9a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
@@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
@@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
}
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
}
static int
@@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
* change of the task flags, we do this protected by
* p->pi_lock:
*/
- spin_lock_irq(&p->pi_lock);
+ raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
@@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
- spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
@@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
- spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!pi_state)
return -EINVAL;
- spin_lock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
@@ -789,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
else if (curval != uval)
ret = -EINVAL;
if (ret) {
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
- spin_lock_irq(&new_owner->pi_lock);
+ raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
- spin_unlock_irq(&new_owner->pi_lock);
+ raw_spin_unlock_irq(&new_owner->pi_lock);
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
@@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb2->lock;
+ q->list.plist.spinlock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
@@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
wake_up_state(q->task, TASK_NORMAL);
@@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
@@ -1529,18 +1529,18 @@ retry:
* itself.
*/
if (pi_state->owner != NULL) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
- spin_lock_irq(&newowner->pi_lock);
+ raw_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
- spin_unlock_irq(&newowner->pi_lock);
+ raw_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d2f9239dc6ba..0086628b6e97 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
for (;;) {
base = timer->base;
if (likely(base != NULL)) {
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
- spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
@@ -208,13 +208,13 @@ again:
/* See the comment in lock_timer_base() */
timer->base = NULL;
- spin_unlock(&base->cpu_base->lock);
- spin_lock(&new_base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
+ raw_spin_lock(&new_base->cpu_base->lock);
if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
cpu = this_cpu;
- spin_unlock(&new_base->cpu_base->lock);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_unlock(&new_base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
timer->base = base;
goto again;
}
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
struct hrtimer_clock_base *base = timer->base;
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
@@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg)
base = &__get_cpu_var(hrtimer_bases);
/* Adjust CLOCK_REALTIME offset */
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
base->clock_base[CLOCK_REALTIME].offset =
timespec_to_ktime(realtime_offset);
hrtimer_force_reprogram(base, 0);
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
}
/*
@@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
if (wakeup) {
- spin_unlock(&base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
} else
__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
@@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
- spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
@@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
unsigned long flags;
int i;
- spin_lock_irqsave(&cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active()) {
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
}
}
- spin_unlock_irqrestore(&cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
if (mindelta.tv64 < 0)
mindelta.tv64 = 0;
@@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 * they get migrated to another cpu, therefore it's safe to unlock
* the timer base.
*/
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
retry:
expires_next.tv64 = KTIME_MAX;
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1317,7 +1317,7 @@ retry:
* against it.
*/
cpu_base->expires_next = expires_next;
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
if (expires_next.tv64 == KTIME_MAX ||
@@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void)
gettime = 0;
}
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
@@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void)
__run_hrtimer(timer, &base->softirq_time);
}
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
}
}
@@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- spin_lock_init(&cpu_base->lock);
+ raw_spin_lock_init(&cpu_base->lock);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
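
The futex and hrtimer hunks above all apply the same mechanical conversion: the embedded lock (the timer base lock, the task pi_lock) becomes a raw_spinlock_t, and every spin_lock_*() call on it gains the raw_ prefix, so these locks keep busy-waiting even on kernels where plain spinlock_t may be turned into a sleeping lock. A minimal sketch of that pattern follows; struct demo_base and demo_update() are made up for illustration and are not part of the patch.

/* Illustrative sketch only -- not from the patch. */
#include <linux/spinlock.h>

struct demo_base {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	int		pending;
};

static struct demo_base demo = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(demo.lock),
};

static void demo_update(int value)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo.lock, flags);	/* was: spin_lock_irqsave() */
	demo.pending = value;
	raw_spin_unlock_irqrestore(&demo.lock, flags);	/* was: spin_unlock_irqrestore() */
}
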
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 366eedf949c0..dbcbf6a33a08 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk)
list = &ctx->event_list;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* The current breakpoint counter is not included in the list
@@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk)
count++;
}
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
return count;
}
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 1de9700f416e..2295a31ef110 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
* An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
desc->chip->set_type(i, IRQ_TYPE_PROBE);
desc->chip->startup(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->chip->startup(i))
desc->status |= IRQ_PENDING;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
if (i < 32)
mask |= 1 << i;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
int i;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
unsigned int status;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ba566c261adc..ecc3fa28f666 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
}
/* Ensure we don't have left over values from a previous use of this irq */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
desc->chip = &no_irq_chip;
desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
cpumask_clear(desc->pending_mask);
#endif
#endif
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
irq);
return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
desc->chip = &no_irq_chip;
desc->name = NULL;
clear_kstat_irqs(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
if (!chip)
chip = &no_irq_chip;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->chip = chip;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
if (type == IRQ_TYPE_NONE)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = __irq_set_trigger(desc, irq, type);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->handler_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
entry->irq = irq;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
if (!desc)
return;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (nest)
desc->status |= IRQ_NESTED_THREAD;
else
desc->status &= ~IRQ_NESTED_THREAD;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
might_sleep();
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
kstat_incr_irqs_this_cpu(irq, desc);
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status |= IRQ_INPROGRESS;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -530,7 +530,7 @@ out:
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
}
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
}
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->depth = 0;
desc->chip->startup(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status &= ~IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 17c71bb565c6..814940e7f485 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->irq = irq;
#ifdef CONFIG_SMP
desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
/*
* Protect the sparse_irqs:
*/
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
struct irq_desc **irq_desc_ptrs __read_mostly;
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
}
};
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
if (desc)
return desc;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
irq_desc_ptrs[irq] = desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
return 1;
}
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
for (;;) {
irqreturn_t action_ret;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
* disabled while the handler was running.
*/
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return 1;
}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 1b5d742c6a77..b2821f070a3d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7305b297d1eb..eb6078ca60c7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = desc->status;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
if (!desc->chip->set_affinity)
return -EINVAL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
}
#endif
desc->status |= IRQ_AFFINITY_SET;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
unsigned long flags;
int ret;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
if (!ret)
irq_set_thread_affinity(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_wake);
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action)
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
chip_bus_lock(irq, desc);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
desc->chip->unmask(irq);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(irq, desc);
}
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
return;
}
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
cpumask_copy(mask, desc->affinity);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
atomic_inc(&desc->threads_active);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (unlikely(desc->status & IRQ_DISABLED)) {
/*
* CHECKME: We might need a dedicated
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
* retriggers the interrupt itself --- tglx
*/
desc->status |= IRQ_PENDING;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
} else {
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
__enable_irq(desc, irq, false);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/*
* Strictly no need to wake it up, but hung_task complains
@@ -802,7 +802,7 @@ mismatch:
ret = -EBUSY;
out_thread:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
if (new->thread) {
struct task_struct *t = new->thread;
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!desc)
return NULL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/*
* There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!action) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return NULL;
}
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
desc->chip->disable(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index fcb6c96f2627..241962280836 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
if (!desc->chip->set_affinity)
return;
- assert_spin_locked(&desc->lock);
+ assert_raw_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, please
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 3fd30197da2e..26bac9d8f860 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
"for migration.\n", irq);
return false;
}
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
irq = old_desc->irq;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
}
irq_desc_ptrs[irq] = desc;
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
return desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index a0bb09e79867..0d4005d85b03 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
if (!(desc->status & IRQ_SUSPENDED))
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
EXPORT_SYMBOL_GPL(resume_device_irqs);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 0832145fea97..6f50eccc79c0 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
break;
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e49ea1c5232d..89fb90ae534f 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
struct irqaction *action;
int ok = 0, work = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (desc->action && (desc->action->flags & IRQF_SHARED))
desc->status |= IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
/* Honour the normal IRQ locking */
desc->status |= IRQ_INPROGRESS;
action = desc->action;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
while (action) {
/* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
}
local_irq_disable();
/* Now clean up the flags */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
action = desc->action;
/*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
* Perform real IRQ processing for the IRQ we deferred
*/
work = 1;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
handle_IRQ_event(irq, action);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (work && desc->chip && desc->chip->end)
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4f8df01dbe51..5feaddcdbe49 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code...
*/
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
/*
* Make sure that if another CPU detected a bug while
* walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
* dropped already)
*/
if (!debug_locks) {
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return 0;
}
/* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
static inline int graph_unlock(void)
{
- if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+ if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
current->lockdep_recursion--;
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return 0;
}
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return ret;
}
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
}
#ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+ cpu_lock_stats);
static inline u64 lockstat_clock(void)
{
@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
memset(&stats, 0, sizeof(struct lock_class_stats));
for_each_possible_cpu(cpu) {
struct lock_class_stats *pcs =
- &per_cpu(lock_stats, cpu)[class - lock_classes];
+ &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i];
@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
for_each_possible_cpu(cpu) {
struct lock_class_stats *cpu_stats =
- &per_cpu(lock_stats, cpu)[class - lock_classes];
+ &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
}
@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
- return &get_cpu_var(lock_stats)[class - lock_classes];
+ return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}
static void put_lock_stats(struct lock_class_stats *stats)
{
- put_cpu_var(lock_stats);
+ put_cpu_var(cpu_lock_stats);
}
static void lock_release_holdtime(struct held_lock *hlock)
@@ -1169,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;
local_irq_save(flags);
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
@@ -1196,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;
local_irq_save(flags);
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
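
lockdep cannot take its own graph lock through the generic spinlock wrappers, since those wrappers would recurse straight back into lockdep. With raw_spinlock_t now claimed as the name of the non-sleeping kernel lock, the hunks above rename the lowest-level type to arch_spinlock_t and switch lockdep's internal lock to the arch_spin_*() operations, which bypass lockdep entirely. A hedged sketch of that usage; demo_graph_lock and demo_touch_graph() are invented names.

/* Illustrative sketch only -- not from the patch. */
#include <linux/spinlock.h>

static arch_spinlock_t demo_graph_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_touch_graph(void)
{
	arch_spin_lock(&demo_graph_lock);	/* was: __raw_spin_lock() */
	/* ... update graph data without re-entering lockdep ... */
	arch_spin_unlock(&demo_graph_lock);	/* was: __raw_spin_unlock() */
}
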
diff --git a/kernel/module.c b/kernel/module.c
index 5842a71cf052..12afc5a3ddd3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
static void *percpu_modalloc(unsigned long size, unsigned long align,
const char *name)
{
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
free_percpu(freeme);
}
-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block. -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
- /* Reallocation required? */
- if (pcpu_num_used + 1 > pcpu_num_allocated) {
- int *new;
-
- new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
- GFP_KERNEL);
- if (!new)
- return 0;
-
- pcpu_num_allocated *= 2;
- pcpu_size = new;
- }
-
- /* Insert a new subblock */
- memmove(&pcpu_size[i+1], &pcpu_size[i],
- sizeof(pcpu_size[0]) * (pcpu_num_used - i));
- pcpu_num_used++;
-
- pcpu_size[i+1] -= size;
- pcpu_size[i] = size;
- return 1;
-}
-
-static inline unsigned int block_size(int val)
-{
- if (val < 0)
- return -val;
- return val;
-}
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
- const char *name)
-{
- unsigned long extra;
- unsigned int i;
- void *ptr;
- int cpu;
-
- if (align > PAGE_SIZE) {
- printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
- name, align, PAGE_SIZE);
- align = PAGE_SIZE;
- }
-
- ptr = __per_cpu_start;
- for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
- /* Extra for alignment requirement. */
- extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
- BUG_ON(i == 0 && extra != 0);
-
- if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
- continue;
-
- /* Transfer extra to previous block. */
- if (pcpu_size[i-1] < 0)
- pcpu_size[i-1] -= extra;
- else
- pcpu_size[i-1] += extra;
- pcpu_size[i] -= extra;
- ptr += extra;
-
- /* Split block if warranted */
- if (pcpu_size[i] - size > sizeof(unsigned long))
- if (!split_block(i, size))
- return NULL;
-
- /* add the per-cpu scanning areas */
- for_each_possible_cpu(cpu)
- kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
- GFP_KERNEL);
-
- /* Mark allocated */
- pcpu_size[i] = -pcpu_size[i];
- return ptr;
- }
-
- printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
- size);
- return NULL;
-}
-
-static void percpu_modfree(void *freeme)
-{
- unsigned int i;
- void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
- int cpu;
-
- /* First entry is core kernel percpu data. */
- for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
- if (ptr == freeme) {
- pcpu_size[i] = -pcpu_size[i];
- goto free;
- }
- }
- BUG();
-
- free:
- /* remove the per-cpu scanning areas */
- for_each_possible_cpu(cpu)
- kmemleak_free(freeme + per_cpu_offset(cpu));
-
- /* Merge with previous? */
- if (pcpu_size[i-1] >= 0) {
- pcpu_size[i-1] += pcpu_size[i];
- pcpu_num_used--;
- memmove(&pcpu_size[i], &pcpu_size[i+1],
- (pcpu_num_used - i) * sizeof(pcpu_size[0]));
- i--;
- }
- /* Merge with next? */
- if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
- pcpu_size[i] += pcpu_size[i+1];
- pcpu_num_used--;
- memmove(&pcpu_size[i+1], &pcpu_size[i+2],
- (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
- }
-}
-
-static int percpu_modinit(void)
-{
- pcpu_num_used = 2;
- pcpu_num_allocated = 2;
- pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
- GFP_KERNEL);
- /* Static in-kernel percpu data (used). */
- pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
- /* Free room. */
- pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
- if (pcpu_size[1] < 0) {
- printk(KERN_ERR "No per-cpu room for modules.\n");
- pcpu_num_used = 1;
- }
-
- return 0;
-}
-__initcall(percpu_modinit);
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
static unsigned int find_pcpusec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
const char *secstrings)
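
With the CONFIG_HAVE_LEGACY_PER_CPU_AREA allocator deleted above, module per-cpu memory always goes through the generic per-cpu allocator; the retained percpu_modfree() is simply a call to free_percpu(). A minimal illustrative caller of that generic API, not taken from the patch (demo_counters and the demo_* functions are hypothetical):

/* Illustrative sketch only -- not from the patch. */
#include <linux/percpu.h>
#include <linux/errno.h>

static int *demo_counters;

static int demo_pcpu_init(void)
{
	demo_counters = alloc_percpu(int);	/* one int per possible CPU */
	if (!demo_counters)
		return -ENOMEM;
	return 0;
}

static void demo_pcpu_exit(void)
{
	free_percpu(demo_counters);		/* same call percpu_modfree() makes */
}
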
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 6b2d735846a5..57d527a16f9d 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
\
DEBUG_LOCKS_WARN_ON(in_interrupt()); \
local_irq_save(flags); \
- __raw_spin_lock(&(lock)->raw_lock); \
+ arch_spin_lock(&(lock)->rlock.raw_lock);\
DEBUG_LOCKS_WARN_ON(l->magic != l); \
} while (0)
-#define spin_unlock_mutex(lock, flags) \
- do { \
- __raw_spin_unlock(&(lock)->raw_lock); \
- local_irq_restore(flags); \
- preempt_check_resched(); \
+#define spin_unlock_mutex(lock, flags) \
+ do { \
+ arch_spin_unlock(&(lock)->rlock.raw_lock); \
+ local_irq_restore(flags); \
+ preempt_check_resched(); \
} while (0)
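
The mutex-debug change reflects the lock-type layering this series establishes: spinlock_t now wraps a raw_spinlock_t member named rlock, which in turn carries the architecture-level arch_spinlock_t in raw_lock, so the debug macros above reach through both levels with (lock)->rlock.raw_lock. A simplified, standalone sketch of that nesting; the real kernel definitions carry additional debug and lockdep fields, so this is only the shape, not the exact layout.

/* Simplified shape only -- not the kernel's actual definitions. */
typedef struct arch_spinlock {
	unsigned int slock;			/* arch-specific lock word */
} arch_spinlock_t;

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;		/* what arch_spin_lock() operates on */
} raw_spinlock_t;

typedef struct spinlock {
	struct raw_spinlock rlock;		/* hence (lock)->rlock.raw_lock above */
} spinlock_t;
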
diff --git a/kernel/params.c b/kernel/params.c
index d656c276508d..cf1b69183127 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#if 0
#define DEBUGP printk
@@ -122,9 +123,7 @@ static char *next_arg(char *args, char **param, char **val)
next = args + i;
/* Chew up trailing spaces. */
- while (isspace(*next))
- next++;
- return next;
+ return skip_spaces(next);
}
/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
@@ -139,8 +138,7 @@ int parse_args(const char *name,
DEBUGP("Parsing ARGS: %s\n", args);
/* Chew leading spaces */
- while (isspace(*args))
- args++;
+ args = skip_spaces(args);
while (*args) {
int ret;
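
The params.c hunks replace two open-coded isspace() loops with skip_spaces() from lib/string.c, which is why <linux/string.h> is added to the includes. A tiny illustrative use with a made-up argument string:

/* Illustrative sketch only -- not from the patch. */
#include <linux/string.h>

static void demo_parse(void)
{
	const char *args = "   foo=bar baz";

	args = skip_spaces(args);	/* args now points at "foo=bar baz" */
}
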
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e73e53c7582f..9052d6c8c9fd 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
- spin_lock_irqsave(&ctx->lock, *flags);
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
put_ctx(ctx);
}
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
* events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
@@ -488,12 +488,12 @@ retry:
task_oncpu_function_call(task, __perf_event_remove_from_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
if (ctx->nr_active && !list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -504,7 +504,7 @@ retry:
*/
if (!list_empty(&event->group_entry))
list_del_event(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
retry:
task_oncpu_function_call(task, __perf_event_disable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info)
unlock:
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -856,12 +856,12 @@ retry:
task_oncpu_function_call(task, __perf_install_in_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
if (ctx->is_active && list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -872,7 +872,7 @@ retry:
*/
if (list_empty(&event->group_entry))
add_event_to_ctx(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *info)
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *info)
}
unlock:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event)
return;
}
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
@@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
retry:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event)
__perf_event_mark_enabled(event, ctx);
out:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
{
struct perf_event *event;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 0;
if (likely(!ctx->nr_events))
goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task,
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
- spin_lock(&ctx->lock);
- spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&ctx->lock);
+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task,
perf_event_sync_stat(ctx, next_ctx);
}
- spin_unlock(&next_ctx->lock);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&next_ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
struct perf_event *event;
int can_add_hw = 1;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_events))
goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
struct hw_perf_event *hwc;
u64 interrupts, freq;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
perf_enable();
}
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
if (!ctx->nr_events)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Rotate the first entry last (works just fine for group events too):
*/
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
__perf_event_task_sched_out(ctx);
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->group_list, group_entry) {
if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
if (enabled)
unclone_ctx(ctx);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
perf_event_task_sched_in(task, smp_processor_id());
out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
update_context_time(ctx);
update_event_times(event);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
event->pmu->read(event);
}
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event)
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
update_context_time(ctx);
update_event_times(event);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return atomic64_read(&event->count);
@@ -1579,7 +1579,7 @@ static void
__perf_event_init_context(struct perf_event_context *ctx,
struct task_struct *task)
{
- spin_lock_init(&ctx->lock);
+ raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);
@@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
unclone_ctx(ctx);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {
@@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (!value)
return -EINVAL;
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
@@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->hw.sample_period = value;
}
unlock:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
return ret;
}
@@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child)
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
- spin_lock(&child_ctx->lock);
+ raw_spin_lock(&child_ctx->lock);
child->perf_event_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get
@@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child)
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
- spin_unlock_irqrestore(&child_ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we
@@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
- spin_lock_irq(&cpuctx->ctx.lock);
+ raw_spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_events - cpuctx->ctx.nr_events,
perf_max_events - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
- spin_unlock_irq(&cpuctx->ctx.lock);
+ raw_spin_unlock_irq(&cpuctx->ctx.lock);
}
spin_unlock(&perf_resource_lock);
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 5187136fe1de..218e5af90156 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -6,7 +6,7 @@
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
-#include <linux/console.h>
+#include <linux/vt.h>
#include <linux/module.h>
#include "power.h"
@@ -21,8 +21,7 @@ int pm_prepare_console(void)
if (orig_fgconsole < 0)
return 1;
- orig_kmsg = kmsg_redirect;
- kmsg_redirect = SUSPEND_CONSOLE;
+ orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
return 0;
}
@@ -30,7 +29,7 @@ void pm_restore_console(void)
{
if (orig_fgconsole >= 0) {
vt_move_to_console(orig_fgconsole, 0);
- kmsg_redirect = orig_kmsg;
+ vt_kmsg_redirect(orig_kmsg);
}
}
#endif
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a621a67ef4e3..9bb52177af02 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_batch)[completed];
+ __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_batch)[completed];
+ __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
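
The two rcutorture hunks switch the per-CPU statistics update from taking the address of this CPU's array with __get_cpu_var() and incrementing through it, to a single __this_cpu_inc() on the addressed element. A minimal sketch of that pattern; demo_hist and demo_record() are hypothetical, and as in rcutorture the increment is done with preemption disabled.

/* Illustrative sketch only -- not from the patch. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(long [4], demo_hist);

static void demo_record(int bucket)
{
	preempt_disable();
	/* was: ++__get_cpu_var(demo_hist)[bucket]; */
	__this_cpu_inc(per_cpu_var(demo_hist)[bucket]);
	preempt_enable();
}
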
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5fcb4fe645e2..ddabb54bb5c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
if (rt_trace_on) { \
rt_trace_on = 0; \
console_verbose(); \
- if (spin_is_locked(&current->pi_lock)) \
- spin_unlock(&current->pi_lock); \
+ if (raw_spin_is_locked(&current->pi_lock)) \
+ raw_spin_unlock(&current->pi_lock); \
} \
} while (0)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 29bd4baf9e75..a9604815786a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
{
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/*
* Task can not go away as we did a get_task() before !
*/
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
/*
@@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto out_unlock_pi;
lock = waiter->lock;
- if (!spin_trylock(&lock->wait_lock)) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!raw_spin_trylock(&lock->wait_lock)) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
goto retry;
}
@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Deadlock detection */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
ret = deadlock_detect ? -EDEADLK : 0;
goto out_unlock_pi;
}
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
plist_add(&waiter->list_entry, &lock->wait_list);
/* Release the task */
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
put_task_struct(task);
/* Grab the next task */
task = rt_mutex_owner(lock);
get_task_struct(task);
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
/* Boost the owner */
@@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
__rt_mutex_adjust_prio(task);
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto again;
out_unlock_pi:
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
put_task_struct(task);
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
if (pendowner == task)
return 1;
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
if (task->prio >= pendowner->prio) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 0;
}
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* priority.
*/
if (likely(!rt_mutex_has_waiters(lock))) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 1;
}
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
next = rt_mutex_top_waiter(lock);
plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
__rt_mutex_adjust_prio(pendowner);
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
/*
* We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* might be task:
*/
if (likely(next->task != task)) {
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
plist_add(&next->pi_list_entry, &task->pi_waiters);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
return 1;
}
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0, res;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
task->pi_blocked_on = waiter;
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
chain_walk = 1;
@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
*/
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
task);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
return res;
}
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
struct task_struct *pendowner;
unsigned long flags;
- spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
waiter = rt_mutex_top_waiter(lock);
plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
/*
* Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* waiter with higher priority than pending-owner->normal_prio
* is blocked on the unboosted (pending) owner.
*/
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
WARN_ON(!pendowner->pi_blocked_on);
WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
next = rt_mutex_top_waiter(lock);
plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
}
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
wake_up_process(pendowner);
}
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0;
- spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
waiter->task = NULL;
current->pi_blocked_on = NULL;
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
if (first && owner != current) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock,
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
}
/*
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
struct rt_mutex_waiter *waiter;
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || waiter->list_entry.prio == task->prio) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
if (waiter->task)
schedule_rt_mutex(lock);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}
@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock)) {
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return 0;
}
@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/* Remove pending timer: */
if (unlikely(timeout))
@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
{
int ret = 0;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
fixup_rt_mutex_waiters(lock);
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return ret;
}
@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
debug_rt_mutex_unlock(lock);
@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return;
}
wakeup_next_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/* Undo pi boosting if necessary: */
rt_mutex_adjust_prio(current);
@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- spin_lock_init(&lock->wait_lock);
- plist_head_init(&lock->wait_list, &lock->wait_lock);
+ raw_spin_lock_init(&lock->wait_lock);
+ plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
debug_rt_mutex_init(lock, name);
}
@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
{
int ret;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
mark_rt_mutex_waiters(lock);
@@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
/* We got the lock for task. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task, 0);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
rt_mutex_deadlock_account_lock(lock, task);
return 1;
}
@@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*/
ret = 0;
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
@@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
{
int ret;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/*
* Readjust priority, when we did not get the lock. We might have been
diff --git a/kernel/sched.c b/kernel/sched.c
index db5c26692dd5..9c30858b6463 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -143,7 +143,7 @@ struct rt_prio_array {
struct rt_bandwidth {
/* nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
@@ -180,7 +180,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
- spin_lock_init(&rt_b->rt_runtime_lock);
+ raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -202,7 +202,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
if (hrtimer_active(&rt_b->rt_period_timer))
return;
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
@@ -219,7 +219,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
@@ -300,7 +300,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
@@ -472,7 +472,7 @@ struct rt_rq {
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
@@ -527,7 +527,7 @@ static struct root_domain def_root_domain;
*/
struct rq {
/* runqueue lock: */
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -687,7 +687,7 @@ inline void update_rq_clock(struct rq *rq)
*/
int runqueue_is_locked(int cpu)
{
- return spin_is_locked(&cpu_rq(cpu)->lock);
+ return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
@@ -895,7 +895,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -919,9 +919,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
#else
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
#endif
}
@@ -951,10 +951,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
{
for (;;) {
struct rq *rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
}
@@ -971,10 +971,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
@@ -983,19 +983,19 @@ void task_rq_unlock_wait(struct task_struct *p)
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
- spin_unlock_wait(&rq->lock);
+ raw_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*
@@ -1008,7 +1008,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
return rq;
}
@@ -1055,10 +1055,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
@@ -1071,10 +1071,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
/*
@@ -1181,7 +1181,7 @@ static void resched_task(struct task_struct *p)
{
int cpu;
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
@@ -1203,10 +1203,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!spin_trylock_irqsave(&rq->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
@@ -1275,7 +1275,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
@@ -1602,11 +1602,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}
@@ -1708,9 +1708,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
if (root_task_group_empty())
return;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
update_shares(sd);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)
@@ -1750,7 +1750,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
@@ -1771,14 +1771,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
- if (unlikely(!spin_trylock(&busiest->lock))) {
+ if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
- spin_unlock(&this_rq->lock);
- spin_lock(&busiest->lock);
- spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_unlock(&this_rq->lock);
+ raw_spin_lock(&busiest->lock);
+ raw_spin_lock_nested(&this_rq->lock,
+ SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&busiest->lock,
+ SINGLE_DEPTH_NESTING);
}
return ret;
}
@@ -1792,7 +1794,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
@@ -1802,7 +1804,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- spin_unlock(&busiest->lock);
+ raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
@@ -2025,13 +2027,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
return;
}
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);
@@ -2783,10 +2785,10 @@ static inline void post_schedule(struct rq *rq)
if (rq->post_schedule) {
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}
@@ -3068,15 +3070,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
- spin_lock(&rq1->lock);
+ raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
- spin_lock(&rq1->lock);
- spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq1->lock);
+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock(&rq2->lock);
- spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq2->lock);
+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
@@ -3093,9 +3095,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
- spin_unlock(&rq1->lock);
+ raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
- spin_unlock(&rq2->lock);
+ raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
@@ -4188,14 +4190,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
- spin_lock_irqsave(&busiest->lock, flags);
+ raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock,
+ flags);
all_pinned = 1;
goto out_one_pinned;
}
@@ -4205,7 +4208,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);
@@ -4387,10 +4390,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
- spin_lock(&this_rq->lock);
+ raw_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
@@ -5259,11 +5262,11 @@ void scheduler_tick(void)
sched_clock_tick();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
perf_event_task_tick(curr, cpu);
@@ -5457,7 +5460,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);
@@ -5493,7 +5496,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
@@ -6324,7 +6327,7 @@ recheck:
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
- spin_lock_irqsave(&p->pi_lock, flags);
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
/*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
@@ -6334,7 +6337,7 @@ recheck:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
update_rq_clock(rq);
@@ -6358,7 +6361,7 @@ recheck:
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);
@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- _raw_spin_unlock(&rq->lock);
+ do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();
@@ -6978,7 +6981,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->se.exec_start = sched_clock();
@@ -6990,7 +6993,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
@@ -7207,10 +7210,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
}
@@ -7222,7 +7225,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
@@ -7231,14 +7234,14 @@ static int migration_thread(void *data)
list_del_init(head->next);
if (req->task != NULL) {
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
} else if (likely(cpu == (badcpu = smp_processor_id()))) {
req->dest_cpu = RCU_MIGRATION_GOT_QS;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
} else {
req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
}
local_irq_enable();
@@ -7360,14 +7363,14 @@ void sched_idle_next(void)
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -7403,9 +7406,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -7671,13 +7674,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -7702,13 +7705,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -7718,30 +7721,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
complete(&req->done);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
@@ -7965,7 +7968,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
@@ -7991,7 +7994,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
@@ -8277,14 +8280,14 @@ enum s_alloc {
*/
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
static int
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *unused)
{
if (sg)
- *sg = &per_cpu(sched_group_cpus, cpu).sg;
+ *sg = &per_cpu(sched_groups, cpu).sg;
return cpu;
}
#endif /* CONFIG_SCHED_SMT */
@@ -9347,13 +9350,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
- spin_lock_init(&rt_rq->rt_runtime_lock);
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
@@ -9513,7 +9516,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9573,7 +9576,7 @@ void __init sched_init(void)
#elif defined CONFIG_USER_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
init_tg_rt_entry(&init_task_group,
- &per_cpu(init_rt_rq, i),
+ &per_cpu(init_rt_rq_var, i),
&per_cpu(init_sched_rt_entity, i), i, 1,
root_task_group.rt_se[i]);
#endif
@@ -9611,7 +9614,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*
@@ -9734,13 +9737,13 @@ void normalize_rt_tasks(void)
continue;
}
- spin_lock(&p->pi_lock);
+ raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
- spin_unlock(&p->pi_lock);
+ raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10103,9 +10106,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
struct rq *rq = cfs_rq->rq;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
@@ -10290,18 +10293,18 @@ static int tg_set_bandwidth(struct task_group *tg,
if (err)
goto unlock;
- spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
@@ -10406,15 +10409,15 @@ static int sched_rt_global_constraints(void)
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
- spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
@@ -10705,9 +10708,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
@@ -10723,9 +10726,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
@@ -10959,9 +10962,9 @@ void synchronize_sched_expedited(void)
init_completion(&req->done);
req->task = NULL;
req->dest_cpu = RCU_MIGRATION_NEED_QS;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
list_add(&req->list, &rq->migration_queue);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
wake_up_process(rq->migration_thread);
}
for_each_online_cpu(cpu) {
@@ -10969,11 +10972,11 @@ void synchronize_sched_expedited(void)
req = &per_cpu(rcu_migration_req, cpu);
rq = cpu_rq(cpu);
wait_for_completion(&req->done);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
need_full_sync = 1;
req->dest_cpu = RCU_MIGRATION_IDLE;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
synchronize_sched_expedited_count++;
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 0f052fc674d5..597b33099dfa 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
- spin_lock_irqsave(&vec->lock, flags);
+ raw_spin_lock_irqsave(&vec->lock, flags);
cpumask_set_cpu(cpu, vec->mask);
vec->count++;
if (vec->count == 1)
set_bit(newpri, cp->pri_active);
- spin_unlock_irqrestore(&vec->lock, flags);
+ raw_spin_unlock_irqrestore(&vec->lock, flags);
}
if (likely(oldpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
- spin_lock_irqsave(&vec->lock, flags);
+ raw_spin_lock_irqsave(&vec->lock, flags);
vec->count--;
if (!vec->count)
clear_bit(oldpri, cp->pri_active);
cpumask_clear_cpu(cpu, vec->mask);
- spin_unlock_irqrestore(&vec->lock, flags);
+ raw_spin_unlock_irqrestore(&vec->lock, flags);
}
*currpri = newpri;
@@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem)
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[i];
- spin_lock_init(&vec->lock);
+ raw_spin_lock_init(&vec->lock);
vec->count = 0;
if (!zalloc_cpumask_var(&vec->mask, gfp))
goto cleanup;
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 9a7e859b8fbf..7cb5bb6b95be 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -12,7 +12,7 @@
/* values 2-101 are RT priorities 0-99 */
struct cpupri_vec {
- spinlock_t lock;
+ raw_spinlock_t lock;
int count;
cpumask_var_t mask;
};
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5ae24fc65d75..67f95aada4b9 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 804a411838f1..5bedf6e3ebf3 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1955,7 +1955,7 @@ static void task_fork_fair(struct task_struct *p)
struct rq *rq = this_rq();
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (unlikely(task_cpu(p) != this_cpu))
__set_task_cpu(p, this_cpu);
@@ -1975,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p)
resched_task(rq->curr);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index b810e22772d5..21b969a28725 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
pr_err("bad: scheduling from the idle thread!\n");
dump_stack();
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index aecbd9c6b20c..d2ea2828164e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
weight = cpumask_weight(rd->span);
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
if (iter == rt_rq)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
rt_rq->rt_runtime += diff;
more = 1;
if (rt_rq->rt_runtime == rt_period) {
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
return more;
}
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
s64 want;
int i;
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
iter->rt_runtime -= want;
want -= want;
}
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__disable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
for_each_leaf_rt_rq(rt_rq, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__enable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
int more = 0;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
}
return more;
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (rt_rq->rt_time) {
u64 runtime;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running)
idle = 0;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
return idle;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
rt_rq = rt_rq_of_se(rt_se);
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
if (sched_rt_runtime_exceeded(rt_rq))
resched_task(curr);
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
}
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
task_running(rq, task) ||
!task->se.on_rq)) {
- spin_unlock(&lowest_rq->lock);
+ raw_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
}
diff --git a/kernel/smp.c b/kernel/smp.c
index a8c76069cf50..de735a6637d0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static struct {
struct list_head queue;
- spinlock_t lock;
+ raw_spinlock_t lock;
} call_function __cacheline_aligned_in_smp =
{
.queue = LIST_HEAD_INIT(call_function.queue),
- .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
};
enum {
@@ -35,7 +35,7 @@ struct call_function_data {
struct call_single_queue {
struct list_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
for_each_possible_cpu(i) {
struct call_single_queue *q = &per_cpu(call_single_queue, i);
- spin_lock_init(&q->lock);
+ raw_spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
unsigned long flags;
int ipi;
- spin_lock_irqsave(&dst->lock, flags);
+ raw_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
list_add_tail(&data->list, &dst->list);
- spin_unlock_irqrestore(&dst->lock, flags);
+ raw_spin_unlock_irqrestore(&dst->lock, flags);
/*
* The list addition should be visible before sending the IPI
@@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
void generic_smp_call_function_interrupt(void)
{
struct call_function_data *data;
- int cpu = get_cpu();
+ int cpu = smp_processor_id();
/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
if (!refs) {
- spin_lock(&call_function.lock);
+ raw_spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
- spin_unlock(&call_function.lock);
+ raw_spin_unlock(&call_function.lock);
}
if (refs)
@@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
csd_unlock(&data->csd);
}
- put_cpu();
}
/*
@@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
*/
WARN_ON_ONCE(!cpu_online(smp_processor_id()));
- spin_lock(&q->lock);
+ raw_spin_lock(&q->lock);
list_replace_init(&q->list, &list);
- spin_unlock(&q->lock);
+ raw_spin_unlock(&q->lock);
while (!list_empty(&list)) {
struct call_single_data *data;
@@ -449,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
cpumask_clear_cpu(this_cpu, data->cpumask);
atomic_set(&data->refs, cpumask_weight(data->cpumask));
- spin_lock_irqsave(&call_function.lock, flags);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
/*
* Place entry at the _HEAD_ of the list, so that any cpu still
* observing the entry in generic_smp_call_function_interrupt()
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
- spin_unlock_irqrestore(&call_function.lock, flags);
+ raw_spin_unlock_irqrestore(&call_function.lock, flags);
/*
* Make the list addition visible before sending the ipi.
@@ -501,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
- spin_lock(&call_function.lock);
+ raw_spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
- spin_unlock(&call_function.lock);
+ raw_spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
- spin_lock_irq(&call_function.lock);
+ raw_spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
- spin_unlock_irq(&call_function.lock);
+ raw_spin_unlock_irq(&call_function.lock);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 21939d9e830e..a09502e2ef75 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -697,7 +697,7 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
{
set_current_state(TASK_INTERRUPTIBLE);
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+ p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
if (IS_ERR(p)) {
printk("ksoftirqd for %i failed\n", hotcpu);
return NOTIFY_BAD;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81324d12eb35..d22579087e27 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -22,9 +22,9 @@
static DEFINE_SPINLOCK(print_lock);
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
{
int this_cpu = raw_smp_processor_id();
- __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+ __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
}
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(touch_timestamp) = 0;
+ __raw_get_cpu_var(softlockup_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
/* Cause each CPU to re-update its timestamp rather than complain */
for_each_online_cpu(cpu)
- per_cpu(touch_timestamp, cpu) = 0;
+ per_cpu(softlockup_touch_ts, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
void softlockup_tick(void)
{
int this_cpu = smp_processor_id();
- unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
- unsigned long print_timestamp;
+ unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+ unsigned long print_ts;
struct pt_regs *regs = get_irq_regs();
unsigned long now;
/* Is detection switched off? */
- if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+ if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
/* Be sure we don't false trigger if switched back on */
- if (touch_timestamp)
- per_cpu(touch_timestamp, this_cpu) = 0;
+ if (touch_ts)
+ per_cpu(softlockup_touch_ts, this_cpu) = 0;
return;
}
- if (touch_timestamp == 0) {
+ if (touch_ts == 0) {
__touch_softlockup_watchdog();
return;
}
- print_timestamp = per_cpu(print_timestamp, this_cpu);
+ print_ts = per_cpu(softlockup_print_ts, this_cpu);
/* report at most once a second */
- if (print_timestamp == touch_timestamp || did_panic)
+ if (print_ts == touch_ts || did_panic)
return;
/* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
* Wake up the high-prio watchdog task twice per
* threshold timespan.
*/
- if (now > touch_timestamp + softlockup_thresh/2)
- wake_up_process(per_cpu(watchdog_task, this_cpu));
+ if (now > touch_ts + softlockup_thresh/2)
+ wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
/* Warn about unreasonable delays: */
- if (now <= (touch_timestamp + softlockup_thresh))
+ if (now <= (touch_ts + softlockup_thresh))
return;
- per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+ per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
spin_lock(&print_lock);
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
- this_cpu, now - touch_timestamp,
+ this_cpu, now - touch_ts,
current->comm, task_pid_nr(current));
print_modules();
print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- BUG_ON(per_cpu(watchdog_task, hotcpu));
+ BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
if (IS_ERR(p)) {
printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
- per_cpu(touch_timestamp, hotcpu) = 0;
- per_cpu(watchdog_task, hotcpu) = p;
+ per_cpu(softlockup_touch_ts, hotcpu) = 0;
+ per_cpu(softlockup_watchdog, hotcpu) = p;
kthread_bind(p, hotcpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- wake_up_process(per_cpu(watchdog_task, hotcpu));
+ wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- if (!per_cpu(watchdog_task, hotcpu))
+ if (!per_cpu(softlockup_watchdog, hotcpu))
break;
/* Unbind so it can run. Fall thru. */
- kthread_bind(per_cpu(watchdog_task, hotcpu),
+ kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
cpumask_any(cpu_online_mask));
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- p = per_cpu(watchdog_task, hotcpu);
- per_cpu(watchdog_task, hotcpu) = NULL;
+ p = per_cpu(softlockup_watchdog, hotcpu);
+ per_cpu(softlockup_watchdog, hotcpu) = NULL;
kthread_stop(p);
break;
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 41e042219ff6..be6517fb9c14 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -32,6 +32,8 @@
* include/linux/spinlock_api_smp.h
*/
#else
+#define raw_read_can_lock(l) read_can_lock(l)
+#define raw_write_can_lock(l) write_can_lock(l)
/*
* We build the __lock_function inlines here. They are too large for
* inlining all over the place, but here is only one user per function
@@ -42,49 +44,49 @@
* towards that other CPU that it should break the lock ASAP.
*/
#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc __##op##_lock(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
- if (likely(_raw_##op##_trylock(lock))) \
+ if (likely(do_raw_##op##_trylock(lock))) \
break; \
preempt_enable(); \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
+ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
} \
\
-unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
+unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
for (;;) { \
preempt_disable(); \
local_irq_save(flags); \
- if (likely(_raw_##op##_trylock(lock))) \
+ if (likely(do_raw_##op##_trylock(lock))) \
break; \
local_irq_restore(flags); \
preempt_enable(); \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
+ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
return flags; \
} \
\
-void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
- _##op##_lock_irqsave(lock); \
+ _raw_##op##_lock_irqsave(lock); \
} \
\
-void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -93,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
/* irq-disabling. We use the generic preemption-aware */ \
/* function: */ \
/**/ \
- flags = _##op##_lock_irqsave(lock); \
+ flags = _raw_##op##_lock_irqsave(lock); \
local_bh_disable(); \
local_irq_restore(flags); \
} \
@@ -107,269 +109,269 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_irqsave()
* __[spin|read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
- preempt_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ return __raw_spin_trylock(lock);
}
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_raw_spin_trylock);
+#endif
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
- int subclass)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
- _raw_spin_lock_flags, &flags);
- return flags;
+ return __raw_spin_trylock_bh(lock);
}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_raw_spin_trylock_bh);
+#endif
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
- struct lockdep_map *nest_lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
- preempt_disable();
- spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __raw_spin_lock(lock);
}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
-
+EXPORT_SYMBOL(_raw_spin_lock);
#endif
-#ifndef CONFIG_INLINE_SPIN_TRYLOCK
-int __lockfunc _spin_trylock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
- return __spin_trylock(lock);
+ return __raw_spin_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_spin_trylock);
+EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_READ_TRYLOCK
-int __lockfunc _read_trylock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
- return __read_trylock(lock);
+ __raw_spin_lock_irq(lock);
}
-EXPORT_SYMBOL(_read_trylock);
+EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_TRYLOCK
-int __lockfunc _write_trylock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
- return __write_trylock(lock);
+ __raw_spin_lock_bh(lock);
}
-EXPORT_SYMBOL(_write_trylock);
+EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK
-void __lockfunc _read_lock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
- __read_lock(lock);
+ __raw_spin_unlock(lock);
}
-EXPORT_SYMBOL(_read_lock);
+EXPORT_SYMBOL(_raw_spin_unlock);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
- return __spin_lock_irqsave(lock);
+ __raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_spin_lock_irqsave);
+EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
- __spin_lock_irq(lock);
+ __raw_spin_unlock_irq(lock);
}
-EXPORT_SYMBOL(_spin_lock_irq);
+EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK_BH
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
- __spin_lock_bh(lock);
+ __raw_spin_unlock_bh(lock);
}
-EXPORT_SYMBOL(_spin_lock_bh);
+EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
- return __read_lock_irqsave(lock);
+ return __raw_read_trylock(lock);
}
-EXPORT_SYMBOL(_read_lock_irqsave);
+EXPORT_SYMBOL(_raw_read_trylock);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_IRQ
-void __lockfunc _read_lock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _raw_read_lock(rwlock_t *lock)
{
- __read_lock_irq(lock);
+ __raw_read_lock(lock);
}
-EXPORT_SYMBOL(_read_lock_irq);
+EXPORT_SYMBOL(_raw_read_lock);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_BH
-void __lockfunc _read_lock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
- __read_lock_bh(lock);
+ return __raw_read_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_read_lock_bh);
+EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
- return __write_lock_irqsave(lock);
+ __raw_read_lock_irq(lock);
}
-EXPORT_SYMBOL(_write_lock_irqsave);
+EXPORT_SYMBOL(_raw_read_lock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
-void __lockfunc _write_lock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
- __write_lock_irq(lock);
+ __raw_read_lock_bh(lock);
}
-EXPORT_SYMBOL(_write_lock_irq);
+EXPORT_SYMBOL(_raw_read_lock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_BH
-void __lockfunc _write_lock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK
+void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
- __write_lock_bh(lock);
+ __raw_read_unlock(lock);
}
-EXPORT_SYMBOL(_write_lock_bh);
+EXPORT_SYMBOL(_raw_read_unlock);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- __spin_lock(lock);
+ __raw_read_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_spin_lock);
+EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK
-void __lockfunc _write_lock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
- __write_lock(lock);
+ __raw_read_unlock_irq(lock);
}
-EXPORT_SYMBOL(_write_lock);
+EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
- __spin_unlock(lock);
+ __raw_read_unlock_bh(lock);
}
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK
-void __lockfunc _write_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
- __write_unlock(lock);
+ return __raw_write_trylock(lock);
}
-EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(_raw_write_trylock);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK
-void __lockfunc _read_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _raw_write_lock(rwlock_t *lock)
{
- __read_unlock(lock);
+ __raw_write_lock(lock);
}
-EXPORT_SYMBOL(_read_unlock);
+EXPORT_SYMBOL(_raw_write_lock);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
- __spin_unlock_irqrestore(lock, flags);
+ return __raw_write_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
- __spin_unlock_irq(lock);
+ __raw_write_lock_irq(lock);
}
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_raw_write_lock_irq);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
- __spin_unlock_bh(lock);
+ __raw_write_lock_bh(lock);
}
-EXPORT_SYMBOL(_spin_unlock_bh);
+EXPORT_SYMBOL(_raw_write_lock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
+void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
- __read_unlock_irqrestore(lock, flags);
+ __raw_write_unlock(lock);
}
-EXPORT_SYMBOL(_read_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_unlock);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- __read_unlock_irq(lock);
+ __raw_write_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_read_unlock_irq);
+EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK_BH
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
- __read_unlock_bh(lock);
+ __raw_write_unlock_irq(lock);
}
-EXPORT_SYMBOL(_read_unlock_bh);
+EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
- __write_unlock_irqrestore(lock, flags);
+ __raw_write_unlock_bh(lock);
}
-EXPORT_SYMBOL(_write_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
- __write_unlock_irq(lock);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-EXPORT_SYMBOL(_write_unlock_irq);
-#endif
+EXPORT_SYMBOL(_raw_spin_lock_nested);
-#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
+unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
+ int subclass)
{
- __write_unlock_bh(lock);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+ do_raw_spin_lock_flags, &flags);
+ return flags;
}
-EXPORT_SYMBOL(_write_unlock_bh);
-#endif
+EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
-#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
+ struct lockdep_map *nest_lock)
{
- return __spin_trylock_bh(lock);
+ preempt_disable();
+ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
+
#endif
notrace int in_lock_functions(unsigned long addr)
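/*
 * Illustrative sketch (hypothetical locks, not part of this patch): the
 * out-of-line _raw_spin_lock_nested() and friends added above back the
 * spin_lock_nested() API when CONFIG_DEBUG_LOCK_ALLOC is enabled.
 * Nesting two locks of the same lock class needs the _nested form so
 * lockdep does not raise a false recursive-locking report:
 */
static DEFINE_SPINLOCK(example_parent_lock);
static DEFINE_SPINLOCK(example_child_lock);

static void example_nested_locking(void)
{
	spin_lock(&example_parent_lock);
	spin_lock_nested(&example_child_lock, SINGLE_DEPTH_NESTING);
	/* ... both objects are now protected ... */
	spin_unlock(&example_child_lock);
	spin_unlock(&example_parent_lock);
}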
diff --git a/kernel/sys.c b/kernel/sys.c
index 585d6cd10040..20ccfb5da6af 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -189,10 +189,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
!(user = find_user(who)))
goto out_unlock; /* No processes for this user */
- do_each_thread(g, p)
+ do_each_thread(g, p) {
if (__task_cred(p)->uid == who)
error = set_one_prio(p, niceval, error);
- while_each_thread(g, p);
+ } while_each_thread(g, p);
if (who != cred->uid)
free_uid(user); /* For find_user() */
break;
@@ -252,13 +252,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
!(user = find_user(who)))
goto out_unlock; /* No processes for this user */
- do_each_thread(g, p)
+ do_each_thread(g, p) {
if (__task_cred(p)->uid == who) {
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
}
- while_each_thread(g, p);
+ } while_each_thread(g, p);
if (who != cred->uid)
free_uid(user); /* for find_user() */
break;
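/*
 * Illustrative sketch (hypothetical function, not from this patch): the
 * do_each_thread()/while_each_thread() pair used in the hunks above
 * iterates over every thread in the system and behaves like a
 * do { ... } while loop, so the body can simply be braced as shown.
 * Callers are expected to hold tasklist_lock (or otherwise keep the
 * task list stable):
 */
static void example_walk_threads(void)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		if (p->mm)		/* e.g. skip pure kernel threads */
			pr_debug("thread: %s\n", p->comm);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}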
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 554ac4894f0f..45e4bef0012a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1051,7 +1051,7 @@ static struct ctl_table vm_table[] = {
.extra2 = &one_hundred,
},
#ifdef CONFIG_HUGETLB_PAGE
- {
+ {
.procname = "nr_hugepages",
.data = NULL,
.maxlen = sizeof(unsigned long),
@@ -1059,7 +1059,18 @@ static struct ctl_table vm_table[] = {
.proc_handler = hugetlb_sysctl_handler,
.extra1 = (void *)&hugetlb_zero,
.extra2 = (void *)&hugetlb_infinity,
- },
+ },
+#ifdef CONFIG_NUMA
+ {
+ .procname = "nr_hugepages_mempolicy",
+ .data = NULL,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &hugetlb_mempolicy_sysctl_handler,
+ .extra1 = (void *)&hugetlb_zero,
+ .extra2 = (void *)&hugetlb_infinity,
+ },
+#endif
{
.procname = "hugetlb_shm_group",
.data = &sysctl_hugetlb_shm_group,
@@ -1120,7 +1131,8 @@ static struct ctl_table vm_table[] = {
.data = &sysctl_max_map_count,
.maxlen = sizeof(sysctl_max_map_count),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
},
#else
{
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 20a8920029ee..3d5fc0fd1cca 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released);
static RAW_NOTIFIER_HEAD(clockevents_chain);
/* Protection for the above */
-static DEFINE_SPINLOCK(clockevents_lock);
+static DEFINE_RAW_SPINLOCK(clockevents_lock);
/**
* clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
@@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb)
unsigned long flags;
int ret;
- spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
ret = raw_notifier_chain_register(&clockevents_chain, nb);
- spin_unlock_irqrestore(&clockevents_lock, flags);
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
return ret;
}
@@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev)
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
BUG_ON(!dev->cpumask);
- spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
list_add(&dev->list, &clockevent_devices);
clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
clockevents_notify_released();
- spin_unlock_irqrestore(&clockevents_lock, flags);
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);
@@ -241,7 +241,7 @@ void clockevents_notify(unsigned long reason, void *arg)
struct list_head *node, *tmp;
unsigned long flags;
- spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
clockevents_do_notify(reason, arg);
switch (reason) {
@@ -256,7 +256,7 @@ void clockevents_notify(unsigned long reason, void *arg)
default:
break;
}
- spin_unlock_irqrestore(&clockevents_lock, flags);
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
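/*
 * Illustrative sketch (hypothetical lock, not from this patch): the
 * conversions above switch low-level timekeeping locks from spinlock_t
 * to raw_spinlock_t, which always remains a busy-waiting lock even on
 * kernels where spinlock_t may be substituted by a sleeping lock.  The
 * calling pattern is unchanged apart from the raw_ prefix:
 */
static DEFINE_RAW_SPINLOCK(example_clock_lock);

static void example_update_clock_state(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_clock_lock, flags);
	/* ... modify clockevent/tick state ... */
	raw_spin_unlock_irqrestore(&example_clock_lock, flags);
}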
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c2ec25087a35..b3bafd5fc66d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
-static DEFINE_SPINLOCK(tick_broadcast_lock);
+static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Devices might be registered with both periodic and oneshot
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
tick_broadcast_clear_oneshot(cpu);
}
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask)
*/
static void tick_do_periodic_broadcast(void)
{
- spin_lock(&tick_broadcast_lock);
+ raw_spin_lock(&tick_broadcast_lock);
cpumask_and(to_cpumask(tmpmask),
cpu_online_mask, tick_get_broadcast_mask());
tick_do_broadcast(to_cpumask(tmpmask));
- spin_unlock(&tick_broadcast_lock);
+ raw_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
unsigned long flags;
int cpu, bc_stopped;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
tick_broadcast_setup_oneshot(bc);
}
out:
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
@@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
unsigned long flags;
unsigned int cpu = *cpup;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
@@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
clockevents_shutdown(bc);
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
void tick_suspend_broadcast(void)
@@ -317,13 +317,13 @@ void tick_suspend_broadcast(void)
struct clock_event_device *bc;
unsigned long flags;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
if (bc)
clockevents_shutdown(bc);
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
int tick_resume_broadcast(void)
@@ -332,7 +332,7 @@ int tick_resume_broadcast(void)
unsigned long flags;
int broadcast = 0;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
@@ -351,7 +351,7 @@ int tick_resume_broadcast(void)
break;
}
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return broadcast;
}
@@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
ktime_t now, next_event;
int cpu;
- spin_lock(&tick_broadcast_lock);
+ raw_spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
next_event.tv64 = KTIME_MAX;
@@ -443,7 +443,7 @@ again:
if (tick_broadcast_set_event(next_event, 0))
goto again;
}
- spin_unlock(&tick_broadcast_lock);
+ raw_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
unsigned long flags;
int cpu;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Periodic mode does not care about the enter/exit of power
@@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
}
out:
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
@@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void)
struct clock_event_device *bc;
unsigned long flags;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
@@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
unsigned long flags;
unsigned int cpu = *cpup;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Clear the broadcast mask flag for the dead cpu, but do not
@@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
*/
cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 83c4417b6a3c..b6b898d2eeef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-DEFINE_SPINLOCK(tick_device_lock);
+static DEFINE_RAW_SPINLOCK(tick_device_lock);
/*
* Debugging: see timer_list.c
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
int cpu, ret = NOTIFY_OK;
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
cpu = smp_processor_id();
if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return NOTIFY_STOP;
out_bc:
@@ -278,7 +278,7 @@ out_bc:
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
}
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup)
struct clock_event_device *dev = td->evtdev;
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
td->mode = TICKDEV_MODE_PERIODIC;
if (dev) {
/*
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup)
clockevents_exchange_device(dev, NULL);
td->evtdev = NULL;
}
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_suspend(void)
@@ -330,9 +330,9 @@ static void tick_suspend(void)
struct tick_device *td = &__get_cpu_var(tick_cpu_device);
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
clockevents_shutdown(td->evtdev);
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_resume(void)
@@ -341,7 +341,7 @@ static void tick_resume(void)
unsigned long flags;
int broadcast = tick_resume_broadcast();
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
if (!broadcast) {
@@ -350,7 +350,7 @@ static void tick_resume(void)
else
tick_resume_oneshot();
}
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index b1c05bf75ee0..290eefbc1f60 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -6,7 +6,6 @@
#define TICK_DO_TIMER_BOOT -2
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
-extern spinlock_t tick_device_lock;
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 9d80db4747d4..28265636b6c2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
next_one:
i = 0;
- spin_lock_irqsave(&base->cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
curr = base->first;
/*
@@ -100,13 +100,13 @@ next_one:
timer = rb_entry(curr, struct hrtimer, node);
tmp = *timer;
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
print_timer(m, timer, &tmp, i, now);
next++;
goto next_one;
}
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
}
static void
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7ec..2f3b585b8d7d 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
/*
* Per-CPU lookup locks for fast hash lookup:
*/
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
/*
* Mutex to serialize state changes with show-stats activities:
@@ -238,14 +238,14 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
/*
* It doesnt matter which lock we take:
*/
- spinlock_t *lock;
+ raw_spinlock_t *lock;
struct entry *entry, input;
unsigned long flags;
if (likely(!timer_stats_active))
return;
- lock = &per_cpu(lookup_lock, raw_smp_processor_id());
+ lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
input.timer = timer;
input.start_func = startf;
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
input.pid = pid;
input.timer_flag = timer_flag;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (!timer_stats_active)
goto out_unlock;
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
atomic_inc(&overflow_count);
out_unlock:
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void print_name_offset(struct seq_file *m, unsigned long addr)
@@ -348,9 +348,11 @@ static void sync_access(void)
int cpu;
for_each_online_cpu(cpu) {
- spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+ raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
+
+ raw_spin_lock_irqsave(lock, flags);
/* nothing */
- spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
}
@@ -408,7 +410,7 @@ void __init init_timer_stats(void)
int cpu;
for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(lookup_lock, cpu));
+ raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
}
static int __init init_tstats_procfs(void)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
spinlock_t reader_lock; /* serialize readers */
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
int ret;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
again:
/*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
goto again;
out:
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
synchronize_sched();
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88bd9ae2a9ed..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
*/
static int tracing_disabled = 1;
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
- local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
}
static inline void ftrace_enable_cpu(void)
{
- local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
preempt_enable();
}
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
*/
static struct trace_array max_tr;
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*
* It is also used in other places outside the update_max_tr
* so it needs to be defined outside of the
* CONFIG_TRACER_MAX_TRACE.
*/
-static raw_spinlock_t ftrace_max_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
__update_max_tr(tr, tsk, cpu);
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
}
/**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!__raw_spin_trylock(&trace_cmdline_lock))
+ if (!arch_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- __raw_spin_unlock(&trace_cmdline_lock);
+ arch_spin_unlock(&trace_cmdline_lock);
}
void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
}
preempt_disable();
- __raw_spin_lock(&trace_cmdline_lock);
+ arch_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
strcpy(comm, saved_cmdlines[map]);
else
strcpy(comm, "<...>");
- __raw_spin_unlock(&trace_cmdline_lock);
+ arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
- static raw_spinlock_t trace_buf_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static u32 trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
/* Lockdep uses trace_printk for lock tracing */
local_irq_save(flags);
- __raw_spin_lock(&trace_buf_lock);
+ arch_spin_lock(&trace_buf_lock);
len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
ring_buffer_unlock_commit(buffer, event);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
+ arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
- static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
pause_graph_tracing();
raw_local_irq_save(irq_flags);
- __raw_spin_lock(&trace_buf_lock);
+ arch_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
ring_buffer_unlock_commit(buffer, event);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
+ arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
mutex_lock(&tracing_cpumask_update_lock);
local_irq_disable();
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
atomic_dec(&global_trace.data[cpu]->disabled);
}
}
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
static void __ftrace_dump(bool disable_tracing)
{
- static raw_spinlock_t ftrace_dump_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t ftrace_dump_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
unsigned int old_userobj;
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
/* only one dump */
local_irq_save(flags);
- __raw_spin_lock(&ftrace_dump_lock);
+ arch_spin_lock(&ftrace_dump_lock);
if (dump_ran)
goto out;
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
}
out:
- __raw_spin_unlock(&ftrace_dump_lock);
+ arch_spin_unlock(&ftrace_dump_lock);
local_irq_restore(flags);
}
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
/* Allocate the first page for all buffers */
for_each_tracing_cpu(i) {
global_trace.data[i] = &per_cpu(global_trace_cpu, i);
- max_tr.data[i] = &per_cpu(max_data, i);
+ max_tr.data[i] = &per_cpu(max_tr_data, i);
}
trace_init_cmdlines();
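/*
 * Illustrative sketch (hypothetical variable, not from this patch): the
 * ftrace_cpu_disabled counter above becomes a plain per-CPU int updated
 * with the __this_cpu_*() helpers instead of a local_t, following the
 * same pattern as the hunks above:
 */
static DEFINE_PER_CPU(int, example_disabled);

static void example_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(per_cpu_var(example_disabled));
}

static void example_enable_cpu(void)
{
	__this_cpu_dec(per_cpu_var(example_disabled));
	preempt_enable();
}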
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7fa33cab6962..a52bed2eedd8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386ba..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
/* keep prev_time and lock in the same cacheline. */
static struct {
u64 prev_time;
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
{
- .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+ .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
};
u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
if (unlikely(in_nmi()))
goto out;
- __raw_spin_lock(&trace_clock_struct.lock);
+ arch_spin_lock(&trace_clock_struct.lock);
/*
* TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
trace_clock_struct.prev_time = now;
- __raw_spin_unlock(&trace_clock_struct.lock);
+ arch_spin_unlock(&trace_clock_struct.lock);
out:
raw_local_irq_restore(flags);
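/*
 * Illustrative sketch (hypothetical lock, not from this patch): the
 * renamed arch_spinlock_t is the lowest-level lock type and bypasses
 * lockdep and preemption accounting, so callers such as the tracing
 * code above disable interrupts (and, where needed, preemption)
 * themselves around arch_spin_lock()/arch_spin_unlock():
 */
static arch_spinlock_t example_arch_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void example_arch_locking(void)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&example_arch_lock);
	/* ... very small critical section ... */
	arch_spin_unlock(&example_arch_lock);
	local_irq_restore(flags);
}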
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a43d009c561a..b1342c5d37cf 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ent_entry *entry;
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
return 0;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ret_entry *entry;
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd5..7b97000745f5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
#define BTS_BUFFER_SIZE (1 << 13)
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
static void bts_trace_init_cpu(int cpu)
{
- per_cpu(tracer, cpu) =
- ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
+ per_cpu(hwb_tracer, cpu) =
+ ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+ BTS_BUFFER_SIZE, NULL, (size_t)-1,
+ BTS_KERNEL);
- if (IS_ERR(per_cpu(tracer, cpu)))
- per_cpu(tracer, cpu) = NULL;
+ if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+ per_cpu(hwb_tracer, cpu) = NULL;
}
static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
for_each_online_cpu(cpu) {
bts_trace_init_cpu(cpu);
- if (likely(per_cpu(tracer, cpu)))
+ if (likely(per_cpu(hwb_tracer, cpu)))
trace_hw_branches_enabled = 1;
}
trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
get_online_cpus();
for_each_online_cpu(cpu) {
- if (likely(per_cpu(tracer, cpu))) {
- ds_release_bts(per_cpu(tracer, cpu));
- per_cpu(tracer, cpu) = NULL;
+ if (likely(per_cpu(hwb_tracer, cpu))) {
+ ds_release_bts(per_cpu(hwb_tracer, cpu));
+ per_cpu(hwb_tracer, cpu) = NULL;
}
}
trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
get_online_cpus();
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_resume_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_resume_bts(per_cpu(hwb_tracer, cpu));
trace_hw_branches_suspended = 0;
put_online_cpus();
}
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
get_online_cpus();
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_suspend_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_suspend_bts(per_cpu(hwb_tracer, cpu));
trace_hw_branches_suspended = 1;
put_online_cpus();
}
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
bts_trace_init_cpu(cpu);
if (trace_hw_branches_suspended &&
- likely(per_cpu(tracer, cpu)))
- ds_suspend_bts(per_cpu(tracer, cpu));
+ likely(per_cpu(hwb_tracer, cpu)))
+ ds_suspend_bts(per_cpu(hwb_tracer, cpu));
}
break;
case CPU_DOWN_PREPARE:
/* The notification is sent with interrupts enabled. */
- if (likely(per_cpu(tracer, cpu))) {
- ds_release_bts(per_cpu(tracer, cpu));
- per_cpu(tracer, cpu) = NULL;
+ if (likely(per_cpu(hwb_tracer, cpu))) {
+ ds_release_bts(per_cpu(hwb_tracer, cpu));
+ per_cpu(hwb_tracer, cpu) = NULL;
}
}
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
get_online_cpus();
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_suspend_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_suspend_bts(per_cpu(hwb_tracer, cpu));
/*
* We need to collect the trace on the respective cpu since ftrace
* implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
on_each_cpu(trace_bts_cpu, iter->tr, 1);
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_resume_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_resume_bts(per_cpu(hwb_tracer, cpu));
put_online_cpus();
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
-static raw_spinlock_t wakeup_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr);
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
goto out;
local_irq_save(flags);
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
out_unlock:
__wakeup_reset(wakeup_trace);
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
tracing_reset_online_cpus(tr);
local_irq_save(flags);
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
__wakeup_reset(tr);
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
}
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
goto out;
/* interrupts should be off from try_to_wake_up */
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
/* check for races. */
if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839a..280fea470d67 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
/* Don't allow flipping of max traces now */
local_irq_save(flags);
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
cnt = ring_buffer_entries(tr->buffer);
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
break;
}
tracing_on();
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
local_irq_restore(flags);
if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..678a5120ee30 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
};
static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
return;
local_irq_save(flags);
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
/* a race could have already updated it */
if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
}
out:
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
}
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
return ret;
local_irq_save(flags);
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
*ptr = val;
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
local_irq_disable();
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void t_stop(struct seq_file *m, void *p)
{
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_enable();
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2f22cf4576db..8cf9938dd147 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -575,7 +575,7 @@ config DEBUG_BUGVERBOSE
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
- default !EMBEDDED
+ default y
help
Say Y here to make BUG() panics output the file name and line number
of the BUG call as well as the EIP and oops trace. This aids
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5bc..4b1b083f219c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -4,17 +4,10 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
-static const char *skip_sep(const char *cp)
-{
- while (*cp && isspace(*cp))
- cp++;
-
- return cp;
-}
-
static const char *skip_arg(const char *cp)
{
while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
int count = 0;
while (*str) {
- str = skip_sep(str);
+ str = skip_spaces(str);
if (*str) {
count++;
str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
argvp = argv;
while (*str) {
- str = skip_sep(str);
+ str = skip_spaces(str);
if (*str) {
const char *p = str;
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce38..02e3b31b3a79 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,6 +42,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
+#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
+
+static inline u32
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
+{
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
+# else
+# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+# endif
+ const u32 *b = (const u32 *)buf;
+ size_t rem_len;
+
+ /* Align it */
+ if (unlikely((long)b & 3 && len)) {
+ u8 *p = (u8 *)b;
+ do {
+ DO_CRC(*p++);
+ } while ((--len) && ((long)p)&3);
+ b = (u32 *)p;
+ }
+ rem_len = len & 3;
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ len = len >> 2;
+ for (--b; len; --len) {
+ crc ^= *++b; /* use pre increment for speed */
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ }
+ len = rem_len;
+ /* And the last few bytes */
+ if (len) {
+ u8 *p = (u8 *)(b + 1) - 1;
+ do {
+ DO_CRC(*++p); /* use pre increment for speed */
+ } while (--len);
+ }
+ return crc;
+}
+#endif
/**
* crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,48 +114,10 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
- const u32 *b =(u32 *)p;
const u32 *tab = crc32table_le;
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
-# else
-# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
-# endif
-
crc = __cpu_to_le32(crc);
- /* Align it */
- if(unlikely(((long)b)&3 && len)){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while ((--len) && ((long)b)&3 );
- }
- if(likely(len >= 4)){
- /* load data 32 bits wide, xor data 32 bits wide. */
- size_t save_len = len & 3;
- len = len >> 2;
- --b; /* use pre increment below(*++b) for speed */
- do {
- crc ^= *++b;
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- } while (--len);
- b++; /* point to next byte(s) */
- len = save_len;
- }
- /* And the last few bytes */
- if(len){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while (--len);
- }
-
+ crc = crc32_body(crc, p, len, tab);
return __le32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
@@ -170,47 +174,10 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
- const u32 *b =(u32 *)p;
const u32 *tab = crc32table_be;
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
-# else
-# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
-# endif
-
crc = __cpu_to_be32(crc);
- /* Align it */
- if(unlikely(((long)b)&3 && len)){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (u32 *)p;
- } while ((--len) && ((long)b)&3 );
- }
- if(likely(len >= 4)){
- /* load data 32 bits wide, xor data 32 bits wide. */
- size_t save_len = len & 3;
- len = len >> 2;
- --b; /* use pre increment below(*++b) for speed */
- do {
- crc ^= *++b;
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- } while (--len);
- b++; /* point to next byte(s) */
- len = save_len;
- }
- /* And the last few bytes */
- if(len){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while (--len);
- }
+ crc = crc32_body(crc, p, len, tab);
return __be32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
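/*
 * Illustrative sketch (hypothetical caller, not from this patch): after
 * the refactoring above, crc32_le() and crc32_be() share the new
 * crc32_body() helper; their prototypes and external behaviour are
 * unchanged, e.g.:
 */
#include <linux/crc32.h>

static u32 example_ethernet_crc(const unsigned char *buf, size_t len)
{
	/* conventional Ethernet-style seed and final inversion */
	return crc32_le(~0U, buf, len) ^ ~0U;
}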
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a322..26baa620e95b 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
@@ -7,30 +7,30 @@
#include <linux/ctype.h>
#include <linux/module.h>
-unsigned char _ctype[] = {
-_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
-_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
-_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
-_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
-_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
-_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
-_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
-_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
-_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
-_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
-_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
-_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
-_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
-_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index eae56fddfa3b..a9a8996d286a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -26,14 +26,14 @@
struct debug_bucket {
struct hlist_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
-static DEFINE_SPINLOCK(pool_lock);
+static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
@@ -96,10 +96,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- spin_lock(&pool_lock);
+ raw_spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
- spin_unlock(&pool_lock);
+ raw_spin_unlock(&pool_lock);
return obj;
}
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
struct debug_obj *obj;
unsigned long flags;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
}
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj)
unsigned long flags;
int sched = 0;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
/*
* schedule work when the pool is filled and the cache is
* initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj)
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
if (sched)
schedule_work(&debug_obj_work);
}
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_move_list(&db->list, &freelist);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "init");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_init, addr, state);
return;
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_activate, addr, state);
return;
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
default:
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
return;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* This happens when a static object is activated. We
* let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
debug_print_object(&o, "deactivate");
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "destroy");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
break;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "free");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
hlist_del(&obj->node);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -597,7 +597,7 @@ repeat:
break;
}
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
}
res = 0;
out:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
if (res)
debug_objects_enabled = 0;
return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(void)
int i;
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
- spin_lock_init(&obj_hash[i].lock);
+ raw_spin_lock_init(&obj_hash[i].lock);
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
hlist_add_head(&obj_static_pool[i].node, &obj_pool);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e22c148e4b7f..f93502915988 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
@@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
char *end;
/* Skip leading whitespace */
- while (*buf && isspace(*buf))
- buf++;
+ buf = skip_spaces(buf);
if (!*buf)
break; /* oh, it was trailing whitespace */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -23,7 +23,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
/*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
* If it successfully gets the lock, it should increment
* the preemption count like any spinlock does.
*
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
* return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
- while (!_raw_spin_trylock(&kernel_flag)) {
+ while (!do_raw_spin_trylock(&kernel_flag)) {
if (need_resched())
return -EAGAIN;
cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
void __lockfunc __release_kernel_lock(void)
{
- _raw_spin_unlock(&kernel_flag);
+ do_raw_spin_unlock(&kernel_flag);
preempt_enable_no_resched();
}
/*
* These are the BKL spinlocks - we try to be polite about preemption.
* If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
*/
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
preempt_disable();
- if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+ if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
/*
* If preemption was disabled even before this
* was called, there's nothing we can be polite
* about - just spin.
*/
if (preempt_count() > 1) {
- _raw_spin_lock(&kernel_flag);
+ do_raw_spin_lock(&kernel_flag);
return;
}
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
- while (spin_is_locked(&kernel_flag))
+ while (raw_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
- } while (!_raw_spin_trylock(&kernel_flag));
+ } while (!do_raw_spin_trylock(&kernel_flag));
}
}
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
*/
static inline void __lock_kernel(void)
{
- _raw_spin_lock(&kernel_flag);
+ do_raw_spin_lock(&kernel_flag);
}
#endif
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
* the BKL is not covered by lockdep, so we open-code the
* unlocking sequence (and thus avoid the dep-chain ops):
*/
- _raw_spin_unlock(&kernel_flag);
+ do_raw_spin_unlock(&kernel_flag);
preempt_enable();
}
diff --git a/lib/parser.c b/lib/parser.c
index b00d02059a5f..fb34977246bb 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
args[argc].from = s;
switch (*p++) {
- case 's':
- if (strlen(s) == 0)
+ case 's': {
+ size_t str_len = strlen(s);
+
+ if (str_len == 0)
return 0;
- else if (len == -1 || len > strlen(s))
- len = strlen(s);
+ if (len == -1 || len > str_len)
+ len = str_len;
args[argc].to = s + len;
break;
+ }
case 'd':
simple_strtol(s, &args[argc].to, 0);
goto num;
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->lock);
- if (head->lock)
- WARN_ON_SMP(!spin_is_locked(head->lock));
+ WARN_ON(!head->rawlock && !head->spinlock);
+ if (head->rawlock)
+ WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+ if (head->spinlock)
+ WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 9df3ca56db11..ccf95bff7984 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,6 +17,19 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
+int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ int ret = 1;
+ unsigned long flags;
+
+ if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ ret = (sem->activity != 0);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rwsem_is_locked);
+
/*
* initialise the semaphore
*/
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
+EXPORT_SYMBOL(__init_rwsem);
/*
* handle the lock release when processes blocked on it that can now run
@@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
-EXPORT_SYMBOL(__init_rwsem);
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write_nested);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
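The spinlock-based rwsem variant gains its own rwsem_is_locked(), implemented with a trylock on wait_lock, and the EXPORT_SYMBOL()s move next to their definitions. A hedged usage sketch follows; "example_sem" is invented for illustration, the helper is typically used for sanity checks like this:

#include <linux/kernel.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void example_assert_sem_held(void)
{
	/* cheap assertion that some path acquired example_sem */
	WARN_ON(!rwsem_is_locked(&example_sem));
}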
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
-void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+ SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_spin_trylock(&lock->raw_lock))
+ if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
-void _raw_spin_lock(spinlock_t *lock)
+void do_raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
- if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+ if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
__spin_lock_debug(lock);
debug_spin_lock_after(lock);
}
-int _raw_spin_trylock(spinlock_t *lock)
+int do_raw_spin_trylock(raw_spinlock_t *lock)
{
- int ret = __raw_spin_trylock(&lock->raw_lock);
+ int ret = arch_spin_trylock(&lock->raw_lock);
if (ret)
debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
-void _raw_spin_unlock(spinlock_t *lock)
+void do_raw_spin_unlock(raw_spinlock_t *lock)
{
debug_spin_unlock(lock);
- __raw_spin_unlock(&lock->raw_lock);
+ arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_read_trylock(&lock->raw_lock))
+ if (arch_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
}
#endif
-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_lock(&lock->raw_lock);
+ arch_read_lock(&lock->raw_lock);
}
-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
{
- int ret = __raw_read_trylock(&lock->raw_lock);
+ int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
return ret;
}
-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_unlock(&lock->raw_lock);
+ arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_write_trylock(&lock->raw_lock))
+ if (arch_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
}
#endif
-void _raw_write_lock(rwlock_t *lock)
+void do_raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- __raw_write_lock(&lock->raw_lock);
+ arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
-int _raw_write_trylock(rwlock_t *lock)
+int do_raw_write_trylock(rwlock_t *lock)
{
- int ret = __raw_write_trylock(&lock->raw_lock);
+ int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
return ret;
}
-void _raw_write_unlock(rwlock_t *lock)
+void do_raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
- __raw_write_unlock(&lock->raw_lock);
+ arch_write_unlock(&lock->raw_lock);
}
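With this file the spinlock debug code operates on raw_spinlock_t and calls the arch_* primitives, freeing the old _raw_spin_* names for the renamed do_raw_spin_*() layer. A rough sketch of the assumed layering (inferred from the renames above, not quoted from a header):

/*
 *   spinlock_t      / spin_lock()      - may become a sleeping lock (e.g. -rt)
 *   raw_spinlock_t  / raw_spin_lock()  - always spins
 *   arch_spinlock_t / arch_spin_lock() - per-architecture implementation
 *
 * do_raw_spin_lock() is the debug-aware wrapper around arch_spin_lock(),
 * which is what lib/spinlock_debug.c provides when spinlock debugging is on.
 */
static void locking_layers_sketch(raw_spinlock_t *lock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(lock, flags);	/* routed through do_raw_spin_lock() */
	raw_spin_unlock_irqrestore(lock, flags);
}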
diff --git a/lib/string.c b/lib/string.c
index e96421ab9a9a..afce96af3afd 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -338,20 +338,34 @@ EXPORT_SYMBOL(strnchr);
#endif
/**
- * strstrip - Removes leading and trailing whitespace from @s.
+ * skip_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ */
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+EXPORT_SYMBOL(skip_spaces);
+
+/**
+ * strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
-char *strstrip(char *s)
+char *strim(char *s)
{
size_t size;
char *end;
+ s = skip_spaces(s);
size = strlen(s);
-
if (!size)
return s;
@@ -360,12 +374,9 @@ char *strstrip(char *s)
end--;
*(end + 1) = '\0';
- while (*s && isspace(*s))
- s++;
-
return s;
}
-EXPORT_SYMBOL(strstrip);
+EXPORT_SYMBOL(strim);
#ifndef __HAVE_ARCH_STRLEN
/**
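strstrip() is renamed to strim() and its leading-whitespace loop is factored out into the new skip_spaces() helper. A minimal usage sketch (buffer contents invented):

#include <linux/string.h>

static void trim_example(void)
{
	char buf[] = "  lorem ipsum  ";
	char *start, *trimmed;

	start = skip_spaces(buf);	/* points at "lorem ipsum  " */
	trimmed = strim(buf);		/* points at "lorem ipsum"; the first
					 * trailing blank is replaced by '\0' */
}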
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6438cd5599ee..735343fc857a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -9,7 +9,7 @@
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
-/*
+/*
* Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
@@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp)
}
/**
- * simple_strtoul - convert a string to an unsigned long
+ * simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long result = 0;
+ unsigned long long result = 0;
if (!base)
base = simple_guess_base(cp);
@@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
result = result * base + value;
cp++;
}
-
if (endp)
*endp = (char *)cp;
+
return result;
}
-EXPORT_SYMBOL(simple_strtoul);
+EXPORT_SYMBOL(simple_strtoull);
/**
- * simple_strtol - convert a string to a signed long
+ * simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-long simple_strtol(const char *cp, char **endp, unsigned int base)
+unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
- if(*cp == '-')
- return -simple_strtoul(cp + 1, endp, base);
- return simple_strtoul(cp, endp, base);
+ return simple_strtoull(cp, endp, base);
}
-EXPORT_SYMBOL(simple_strtol);
+EXPORT_SYMBOL(simple_strtoul);
/**
- * simple_strtoull - convert a string to an unsigned long long
+ * simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+long simple_strtol(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result = 0;
-
- if (!base)
- base = simple_guess_base(cp);
-
- if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
- cp += 2;
-
- while (isxdigit(*cp)) {
- unsigned int value;
-
- value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
- if (value >= base)
- break;
- result = result * base + value;
- cp++;
- }
+ if (*cp == '-')
+ return -simple_strtoul(cp + 1, endp, base);
- if (endp)
- *endp = (char *)cp;
- return result;
+ return simple_strtoul(cp, endp, base);
}
-EXPORT_SYMBOL(simple_strtoull);
+EXPORT_SYMBOL(simple_strtol);
/**
* simple_strtoll - convert a string to a signed long long
@@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull);
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- if(*cp=='-')
+ if (*cp == '-')
return -simple_strtoull(cp + 1, endp, base);
+
return simple_strtoull(cp, endp, base);
}
@@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
val = simple_strtoul(cp, &tail, base);
if (tail == cp)
return -EINVAL;
+
if ((*tail == '\0') ||
((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
*res = val;
@@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll);
static int skip_atoi(const char **s)
{
- int i=0;
+ int i = 0;
while (isdigit(**s))
i = i*10 + *((*s)++) - '0';
+
return i;
}
@@ -302,7 +286,7 @@ static int skip_atoi(const char **s)
/* Formats correctly any integer in [0,99999].
* Outputs from one to five digits depending on input.
* On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
-static char* put_dec_trunc(char *buf, unsigned q)
+static char *put_dec_trunc(char *buf, unsigned q)
{
unsigned d3, d2, d1, d0;
d1 = (q>>4) & 0xf;
@@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q)
d3 = d3 - 10*q;
*buf++ = d3 + '0'; /* next digit */
if (q != 0)
- *buf++ = q + '0'; /* most sign. digit */
+ *buf++ = q + '0'; /* most sign. digit */
}
}
}
+
return buf;
}
/* Same with if's removed. Always emits five digits */
-static char* put_dec_full(char *buf, unsigned q)
+static char *put_dec_full(char *buf, unsigned q)
{
/* BTW, if q is in [0,9999], 8-bit ints will be enough, */
/* but anyway, gcc produces better code with full-sized ints */
@@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q)
d2 = (q>>8) & 0xf;
d3 = (q>>12);
- /* Possible ways to approx. divide by 10 */
- /* gcc -O2 replaces multiply with shifts and adds */
- // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
- // (x * 0x67) >> 10: 1100111
- // (x * 0x34) >> 9: 110100 - same
- // (x * 0x1a) >> 8: 11010 - same
- // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
-
+ /*
+ * Possible ways to approx. divide by 10
+ * gcc -O2 replaces multiply with shifts and adds
+ * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
+ * (x * 0x67) >> 10: 1100111
+ * (x * 0x34) >> 9: 110100 - same
+ * (x * 0x1a) >> 8: 11010 - same
+ * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
+ */
d0 = 6*(d3 + d2 + d1) + (q & 0xf);
q = (d0 * 0xcd) >> 11;
d0 = d0 - 10*q;
@@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q)
d3 = d3 - 10*q;
*buf++ = d3 + '0';
*buf++ = q + '0';
+
return buf;
}
/* No inlining helps gcc to use registers better */
-static noinline char* put_dec(char *buf, unsigned long long num)
+static noinline char *put_dec(char *buf, unsigned long long num)
{
while (1) {
unsigned rem;
@@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num,
spec.flags &= ~ZEROPAD;
sign = 0;
if (spec.flags & SIGN) {
- if ((signed long long) num < 0) {
+ if ((signed long long)num < 0) {
sign = '-';
- num = - (signed long long) num;
+ num = -(signed long long)num;
spec.field_width--;
} else if (spec.flags & PLUS) {
sign = '+';
@@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num,
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
- if (spec.base == 16) shift = 4;
+
+ if (spec.base == 16)
+ shift = 4;
do {
tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
num >>= shift;
@@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num,
/* leading space padding */
spec.field_width -= spec.precision;
if (!(spec.flags & (ZEROPAD+LEFT))) {
- while(--spec.field_width >= 0) {
+ while (--spec.field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
@@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num,
*buf = ' ';
++buf;
}
+
return buf;
}
-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
{
int len, i;
if ((unsigned long)s < PAGE_SIZE)
- s = "<NULL>";
+ s = "(null)";
len = strnlen(s, spec.precision);
@@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
*buf = ' ';
++buf;
}
+
return buf;
}
@@ -585,11 +576,13 @@ static char *symbol_string(char *buf, char *end, void *ptr,
sprint_symbol(sym, value);
else
kallsyms_lookup(value, NULL, NULL, NULL, sym);
+
return string(buf, end, sym, spec);
#else
- spec.field_width = 2*sizeof(void *);
+ spec.field_width = 2 * sizeof(void *);
spec.flags |= SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
+
return number(buf, end, value, spec);
#endif
}
@@ -718,22 +711,19 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
if (i < 3)
*p++ = '.';
}
-
*p = '\0';
+
return p;
}
static char *ip6_compressed_string(char *p, const char *addr)
{
- int i;
- int j;
- int range;
+ int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
u16 word;
- u8 hi;
- u8 lo;
+ u8 hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
@@ -787,8 +777,9 @@ static char *ip6_compressed_string(char *p, const char *addr)
p = pack_hex_byte(p, hi);
else
*p++ = hex_asc_lo(hi);
+ p = pack_hex_byte(p, lo);
}
- if (hi || lo > 0x0f)
+ else if (lo > 0x0f)
p = pack_hex_byte(p, lo);
else
*p++ = hex_asc_lo(lo);
@@ -800,22 +791,23 @@ static char *ip6_compressed_string(char *p, const char *addr)
*p++ = ':';
p = ip4_string(p, &in6.s6_addr[12], false);
}
-
*p = '\0';
+
return p;
}
static char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
+
for (i = 0; i < 8; i++) {
p = pack_hex_byte(p, *addr++);
p = pack_hex_byte(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
-
*p = '\0';
+
return p;
}
@@ -842,6 +834,52 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
return string(buf, end, ip4_addr, spec);
}
+static char *uuid_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
+ char *p = uuid;
+ int i;
+ static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+ static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+ const u8 *index = be;
+ bool uc = false;
+
+ switch (*(++fmt)) {
+ case 'L':
+ uc = true; /* fall-through */
+ case 'l':
+ index = le;
+ break;
+ case 'B':
+ uc = true;
+ break;
+ }
+
+ for (i = 0; i < 16; i++) {
+ p = pack_hex_byte(p, addr[index[i]]);
+ switch (i) {
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ *p++ = '-';
+ break;
+ }
+ }
+
+ *p = 0;
+
+ if (uc) {
+ p = uuid;
+ do {
+ *p = toupper(*p);
+ } while (*(++p));
+ }
+
+ return string(buf, end, uuid, spec);
+}
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -866,6 +904,18 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* - 'I6c' for IPv6 addresses printed as specified by
* http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
+ * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ * Options for %pU are:
+ * b big endian lower case hex (default)
+ * B big endian UPPER case hex
+ * l little endian lower case hex
+ * L little endian UPPER case hex
+ * big endian output byte order is:
+ * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
+ * little endian output byte order is:
+ * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ *
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
* pointer to the real address.
@@ -880,9 +930,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
case 'f':
ptr = dereference_function_descriptor(ptr);
- case 's':
/* Fallthrough */
case 'S':
+ case 's':
return symbol_string(buf, end, ptr, spec, *fmt);
case 'R':
case 'r':
@@ -906,6 +956,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return ip4_addr_string(buf, end, ptr, spec, fmt);
}
break;
+ case 'U':
+ return uuid_string(buf, end, ptr, spec, fmt);
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
@@ -1023,8 +1075,8 @@ precision:
qualifier:
/* get the conversion qualifier */
spec->qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
+ if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+ TOLOWER(*fmt) == 'z' || *fmt == 't') {
spec->qualifier = *fmt++;
if (unlikely(spec->qualifier == *fmt)) {
if (spec->qualifier == 'l') {
@@ -1091,7 +1143,7 @@ qualifier:
spec->type = FORMAT_TYPE_LONG;
else
spec->type = FORMAT_TYPE_ULONG;
- } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
+ } else if (TOLOWER(spec->qualifier) == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (spec->qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
@@ -1144,8 +1196,7 @@ qualifier:
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
unsigned long long num;
- char *str, *end, c;
- int read;
+ char *str, *end;
struct printf_spec spec = {0};
/* Reject out-of-range values early. Large positive sizes are
@@ -1164,8 +1215,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
while (*fmt) {
const char *old_fmt = fmt;
-
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
@@ -1189,7 +1239,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
spec.precision = va_arg(args, int);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
@@ -1208,6 +1260,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
}
break;
+ }
case FORMAT_TYPE_STR:
str = string(str, end, va_arg(args, char *), spec);
@@ -1238,8 +1291,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
if (qualifier == 'l') {
long *ip = va_arg(args, long *);
*ip = (str - buf);
- } else if (qualifier == 'Z' ||
- qualifier == 'z') {
+ } else if (TOLOWER(qualifier) == 'z') {
size_t *ip = va_arg(args, size_t *);
*ip = (str - buf);
} else {
@@ -1322,7 +1374,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
- i=vsnprintf(buf,size,fmt,args);
+ i = vsnprintf(buf, size, fmt, args);
+
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(vscnprintf);
@@ -1341,14 +1394,15 @@ EXPORT_SYMBOL(vscnprintf);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-int snprintf(char * buf, size_t size, const char *fmt, ...)
+int snprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
- i=vsnprintf(buf,size,fmt,args);
+ i = vsnprintf(buf, size, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(snprintf);
@@ -1364,7 +1418,7 @@ EXPORT_SYMBOL(snprintf);
* the trailing '\0'. If @size is <= 0 the function returns 0.
*/
-int scnprintf(char * buf, size_t size, const char *fmt, ...)
+int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
@@ -1372,6 +1426,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...)
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
+
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(scnprintf);
@@ -1409,14 +1464,15 @@ EXPORT_SYMBOL(vsprintf);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-int sprintf(char * buf, const char *fmt, ...)
+int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
- i=vsnprintf(buf, INT_MAX, fmt, args);
+ i = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(sprintf);
@@ -1449,7 +1505,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
{
struct printf_spec spec = {0};
char *str, *end;
- int read;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
@@ -1474,14 +1529,15 @@ do { \
str += sizeof(type); \
} while (0)
-
while (*fmt) {
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE:
+ case FORMAT_TYPE_INVALID:
+ case FORMAT_TYPE_PERCENT_CHAR:
break;
case FORMAT_TYPE_WIDTH:
@@ -1496,13 +1552,14 @@ do { \
case FORMAT_TYPE_STR: {
const char *save_str = va_arg(args, char *);
size_t len;
+
if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
|| (unsigned long)save_str < PAGE_SIZE)
- save_str = "<NULL>";
- len = strlen(save_str);
- if (str + len + 1 < end)
- memcpy(str, save_str, len + 1);
- str += len + 1;
+ save_str = "(null)";
+ len = strlen(save_str) + 1;
+ if (str + len < end)
+ memcpy(str, save_str, len);
+ str += len;
break;
}
@@ -1513,19 +1570,13 @@ do { \
fmt++;
break;
- case FORMAT_TYPE_PERCENT_CHAR:
- break;
-
- case FORMAT_TYPE_INVALID:
- break;
-
case FORMAT_TYPE_NRCHARS: {
/* skip %n 's argument */
int qualifier = spec.qualifier;
void *skip_arg;
if (qualifier == 'l')
skip_arg = va_arg(args, long *);
- else if (qualifier == 'Z' || qualifier == 'z')
+ else if (TOLOWER(qualifier) == 'z')
skip_arg = va_arg(args, size_t *);
else
skip_arg = va_arg(args, int *);
@@ -1561,8 +1612,8 @@ do { \
}
}
}
- return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
+ return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
#undef save_arg
}
EXPORT_SYMBOL_GPL(vbin_printf);
@@ -1591,11 +1642,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
*/
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
{
- unsigned long long num;
- char *str, *end, c;
- const char *args = (const char *)bin_buf;
-
struct printf_spec spec = {0};
+ char *str, *end;
+ const char *args = (const char *)bin_buf;
if (WARN_ON_ONCE((int) size < 0))
return 0;
@@ -1625,10 +1674,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
while (*fmt) {
- int read;
const char *old_fmt = fmt;
-
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
@@ -1652,7 +1699,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
spec.precision = get_arg(int);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
@@ -1670,11 +1719,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
++str;
}
break;
+ }
case FORMAT_TYPE_STR: {
const char *str_arg = args;
- size_t len = strlen(str_arg);
- args += len + 1;
+ args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
break;
}
@@ -1686,11 +1735,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
break;
case FORMAT_TYPE_PERCENT_CHAR:
- if (str < end)
- *str = '%';
- ++str;
- break;
-
case FORMAT_TYPE_INVALID:
if (str < end)
*str = '%';
@@ -1701,15 +1745,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
/* skip */
break;
- default:
+ default: {
+ unsigned long long num;
+
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = get_arg(long long);
break;
case FORMAT_TYPE_ULONG:
- num = get_arg(unsigned long);
- break;
case FORMAT_TYPE_LONG:
num = get_arg(unsigned long);
break;
@@ -1739,8 +1783,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
str = number(str, end, num, spec);
- }
- }
+ } /* default: */
+ } /* switch(spec.type) */
+ } /* while(*fmt) */
if (size > 0) {
if (str < end)
@@ -1774,6 +1819,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
va_start(args, fmt);
ret = vbin_printf(bin_buf, size, fmt, args);
va_end(args);
+
return ret;
}
EXPORT_SYMBOL_GPL(bprintf);
@@ -1786,27 +1832,23 @@ EXPORT_SYMBOL_GPL(bprintf);
* @fmt: format of buffer
* @args: arguments
*/
-int vsscanf(const char * buf, const char * fmt, va_list args)
+int vsscanf(const char *buf, const char *fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
- int qualifier;
- int base;
- int field_width;
- int is_sign = 0;
+ int qualifier, base, field_width;
+ bool is_sign;
- while(*fmt && *str) {
+ while (*fmt && *str) {
/* skip any white space in format */
/* white space in format matchs any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
- while (isspace(*fmt))
- ++fmt;
- while (isspace(*str))
- ++str;
+ fmt = skip_spaces(++fmt);
+ str = skip_spaces(str);
}
/* anything that is not a conversion must match exactly */
@@ -1819,7 +1861,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
if (!*fmt)
break;
++fmt;
-
+
/* skip this conversion.
* advance both strings to next white space
*/
@@ -1838,8 +1880,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
/* get conversion qualifier */
qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z') {
+ if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+ TOLOWER(*fmt) == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
@@ -1851,16 +1893,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
}
}
}
- base = 10;
- is_sign = 0;
if (!*fmt || !*str)
break;
- switch(*fmt++) {
+ base = 10;
+ is_sign = 0;
+
+ switch (*fmt++) {
case 'c':
{
- char *s = (char *) va_arg(args,char*);
+ char *s = (char *)va_arg(args, char*);
if (field_width == -1)
field_width = 1;
do {
@@ -1871,17 +1914,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
continue;
case 's':
{
- char *s = (char *) va_arg(args, char *);
- if(field_width == -1)
+ char *s = (char *)va_arg(args, char *);
+ if (field_width == -1)
field_width = INT_MAX;
/* first, skip leading white space in buffer */
- while (isspace(*str))
- str++;
+ str = skip_spaces(str);
/* now copy until next white space */
- while (*str && !isspace(*str) && field_width--) {
+ while (*str && !isspace(*str) && field_width--)
*s++ = *str++;
- }
*s = '\0';
num++;
}
@@ -1889,7 +1930,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
case 'n':
/* return number of characters read so far */
{
- int *i = (int *)va_arg(args,int*);
+ int *i = (int *)va_arg(args, int*);
*i = str - buf;
}
continue;
@@ -1901,14 +1942,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
base = 16;
break;
case 'i':
- base = 0;
+ base = 0;
case 'd':
is_sign = 1;
case 'u':
break;
case '%':
/* looking for '%' in str */
- if (*str++ != '%')
+ if (*str++ != '%')
return num;
continue;
default:
@@ -1919,71 +1960,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
- while (isspace(*str))
- str++;
+ str = skip_spaces(str);
digit = *str;
if (is_sign && digit == '-')
digit = *(str + 1);
if (!digit
- || (base == 16 && !isxdigit(digit))
- || (base == 10 && !isdigit(digit))
- || (base == 8 && (!isdigit(digit) || digit > '7'))
- || (base == 0 && !isdigit(digit)))
- break;
+ || (base == 16 && !isxdigit(digit))
+ || (base == 10 && !isdigit(digit))
+ || (base == 8 && (!isdigit(digit) || digit > '7'))
+ || (base == 0 && !isdigit(digit)))
+ break;
- switch(qualifier) {
+ switch (qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign) {
- signed char *s = (signed char *) va_arg(args,signed char *);
- *s = (signed char) simple_strtol(str,&next,base);
+ signed char *s = (signed char *)va_arg(args, signed char *);
+ *s = (signed char)simple_strtol(str, &next, base);
} else {
- unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
- *s = (unsigned char) simple_strtoul(str, &next, base);
+ unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
+ *s = (unsigned char)simple_strtoul(str, &next, base);
}
break;
case 'h':
if (is_sign) {
- short *s = (short *) va_arg(args,short *);
- *s = (short) simple_strtol(str,&next,base);
+ short *s = (short *)va_arg(args, short *);
+ *s = (short)simple_strtol(str, &next, base);
} else {
- unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
- *s = (unsigned short) simple_strtoul(str, &next, base);
+ unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
+ *s = (unsigned short)simple_strtoul(str, &next, base);
}
break;
case 'l':
if (is_sign) {
- long *l = (long *) va_arg(args,long *);
- *l = simple_strtol(str,&next,base);
+ long *l = (long *)va_arg(args, long *);
+ *l = simple_strtol(str, &next, base);
} else {
- unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
- *l = simple_strtoul(str,&next,base);
+ unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
+ *l = simple_strtoul(str, &next, base);
}
break;
case 'L':
if (is_sign) {
- long long *l = (long long*) va_arg(args,long long *);
- *l = simple_strtoll(str,&next,base);
+ long long *l = (long long *)va_arg(args, long long *);
+ *l = simple_strtoll(str, &next, base);
} else {
- unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
- *l = simple_strtoull(str,&next,base);
+ unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
+ *l = simple_strtoull(str, &next, base);
}
break;
case 'Z':
case 'z':
{
- size_t *s = (size_t*) va_arg(args,size_t*);
- *s = (size_t) simple_strtoul(str,&next,base);
+ size_t *s = (size_t *)va_arg(args, size_t *);
+ *s = (size_t)simple_strtoul(str, &next, base);
}
break;
default:
if (is_sign) {
- int *i = (int *) va_arg(args, int*);
- *i = (int) simple_strtol(str,&next,base);
+ int *i = (int *)va_arg(args, int *);
+ *i = (int)simple_strtol(str, &next, base);
} else {
- unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
- *i = (unsigned int) simple_strtoul(str,&next,base);
+ unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
+ *i = (unsigned int)simple_strtoul(str, &next, base);
}
break;
}
@@ -2014,14 +2054,15 @@ EXPORT_SYMBOL(vsscanf);
* @fmt: formatting of buffer
* @...: resulting arguments
*/
-int sscanf(const char * buf, const char * fmt, ...)
+int sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
- va_start(args,fmt);
- i = vsscanf(buf,fmt,args);
+ va_start(args, fmt);
+ i = vsscanf(buf, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(sscanf);
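Among the vsprintf/vsscanf changes above: simple_strtoul() becomes a wrapper around simple_strtoull(), NULL string arguments now print as "(null)", and the new %pU extension formats 16-byte UUIDs/GUIDs. A hedged example of %pU, using the option letters documented in the hunk (the uuid bytes are made up):

#include <linux/kernel.h>
#include <linux/types.h>

static void uuid_print_example(void)
{
	static const u8 uuid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
	};

	pr_info("default (big endian, lower case): %pUb\n", uuid);
	pr_info("little endian, upper case:        %pUL\n", uuid);
}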
diff --git a/mm/Kconfig b/mm/Kconfig
index 44cf6f0a3a6d..2310984591ed 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -158,11 +158,13 @@ config PAGEFLAGS_EXTENDED
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
+# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
int
- default "4096" if ARM && !CPU_CACHE_VIPT
- default "4096" if PARISC && !PA20
+ default "999999" if ARM && !CPU_CACHE_VIPT
+ default "999999" if PARISC && !PA20
+ default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
default "4"
#
@@ -200,14 +202,6 @@ config VIRT_TO_BUS
def_bool y
depends on !ARCH_NO_VIRT_TO_BUS
-config HAVE_MLOCK
- bool
- default y if MMU=y
-
-config HAVE_MLOCKED_PAGE_BIT
- bool
- default y if HAVE_MLOCK=y
-
config MMU_NOTIFIER
bool
@@ -218,7 +212,7 @@ config KSM
Enable Kernel Samepage Merging: KSM periodically scans those areas
of an application's address space that an app has advised may be
mergeable. When it finds pages of identical content, it replaces
- the many instances by a single resident page with that content, so
+ the many instances by a single page with that content, so
saving memory until one or another app needs to modify the content.
Recommended for use with KVM, or with other duplicative applications.
See Documentation/vm/ksm.txt for more information: KSM is inactive
diff --git a/mm/Makefile b/mm/Makefile
index ebf849042ed3..82131d0f8d85 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
-ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644
index df34ceae0c67..000000000000
--- a/mm/allocpercpu.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <asm/sections.h>
-
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
- struct percpu_data *pdata = __percpu_disguise(__pdata);
-
- kfree(pdata->ptrs[cpu]);
- pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
- int cpu;
- for_each_cpu_mask_nr(cpu, *mask)
- percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
- __percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
- struct percpu_data *pdata = __percpu_disguise(__pdata);
- int node = cpu_to_node(cpu);
-
- /*
- * We should make sure each CPU gets private memory.
- */
- size = roundup(size, cache_line_size());
-
- BUG_ON(pdata->ptrs[cpu]);
- if (node_online(node))
- pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
- else
- pdata->ptrs[cpu] = kzalloc(size, gfp);
- return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
- cpumask_t *mask)
-{
- cpumask_t populated;
- int cpu;
-
- cpus_clear(populated);
- for_each_cpu_mask_nr(cpu, *mask)
- if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
- __percpu_depopulate_mask(__pdata, &populated);
- return -ENOMEM;
- } else
- cpu_set(cpu, populated);
- return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
- __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area. Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
- /*
- * We allocate whole cache lines to avoid false sharing
- */
- size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
- void *pdata = kzalloc(sz, GFP_KERNEL);
- void *__pdata = __percpu_disguise(pdata);
-
- /*
- * Can't easily make larger alignment work with kmalloc. WARN
- * on it. Larger alignment should only be used for module
- * percpu sections on SMP for which this path isn't used.
- */
- WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
- if (unlikely(!pdata))
- return NULL;
- if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
- &cpu_possible_map)))
- return __pdata;
- kfree(pdata);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
- if (unlikely(!__pdata))
- return;
- __percpu_depopulate_mask(__pdata, cpu_possible_mask);
- kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
-
-/*
- * Generic percpu area setup.
- */
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-void __init setup_per_cpu_areas(void)
-{
- unsigned long size, i;
- char *ptr;
- unsigned long nr_possible_cpus = num_possible_cpus();
-
- /* Copy section for each CPU (we discard the original) */
- size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
- ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
- for_each_possible_cpu(i) {
- __per_cpu_offset[i] = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
- ptr += size;
- }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d1dc23cc7f10..7d1486875e1c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -432,8 +432,8 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
- unsigned long step)
+static unsigned long __init align_idx(struct bootmem_data *bdata,
+ unsigned long idx, unsigned long step)
{
unsigned long base = bdata->node_min_pfn;
@@ -445,8 +445,8 @@ static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
return ALIGN(base + idx, step) - base;
}
-static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
- unsigned long align)
+static unsigned long __init align_off(struct bootmem_data *bdata,
+ unsigned long off, unsigned long align)
{
unsigned long base = PFN_PHYS(bdata->node_min_pfn);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5d7601b02874..65f38c218207 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <linux/hugetlb.h>
+#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
@@ -622,42 +623,66 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
}
/*
- * Use a helper variable to find the next node and then
- * copy it back to next_nid_to_alloc afterwards:
- * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
- * But we don't need to use a spin_lock here: it really
- * doesn't matter if occasionally a racer chooses the
- * same nid as we do. Move nid forward in the mask even
- * if we just successfully allocated a hugepage so that
- * the next caller gets hugepages on the next node.
+ * common helper functions for hstate_next_node_to_{alloc|free}.
+ * We may have allocated or freed a huge page based on a different
+ * nodes_allowed previously, so h->next_node_to_{alloc|free} might
+ * be outside of *nodes_allowed. Ensure that we use an allowed
+ * node for alloc or free.
*/
-static int hstate_next_node_to_alloc(struct hstate *h)
+static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
- int next_nid;
- next_nid = next_node(h->next_nid_to_alloc, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
- h->next_nid_to_alloc = next_nid;
- return next_nid;
+ nid = next_node(nid, *nodes_allowed);
+ if (nid == MAX_NUMNODES)
+ nid = first_node(*nodes_allowed);
+ VM_BUG_ON(nid >= MAX_NUMNODES);
+
+ return nid;
+}
+
+static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
+{
+ if (!node_isset(nid, *nodes_allowed))
+ nid = next_node_allowed(nid, nodes_allowed);
+ return nid;
+}
+
+/*
+ * returns the previously saved node ["this node"] from which to
+ * allocate a persistent huge page for the pool and advance the
+ * next node from which to allocate, handling wrap at end of node
+ * mask.
+ */
+static int hstate_next_node_to_alloc(struct hstate *h,
+ nodemask_t *nodes_allowed)
+{
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
+ h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
+
+ return nid;
}
-static int alloc_fresh_huge_page(struct hstate *h)
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int start_nid;
int next_nid;
int ret = 0;
- start_nid = h->next_nid_to_alloc;
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
next_nid = start_nid;
do {
page = alloc_fresh_huge_page_node(h, next_nid);
- if (page)
+ if (page) {
ret = 1;
- next_nid = hstate_next_node_to_alloc(h);
- } while (!page && next_nid != start_nid);
+ break;
+ }
+ next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+ } while (next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -668,17 +693,21 @@ static int alloc_fresh_huge_page(struct hstate *h)
}
/*
- * helper for free_pool_huge_page() - find next node
- * from which to free a huge page
+ * helper for free_pool_huge_page() - return the previously saved
+ * node ["this node"] from which to free a huge page. Advance the
+ * next node id whether or not we find a free huge page to free so
+ * that the next attempt to free addresses the next node.
*/
-static int hstate_next_node_to_free(struct hstate *h)
+static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
- int next_nid;
- next_nid = next_node(h->next_nid_to_free, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
- h->next_nid_to_free = next_nid;
- return next_nid;
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
+ h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
+
+ return nid;
}
/*
@@ -687,13 +716,14 @@ static int hstate_next_node_to_free(struct hstate *h)
* balanced over allowed nodes.
* Called with hugetlb_lock locked.
*/
-static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
+static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+ bool acct_surplus)
{
int start_nid;
int next_nid;
int ret = 0;
- start_nid = h->next_nid_to_free;
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
@@ -715,9 +745,10 @@ static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
}
update_and_free_page(h, page);
ret = 1;
+ break;
}
- next_nid = hstate_next_node_to_free(h);
- } while (!ret && next_nid != start_nid);
+ next_nid = hstate_next_node_to_free(h, nodes_allowed);
+ } while (next_nid != start_nid);
return ret;
}
@@ -911,14 +942,14 @@ static void return_unused_surplus_pages(struct hstate *h,
/*
* We want to release as many surplus pages as possible, spread
- * evenly across all nodes. Iterate across all nodes until we
- * can no longer free unreserved surplus pages. This occurs when
- * the nodes with surplus pages have no free pages.
- * free_pool_huge_page() will balance the the frees across the
- * on-line nodes for us and will handle the hstate accounting.
+ * evenly across all nodes with memory. Iterate across these nodes
+ * until we can no longer free unreserved surplus pages. This occurs
+ * when the nodes with surplus pages have no free pages.
+ * free_pool_huge_page() will balance the freed pages across the
+ * on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
- if (!free_pool_huge_page(h, 1))
+ if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
break;
}
}
@@ -1022,16 +1053,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_online_map);
+ int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
while (nr_nodes) {
void *addr;
addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(h->next_nid_to_alloc),
+ NODE_DATA(hstate_next_node_to_alloc(h,
+ &node_states[N_HIGH_MEMORY])),
huge_page_size(h), huge_page_size(h), 0);
- hstate_next_node_to_alloc(h);
if (addr) {
/*
* Use the beginning of the huge page to store the
@@ -1084,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h))
+ } else if (!alloc_fresh_huge_page(h,
+ &node_states[N_HIGH_MEMORY]))
break;
}
h->max_huge_pages = i;
@@ -1126,14 +1158,15 @@ static void __init report_hugepages(void)
}
#ifdef CONFIG_HIGHMEM
-static void try_to_free_low(struct hstate *h, unsigned long count)
+static void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
int i;
if (h->order >= MAX_ORDER)
return;
- for (i = 0; i < MAX_NUMNODES; ++i) {
+ for_each_node_mask(i, *nodes_allowed) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(page, next, freel, lru) {
@@ -1149,7 +1182,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count)
}
}
#else
-static inline void try_to_free_low(struct hstate *h, unsigned long count)
+static inline void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
}
#endif
@@ -1159,7 +1193,8 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
-static int adjust_pool_surplus(struct hstate *h, int delta)
+static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
+ int delta)
{
int start_nid, next_nid;
int ret = 0;
@@ -1167,29 +1202,33 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0)
- start_nid = h->next_nid_to_alloc;
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
else
- start_nid = h->next_nid_to_free;
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
int nid = next_nid;
if (delta < 0) {
- next_nid = hstate_next_node_to_alloc(h);
/*
* To shrink on this node, there must be a surplus page
*/
- if (!h->surplus_huge_pages_node[nid])
+ if (!h->surplus_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_alloc(h,
+ nodes_allowed);
continue;
+ }
}
if (delta > 0) {
- next_nid = hstate_next_node_to_free(h);
/*
* Surplus cannot exceed the total number of pages
*/
if (h->surplus_huge_pages_node[nid] >=
- h->nr_huge_pages_node[nid])
+ h->nr_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_free(h,
+ nodes_allowed);
continue;
+ }
}
h->surplus_huge_pages += delta;
@@ -1202,7 +1241,8 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
@@ -1222,7 +1262,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, -1))
+ if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
@@ -1233,11 +1273,14 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
- ret = alloc_fresh_huge_page(h);
+ ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
+ /* Bail for signals. Probably ctrl-c from user */
+ if (signal_pending(current))
+ goto out;
}
/*
@@ -1257,13 +1300,13 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
- try_to_free_low(h, min_count);
+ try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
- if (!free_pool_huge_page(h, 0))
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, 1))
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
@@ -1282,43 +1325,117 @@ out:
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
-static struct hstate *kobj_to_hstate(struct kobject *kobj)
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
+
for (i = 0; i < HUGE_MAX_HSTATE; i++)
- if (hstate_kobjs[i] == kobj)
+ if (hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = NUMA_NO_NODE;
return &hstates[i];
- BUG();
- return NULL;
+ }
+
+ return kobj_to_node_hstate(kobj, nidp);
}
-static ssize_t nr_hugepages_show(struct kobject *kobj,
+static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->nr_huge_pages);
+ struct hstate *h;
+ unsigned long nr_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ nr_huge_pages = h->nr_huge_pages;
+ else
+ nr_huge_pages = h->nr_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", nr_huge_pages);
}
-static ssize_t nr_hugepages_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t len)
{
int err;
- unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ int nid;
+ unsigned long count;
+ struct hstate *h;
+ NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
- err = strict_strtoul(buf, 10, &input);
+ err = strict_strtoul(buf, 10, &count);
if (err)
return 0;
- h->max_huge_pages = set_max_huge_pages(h, input);
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE) {
+ /*
+ * global hstate attribute
+ */
+ if (!(obey_mempolicy &&
+ init_nodemask_of_mempolicy(nodes_allowed))) {
+ NODEMASK_FREE(nodes_allowed);
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+ }
+ } else if (nodes_allowed) {
+ /*
+ * per node hstate attribute: adjust count to global,
+ * but restrict alloc/free to the specified node.
+ */
+ count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+ init_nodemask_of_node(nodes_allowed, nid);
+ } else
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+
+ h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
- return count;
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ NODEMASK_FREE(nodes_allowed);
+
+ return len;
+}
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);
+#ifdef CONFIG_NUMA
+
+/*
+ * hstate attribute for optionally mempolicy-based constraint on persistent
+ * huge page alloc/free.
+ */
+static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ return nr_hugepages_store_common(true, kobj, attr, buf, len);
+}
+HSTATE_ATTR(nr_hugepages_mempolicy);
+#endif
+
+
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
@@ -1326,7 +1443,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
{
int err;
unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
err = strict_strtoul(buf, 10, &input);
if (err)
@@ -1343,15 +1460,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->free_huge_pages);
+ struct hstate *h;
+ unsigned long free_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ free_huge_pages = h->free_huge_pages;
+ else
+ free_huge_pages = h->free_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
@@ -1359,8 +1485,17 @@ HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+ struct hstate *h;
+ unsigned long surplus_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ surplus_huge_pages = h->surplus_huge_pages;
+ else
+ surplus_huge_pages = h->surplus_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
@@ -1370,6 +1505,9 @@ static struct attribute *hstate_attrs[] = {
&free_hugepages_attr.attr,
&resv_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
+#ifdef CONFIG_NUMA
+ &nr_hugepages_mempolicy_attr.attr,
+#endif
NULL,
};
@@ -1377,19 +1515,21 @@ static struct attribute_group hstate_attr_group = {
.attrs = hstate_attrs,
};
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
+ struct kobject *parent,
+ struct kobject **hstate_kobjs,
+ struct attribute_group *hstate_attr_group)
{
int retval;
+ int hi = h - hstates;
- hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
- hugepages_kobj);
- if (!hstate_kobjs[h - hstates])
+ hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
+ if (!hstate_kobjs[hi])
return -ENOMEM;
- retval = sysfs_create_group(hstate_kobjs[h - hstates],
- &hstate_attr_group);
+ retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
- kobject_put(hstate_kobjs[h - hstates]);
+ kobject_put(hstate_kobjs[hi]);
return retval;
}
@@ -1404,17 +1544,184 @@ static void __init hugetlb_sysfs_init(void)
return;
for_each_hstate(h) {
- err = hugetlb_sysfs_add_hstate(h);
+ err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
+ hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
+#ifdef CONFIG_NUMA
+
+/*
+ * node_hstate/s - associate per node hstate attributes, via their kobjects,
+ * with node sysdevs in node_devices[] using a parallel array. The array
+ * index of a node sysdev or _hstate == node id.
+ * This is here to avoid any static dependency of the node sysdev driver, in
+ * the base kernel, on the hugetlb module.
+ */
+struct node_hstate {
+ struct kobject *hugepages_kobj;
+ struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
+};
+struct node_hstate node_hstates[MAX_NUMNODES];
+
+/*
+ * A subset of global hstate attributes for node sysdevs
+ */
+static struct attribute *per_node_hstate_attrs[] = {
+ &nr_hugepages_attr.attr,
+ &free_hugepages_attr.attr,
+ &surplus_hugepages_attr.attr,
+ NULL,
+};
+
+static struct attribute_group per_node_hstate_attr_group = {
+ .attrs = per_node_hstate_attrs,
+};
+
+/*
+ * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * Returns node id via non-NULL nidp.
+ */
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ int nid;
+
+ for (nid = 0; nid < nr_node_ids; nid++) {
+ struct node_hstate *nhs = &node_hstates[nid];
+ int i;
+ for (i = 0; i < HUGE_MAX_HSTATE; i++)
+ if (nhs->hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = nid;
+ return &hstates[i];
+ }
+ }
+
+ BUG();
+ return NULL;
+}
+
+/*
+ * Unregister hstate attributes from a single node sysdev.
+ * No-op if no hstate attributes attached.
+ */
+void hugetlb_unregister_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+
+ if (!nhs->hugepages_kobj)
+ return; /* no hstate attributes */
+
+ for_each_hstate(h)
+ if (nhs->hstate_kobjs[h - hstates]) {
+ kobject_put(nhs->hstate_kobjs[h - hstates]);
+ nhs->hstate_kobjs[h - hstates] = NULL;
+ }
+
+ kobject_put(nhs->hugepages_kobj);
+ nhs->hugepages_kobj = NULL;
+}
+
+/*
+ * hugetlb module exit: unregister hstate attributes from node sysdevs
+ * that have them.
+ */
+static void hugetlb_unregister_all_nodes(void)
+{
+ int nid;
+
+ /*
+ * disable node sysdev registrations.
+ */
+ register_hugetlbfs_with_node(NULL, NULL);
+
+ /*
+ * remove hstate attributes from any nodes that have them.
+ */
+ for (nid = 0; nid < nr_node_ids; nid++)
+ hugetlb_unregister_node(&node_devices[nid]);
+}
+
+/*
+ * Register hstate attributes for a single node sysdev.
+ * No-op if attributes already registered.
+ */
+void hugetlb_register_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+ int err;
+
+ if (nhs->hugepages_kobj)
+ return; /* already allocated */
+
+ nhs->hugepages_kobj = kobject_create_and_add("hugepages",
+ &node->sysdev.kobj);
+ if (!nhs->hugepages_kobj)
+ return;
+
+ for_each_hstate(h) {
+ err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
+ nhs->hstate_kobjs,
+ &per_node_hstate_attr_group);
+ if (err) {
+ printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
+ " for node %d\n",
+ h->name, node->sysdev.id);
+ hugetlb_unregister_node(node);
+ break;
+ }
+ }
+}
+
+/*
+ * hugetlb init time: register hstate attributes for all registered node
+ * sysdevs of nodes that have memory. All on-line nodes should have
+ * registered their associated sysdev by this time.
+ */
+static void hugetlb_register_all_nodes(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_HIGH_MEMORY) {
+ struct node *node = &node_devices[nid];
+ if (node->sysdev.id == nid)
+ hugetlb_register_node(node);
+ }
+
+ /*
+ * Let the node sysdev driver know we're here so it can
+ * [un]register hstate attributes on node hotplug.
+ */
+ register_hugetlbfs_with_node(hugetlb_register_node,
+ hugetlb_unregister_node);
+}
+#else /* !CONFIG_NUMA */
+
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ BUG();
+ if (nidp)
+ *nidp = -1;
+ return NULL;
+}
+
+static void hugetlb_unregister_all_nodes(void) { }
+
+static void hugetlb_register_all_nodes(void) { }
+
+#endif
+
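+
With the registration above in place, every memory node gains a hugepages/hugepages-<size>kB directory carrying the reduced attribute group (nr_hugepages, free_hugepages, surplus_hugepages). A hypothetical userspace sketch of driving the per-node nr_hugepages attribute follows; the node id, page size and target count are example values and the standard /sys mount point is assumed:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int nid = 1;                    /* example node */
        unsigned long kb = 2048;        /* example huge page size in kB */
        unsigned long count = 64;       /* example target count */
        char path[128];
        FILE *f;

        /* Path layout follows the "hugepages" kobject added under each
         * node sysdev and the "hugepages-%lukB" hstate directory name. */
        snprintf(path, sizeof(path),
                 "/sys/devices/system/node/node%d/hugepages/hugepages-%lukB/nr_hugepages",
                 nid, kb);

        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        fprintf(f, "%lu\n", count);
        fclose(f);
        return 0;
}

Writing the per-node file adjusts the global count while restricting the alloc/free work to that node, which is exactly the nodes_allowed path taken in nr_hugepages_store_common() above.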
static void __exit hugetlb_exit(void)
{
struct hstate *h;
+ hugetlb_unregister_all_nodes();
+
for_each_hstate(h) {
kobject_put(hstate_kobjs[h - hstates]);
}
@@ -1449,6 +1756,8 @@ static int __init hugetlb_init(void)
hugetlb_sysfs_init();
+ hugetlb_register_all_nodes();
+
return 0;
}
module_init(hugetlb_init);
@@ -1472,8 +1781,8 @@ void __init hugetlb_add_hstate(unsigned order)
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
- h->next_nid_to_alloc = first_node(node_online_map);
- h->next_nid_to_free = first_node(node_online_map);
+ h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
+ h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
@@ -1536,9 +1845,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
}
#ifdef CONFIG_SYSCTL
-int hugetlb_sysctl_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *length, loff_t *ppos)
+static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+ struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
@@ -1550,12 +1859,40 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
table->maxlen = sizeof(unsigned long);
proc_doulongvec_minmax(table, write, buffer, length, ppos);
- if (write)
- h->max_huge_pages = set_max_huge_pages(h, tmp);
+ if (write) {
+ NODEMASK_ALLOC(nodemask_t, nodes_allowed,
+ GFP_KERNEL | __GFP_NORETRY);
+ if (!(obey_mempolicy &&
+ init_nodemask_of_mempolicy(nodes_allowed))) {
+ NODEMASK_FREE(nodes_allowed);
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+ }
+ h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
+
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ NODEMASK_FREE(nodes_allowed);
+ }
return 0;
}
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+
+ return hugetlb_sysctl_handler_common(false, table, write,
+ buffer, length, ppos);
+}
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ return hugetlb_sysctl_handler_common(true, table, write,
+ buffer, length, ppos);
+}
+#endif /* CONFIG_NUMA */
+
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
@@ -1903,6 +2240,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ (vma->vm_pgoff >> PAGE_SHIFT);
mapping = (struct address_space *)page_private(page);
+ /*
+ * Take the mapping lock for the duration of the table walk. As
+ * this mapping should be shared between all the VMAs,
+ * __unmap_hugepage_range() is called as the lock is already held
+ */
+ spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
@@ -1916,10 +2259,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* from the time of fork. This would look like data corruption
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
- unmap_hugepage_range(iter_vma,
+ __unmap_hugepage_range(iter_vma,
address, address + huge_page_size(h),
page);
}
+ spin_unlock(&mapping->i_mmap_lock);
return 1;
}
@@ -1959,6 +2303,9 @@ retry_avoidcopy:
outside_reserve = 1;
page_cache_get(old_page);
+
+ /* Drop page_table_lock as buddy allocator may be called */
+ spin_unlock(&mm->page_table_lock);
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
@@ -1976,19 +2323,25 @@ retry_avoidcopy:
if (unmap_ref_private(mm, vma, old_page, address)) {
BUG_ON(page_count(old_page) != 1);
BUG_ON(huge_pte_none(pte));
+ spin_lock(&mm->page_table_lock);
goto retry_avoidcopy;
}
WARN_ON_ONCE(1);
}
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return -PTR_ERR(new_page);
}
- spin_unlock(&mm->page_table_lock);
copy_huge_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
- spin_lock(&mm->page_table_lock);
+ /*
+ * Retake the page_table_lock to check for racing updates
+ * before the page tables are altered
+ */
+ spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..4fe67a162cb4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,7 +63,7 @@ static inline unsigned long page_order(struct page *page)
return page_private(page);
}
-#ifdef CONFIG_HAVE_MLOCK
+#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -72,21 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
-#endif
-
-/*
- * unevictable_migrate_page() called only from migrate_page_copy() to
- * migrate unevictable flag to new page.
- * Note that the old page has been isolated from the LRU lists at this
- * point so we don't need to worry about LRU statistics.
- */
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
- if (TestClearPageUnevictable(old))
- SetPageUnevictable(new);
-}
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* Called only in fault path via page_evictable() for a new page
* to determine if it's being mapped into a LOCKED vma.
@@ -107,9 +93,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
}
/*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
*/
extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
@@ -144,7 +131,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
}
}
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
return 0;
@@ -153,7 +140,7 @@ static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
/*
* Return the mem_map entry representing the 'offset' subpage within
diff --git a/mm/ksm.c b/mm/ksm.c
index 5575f8628fef..56a0da1f9979 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -29,11 +29,13 @@
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
+#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <asm/tlbflush.h>
+#include "internal.h"
/*
* A few notes about the KSM scanning process,
@@ -79,13 +81,13 @@
* struct mm_slot - ksm information per mm that is being scanned
* @link: link to the mm_slots hash list
* @mm_list: link into the mm_slots list, rooted in ksm_mm_head
- * @rmap_list: head for this mm_slot's list of rmap_items
+ * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
* @mm: the mm that this information is valid for
*/
struct mm_slot {
struct hlist_node link;
struct list_head mm_list;
- struct list_head rmap_list;
+ struct rmap_item *rmap_list;
struct mm_struct *mm;
};
@@ -93,7 +95,7 @@ struct mm_slot {
* struct ksm_scan - cursor for scanning
* @mm_slot: the current mm_slot we are scanning
* @address: the next address inside that to be scanned
- * @rmap_item: the current rmap that we are scanning inside the rmap_list
+ * @rmap_list: link to the next rmap to be scanned in the rmap_list
* @seqnr: count of completed full scans (needed when removing unstable node)
*
* There is only the one ksm_scan instance of this cursor structure.
@@ -101,37 +103,51 @@ struct mm_slot {
struct ksm_scan {
struct mm_slot *mm_slot;
unsigned long address;
- struct rmap_item *rmap_item;
+ struct rmap_item **rmap_list;
unsigned long seqnr;
};
/**
+ * struct stable_node - node of the stable rbtree
+ * @node: rb node of this ksm page in the stable tree
+ * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
+ */
+struct stable_node {
+ struct rb_node node;
+ struct hlist_head hlist;
+ unsigned long kpfn;
+};
+
+/**
* struct rmap_item - reverse mapping item for virtual addresses
- * @link: link into mm_slot's rmap_list (rmap_list is per mm)
+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
+ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
* @mm: the memory structure this rmap_item is pointing into
* @address: the virtual address this rmap_item tracks (+ flags in low bits)
* @oldchecksum: previous checksum of the page at that virtual address
- * @node: rb_node of this rmap_item in either unstable or stable tree
- * @next: next rmap_item hanging off the same node of the stable tree
- * @prev: previous rmap_item hanging off the same node of the stable tree
+ * @node: rb node of this rmap_item in the unstable tree
+ * @head: pointer to stable_node heading this list in the stable tree
+ * @hlist: link into hlist of rmap_items hanging off that stable_node
*/
struct rmap_item {
- struct list_head link;
+ struct rmap_item *rmap_list;
+ struct anon_vma *anon_vma; /* when stable */
struct mm_struct *mm;
unsigned long address; /* + low bits used for flags below */
+ unsigned int oldchecksum; /* when unstable */
union {
- unsigned int oldchecksum; /* when unstable */
- struct rmap_item *next; /* when stable */
- };
- union {
- struct rb_node node; /* when tree node */
- struct rmap_item *prev; /* in stable list */
+ struct rb_node node; /* when node of unstable tree */
+ struct { /* when listed from stable tree */
+ struct stable_node *head;
+ struct hlist_node hlist;
+ };
};
};
#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
-#define NODE_FLAG 0x100 /* is a node of unstable or stable tree */
-#define STABLE_FLAG 0x200 /* is a node or list item of stable tree */
+#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
+#define STABLE_FLAG 0x200 /* is listed from the stable tree */
/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
@@ -148,6 +164,7 @@ static struct ksm_scan ksm_scan = {
};
static struct kmem_cache *rmap_item_cache;
+static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;
/* The number of nodes in the stable tree */
@@ -162,9 +179,6 @@ static unsigned long ksm_pages_unshared;
/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;
-/* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages;
-
/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;
@@ -190,13 +204,19 @@ static int __init ksm_slab_init(void)
if (!rmap_item_cache)
goto out;
+ stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
+ if (!stable_node_cache)
+ goto out_free1;
+
mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
if (!mm_slot_cache)
- goto out_free;
+ goto out_free2;
return 0;
-out_free:
+out_free2:
+ kmem_cache_destroy(stable_node_cache);
+out_free1:
kmem_cache_destroy(rmap_item_cache);
out:
return -ENOMEM;
@@ -205,6 +225,7 @@ out:
static void __init ksm_slab_free(void)
{
kmem_cache_destroy(mm_slot_cache);
+ kmem_cache_destroy(stable_node_cache);
kmem_cache_destroy(rmap_item_cache);
mm_slot_cache = NULL;
}
@@ -226,6 +247,16 @@ static inline void free_rmap_item(struct rmap_item *rmap_item)
kmem_cache_free(rmap_item_cache, rmap_item);
}
+static inline struct stable_node *alloc_stable_node(void)
+{
+ return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
+}
+
+static inline void free_stable_node(struct stable_node *stable_node)
+{
+ kmem_cache_free(stable_node_cache, stable_node);
+}
+
static inline struct mm_slot *alloc_mm_slot(void)
{
if (!mm_slot_cache) /* initialization failed */
@@ -275,7 +306,6 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
% MM_SLOTS_HASH_HEADS];
mm_slot->mm = mm;
- INIT_LIST_HEAD(&mm_slot->rmap_list);
hlist_add_head(&mm_slot->link, bucket);
}
@@ -284,6 +314,25 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
return rmap_item->address & STABLE_FLAG;
}
+static void hold_anon_vma(struct rmap_item *rmap_item,
+ struct anon_vma *anon_vma)
+{
+ rmap_item->anon_vma = anon_vma;
+ atomic_inc(&anon_vma->ksm_refcount);
+}
+
+static void drop_anon_vma(struct rmap_item *rmap_item)
+{
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+
+ if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+ int empty = list_empty(&anon_vma->head);
+ spin_unlock(&anon_vma->lock);
+ if (empty)
+ anon_vma_free(anon_vma);
+ }
+}
+
/*
* ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
* page tables after it has passed through ksm_exit() - which, if necessary,
@@ -356,10 +405,18 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
-static void break_cow(struct mm_struct *mm, unsigned long addr)
+static void break_cow(struct rmap_item *rmap_item)
{
+ struct mm_struct *mm = rmap_item->mm;
+ unsigned long addr = rmap_item->address;
struct vm_area_struct *vma;
+ /*
+ * It is not an accident that whenever we want to break COW
+ * to undo, we also need to drop a reference to the anon_vma.
+ */
+ drop_anon_vma(rmap_item);
+
down_read(&mm->mmap_sem);
if (ksm_test_exit(mm))
goto out;
@@ -403,21 +460,77 @@ out: page = NULL;
return page;
}
+static void remove_node_from_stable_tree(struct stable_node *stable_node)
+{
+ struct rmap_item *rmap_item;
+ struct hlist_node *hlist;
+
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ if (rmap_item->hlist.next)
+ ksm_pages_sharing--;
+ else
+ ksm_pages_shared--;
+ drop_anon_vma(rmap_item);
+ rmap_item->address &= PAGE_MASK;
+ cond_resched();
+ }
+
+ rb_erase(&stable_node->node, &root_stable_tree);
+ free_stable_node(stable_node);
+}
+
/*
- * get_ksm_page: checks if the page at the virtual address in rmap_item
- * is still PageKsm, in which case we can trust the content of the page,
- * and it returns the gotten page; but NULL if the page has been zapped.
+ * get_ksm_page: checks if the page indicated by the stable node
+ * is still its ksm page, despite having held no reference to it.
+ * In which case we can trust the content of the page, and it
+ * returns the gotten page; but if the page has now been zapped,
+ * remove the stale node from the stable tree and return NULL.
+ *
+ * You would expect the stable_node to hold a reference to the ksm page.
+ * But if it increments the page's count, swapping out has to wait for
+ * ksmd to come around again before it can free the page, which may take
+ * seconds or even minutes: much too unresponsive. So instead we use a
+ * "keyhole reference": access to the ksm page from the stable node peeps
+ * out through its keyhole to see if that page still holds the right key,
+ * pointing back to this stable node. This relies on freeing a PageAnon
+ * page to reset its page->mapping to NULL, and relies on no other use of
+ * a page to put something that might look like our key in page->mapping.
+ *
+ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
+ * but this is different - made simpler by ksm_thread_mutex being held, but
+ * interesting for assuming that no other use of the struct page could ever
+ * put our expected_mapping into page->mapping (or a field of the union which
+ * coincides with page->mapping). The RCU calls are not for KSM at all, but
+ * to keep the page_count protocol described with page_cache_get_speculative.
+ *
+ * Note: it is possible that get_ksm_page() will return NULL one moment,
+ * then page the next, if the page is in between page_freeze_refs() and
+ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
+ * is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct rmap_item *rmap_item)
+static struct page *get_ksm_page(struct stable_node *stable_node)
{
struct page *page;
-
- page = get_mergeable_page(rmap_item);
- if (page && !PageKsm(page)) {
+ void *expected_mapping;
+
+ page = pfn_to_page(stable_node->kpfn);
+ expected_mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ rcu_read_lock();
+ if (page->mapping != expected_mapping)
+ goto stale;
+ if (!get_page_unless_zero(page))
+ goto stale;
+ if (page->mapping != expected_mapping) {
put_page(page);
- page = NULL;
+ goto stale;
}
+ rcu_read_unlock();
return page;
+stale:
+ rcu_read_unlock();
+ remove_node_from_stable_tree(stable_node);
+ return NULL;
}
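
Since the hunk above interleaves the old and new lookup, the resulting get_ksm_page(), assembled from its '+' lines rather than implemented independently, reads as follows:

static struct page *get_ksm_page(struct stable_node *stable_node)
{
        struct page *page;
        void *expected_mapping;

        page = pfn_to_page(stable_node->kpfn);
        expected_mapping = (void *)stable_node +
                                (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
        rcu_read_lock();
        if (page->mapping != expected_mapping)
                goto stale;
        if (!get_page_unless_zero(page))
                goto stale;
        if (page->mapping != expected_mapping) {
                put_page(page);
                goto stale;
        }
        rcu_read_unlock();
        return page;
stale:
        rcu_read_unlock();
        remove_node_from_stable_tree(stable_node);
        return NULL;
}

The double check of page->mapping around get_page_unless_zero() is the "keyhole reference" described in the comment above: the reference is only kept if the page still points back at this stable node after the count has been raised.
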
/*
@@ -426,35 +539,29 @@ static struct page *get_ksm_page(struct rmap_item *rmap_item)
*/
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
- if (in_stable_tree(rmap_item)) {
- struct rmap_item *next_item = rmap_item->next;
-
- if (rmap_item->address & NODE_FLAG) {
- if (next_item) {
- rb_replace_node(&rmap_item->node,
- &next_item->node,
- &root_stable_tree);
- next_item->address |= NODE_FLAG;
- ksm_pages_sharing--;
- } else {
- rb_erase(&rmap_item->node, &root_stable_tree);
- ksm_pages_shared--;
- }
- } else {
- struct rmap_item *prev_item = rmap_item->prev;
+ if (rmap_item->address & STABLE_FLAG) {
+ struct stable_node *stable_node;
+ struct page *page;
- BUG_ON(prev_item->next != rmap_item);
- prev_item->next = next_item;
- if (next_item) {
- BUG_ON(next_item->prev != rmap_item);
- next_item->prev = rmap_item->prev;
- }
+ stable_node = rmap_item->head;
+ page = get_ksm_page(stable_node);
+ if (!page)
+ goto out;
+
+ lock_page(page);
+ hlist_del(&rmap_item->hlist);
+ unlock_page(page);
+ put_page(page);
+
+ if (stable_node->hlist.first)
ksm_pages_sharing--;
- }
+ else
+ ksm_pages_shared--;
- rmap_item->next = NULL;
+ drop_anon_vma(rmap_item);
+ rmap_item->address &= PAGE_MASK;
- } else if (rmap_item->address & NODE_FLAG) {
+ } else if (rmap_item->address & UNSTABLE_FLAG) {
unsigned char age;
/*
* Usually ksmd can and must skip the rb_erase, because
@@ -467,24 +574,21 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
BUG_ON(age > 1);
if (!age)
rb_erase(&rmap_item->node, &root_unstable_tree);
+
ksm_pages_unshared--;
+ rmap_item->address &= PAGE_MASK;
}
-
- rmap_item->address &= PAGE_MASK;
-
+out:
cond_resched(); /* we're called from many long loops */
}
static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
- struct list_head *cur)
+ struct rmap_item **rmap_list)
{
- struct rmap_item *rmap_item;
-
- while (cur != &mm_slot->rmap_list) {
- rmap_item = list_entry(cur, struct rmap_item, link);
- cur = cur->next;
+ while (*rmap_list) {
+ struct rmap_item *rmap_item = *rmap_list;
+ *rmap_list = rmap_item->rmap_list;
remove_rmap_item_from_tree(rmap_item);
- list_del(&rmap_item->link);
free_rmap_item(rmap_item);
}
}
@@ -550,7 +654,7 @@ static int unmerge_and_remove_all_rmap_items(void)
goto error;
}
- remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+ remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -646,7 +750,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* Check that no O_DIRECT or similar I/O is in progress on the
* page
*/
- if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
+ if (page_mapcount(page) + 1 + swapped != page_count(page)) {
set_pte_at_notify(mm, addr, ptep, entry);
goto out_unlock;
}
@@ -664,15 +768,15 @@ out:
/**
* replace_page - replace page in vma by new ksm page
- * @vma: vma that holds the pte pointing to oldpage
- * @oldpage: the page we are replacing by newpage
- * @newpage: the ksm page we replace oldpage by
+ * @vma: vma that holds the pte pointing to page
+ * @page: the page we are replacing by kpage
+ * @kpage: the ksm page we replace page by
* @orig_pte: the original value of the pte
*
* Returns 0 on success, -EFAULT on failure.
*/
-static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
- struct page *newpage, pte_t orig_pte)
+static int replace_page(struct vm_area_struct *vma, struct page *page,
+ struct page *kpage, pte_t orig_pte)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
@@ -681,12 +785,9 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
pte_t *ptep;
spinlock_t *ptl;
unsigned long addr;
- pgprot_t prot;
int err = -EFAULT;
- prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);
-
- addr = page_address_in_vma(oldpage, vma);
+ addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
goto out;
@@ -708,15 +809,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
goto out;
}
- get_page(newpage);
- page_add_ksm_rmap(newpage);
+ get_page(kpage);
+ page_add_anon_rmap(kpage, vma, addr);
flush_cache_page(vma, addr, pte_pfn(*ptep));
ptep_clear_flush(vma, addr, ptep);
- set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));
+ set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
- page_remove_rmap(oldpage);
- put_page(oldpage);
+ page_remove_rmap(page);
+ put_page(page);
pte_unmap_unlock(ptep, ptl);
err = 0;
@@ -726,32 +827,27 @@ out:
/*
* try_to_merge_one_page - take two pages and merge them into one
- * @vma: the vma that hold the pte pointing into oldpage
- * @oldpage: the page that we want to replace with newpage
- * @newpage: the page that we want to map instead of oldpage
- *
- * Note:
- * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
- * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
+ * @vma: the vma that holds the pte pointing to page
+ * @page: the PageAnon page that we want to replace with kpage
+ * @kpage: the PageKsm page that we want to map instead of page,
+ * or NULL the first time when we want to use page as kpage.
*
* This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
static int try_to_merge_one_page(struct vm_area_struct *vma,
- struct page *oldpage,
- struct page *newpage)
+ struct page *page, struct page *kpage)
{
pte_t orig_pte = __pte(0);
int err = -EFAULT;
+ if (page == kpage) /* ksm page forked */
+ return 0;
+
if (!(vma->vm_flags & VM_MERGEABLE))
goto out;
-
- if (!PageAnon(oldpage))
+ if (!PageAnon(page))
goto out;
- get_page(newpage);
- get_page(oldpage);
-
/*
* We need the page lock to read a stable PageSwapCache in
* write_protect_page(). We use trylock_page() instead of
@@ -759,26 +855,39 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
* prefer to continue scanning and merging different pages,
* then come back to this page when it is unlocked.
*/
- if (!trylock_page(oldpage))
- goto out_putpage;
+ if (!trylock_page(page))
+ goto out;
/*
* If this anonymous page is mapped only here, its pte may need
* to be write-protected. If it's mapped elsewhere, all of its
* ptes are necessarily already write-protected. But in either
* case, we need to lock and check page_count is not raised.
*/
- if (write_protect_page(vma, oldpage, &orig_pte)) {
- unlock_page(oldpage);
- goto out_putpage;
+ if (write_protect_page(vma, page, &orig_pte) == 0) {
+ if (!kpage) {
+ /*
+ * While we hold page lock, upgrade page from
+ * PageAnon+anon_vma to PageKsm+NULL stable_node:
+ * stable_tree_insert() will update stable_node.
+ */
+ set_page_stable_node(page, NULL);
+ mark_page_accessed(page);
+ err = 0;
+ } else if (pages_identical(page, kpage))
+ err = replace_page(vma, page, kpage, orig_pte);
}
- unlock_page(oldpage);
- if (pages_identical(oldpage, newpage))
- err = replace_page(vma, oldpage, newpage, orig_pte);
+ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
+ munlock_vma_page(page);
+ if (!PageMlocked(kpage)) {
+ unlock_page(page);
+ lock_page(kpage);
+ mlock_vma_page(kpage);
+ page = kpage; /* for final unlock */
+ }
+ }
-out_putpage:
- put_page(oldpage);
- put_page(newpage);
+ unlock_page(page);
out:
return err;
}
@@ -786,26 +895,31 @@ out:
/*
* try_to_merge_with_ksm_page - like try_to_merge_two_pages,
* but no new kernel page is allocated: kpage must already be a ksm page.
+ *
+ * This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
-static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
- unsigned long addr1,
- struct page *page1,
- struct page *kpage)
+static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
+ struct page *page, struct page *kpage)
{
+ struct mm_struct *mm = rmap_item->mm;
struct vm_area_struct *vma;
int err = -EFAULT;
- down_read(&mm1->mmap_sem);
- if (ksm_test_exit(mm1))
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, rmap_item->address);
+ if (!vma || vma->vm_start > rmap_item->address)
goto out;
- vma = find_vma(mm1, addr1);
- if (!vma || vma->vm_start > addr1)
+ err = try_to_merge_one_page(vma, page, kpage);
+ if (err)
goto out;
- err = try_to_merge_one_page(vma, page1, kpage);
+ /* Must get reference to anon_vma while still holding mmap_sem */
+ hold_anon_vma(rmap_item, vma->anon_vma);
out:
- up_read(&mm1->mmap_sem);
+ up_read(&mm->mmap_sem);
return err;
}
@@ -813,109 +927,73 @@ out:
* try_to_merge_two_pages - take two identical pages and prepare them
* to be merged into one page.
*
- * This function returns 0 if we successfully mapped two identical pages
- * into one page, -EFAULT otherwise.
+ * This function returns the kpage if we successfully merged two identical
+ * pages into one ksm page, NULL otherwise.
*
- * Note that this function allocates a new kernel page: if one of the pages
+ * Note that this function upgrades page to ksm page: if one of the pages
* is already a ksm page, try_to_merge_with_ksm_page should be used.
*/
-static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
- struct page *page1, struct mm_struct *mm2,
- unsigned long addr2, struct page *page2)
+static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
+ struct page *page,
+ struct rmap_item *tree_rmap_item,
+ struct page *tree_page)
{
- struct vm_area_struct *vma;
- struct page *kpage;
- int err = -EFAULT;
-
- /*
- * The number of nodes in the stable tree
- * is the number of kernel pages that we hold.
- */
- if (ksm_max_kernel_pages &&
- ksm_max_kernel_pages <= ksm_pages_shared)
- return err;
-
- kpage = alloc_page(GFP_HIGHUSER);
- if (!kpage)
- return err;
-
- down_read(&mm1->mmap_sem);
- if (ksm_test_exit(mm1)) {
- up_read(&mm1->mmap_sem);
- goto out;
- }
- vma = find_vma(mm1, addr1);
- if (!vma || vma->vm_start > addr1) {
- up_read(&mm1->mmap_sem);
- goto out;
- }
-
- copy_user_highpage(kpage, page1, addr1, vma);
- err = try_to_merge_one_page(vma, page1, kpage);
- up_read(&mm1->mmap_sem);
+ int err;
+ err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
if (!err) {
- err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
+ err = try_to_merge_with_ksm_page(tree_rmap_item,
+ tree_page, page);
/*
* If that fails, we have a ksm page with only one pte
* pointing to it: so break it.
*/
if (err)
- break_cow(mm1, addr1);
+ break_cow(rmap_item);
}
-out:
- put_page(kpage);
- return err;
+ return err ? NULL : page;
}
/*
- * stable_tree_search - search page inside the stable tree
- * @page: the page that we are searching identical pages to.
- * @page2: pointer into identical page that we are holding inside the stable
- * tree that we have found.
- * @rmap_item: the reverse mapping item
+ * stable_tree_search - search for page inside the stable tree
*
* This function checks if there is a page inside the stable tree
* with identical content to the page that we are scanning right now.
*
- * This function return rmap_item pointer to the identical item if found,
+ * This function returns the stable tree node of identical content if found,
* NULL otherwise.
*/
-static struct rmap_item *stable_tree_search(struct page *page,
- struct page **page2,
- struct rmap_item *rmap_item)
+static struct page *stable_tree_search(struct page *page)
{
struct rb_node *node = root_stable_tree.rb_node;
+ struct stable_node *stable_node;
+
+ stable_node = page_stable_node(page);
+ if (stable_node) { /* ksm page forked */
+ get_page(page);
+ return page;
+ }
while (node) {
- struct rmap_item *tree_rmap_item, *next_rmap_item;
+ struct page *tree_page;
int ret;
- tree_rmap_item = rb_entry(node, struct rmap_item, node);
- while (tree_rmap_item) {
- BUG_ON(!in_stable_tree(tree_rmap_item));
- cond_resched();
- page2[0] = get_ksm_page(tree_rmap_item);
- if (page2[0])
- break;
- next_rmap_item = tree_rmap_item->next;
- remove_rmap_item_from_tree(tree_rmap_item);
- tree_rmap_item = next_rmap_item;
- }
- if (!tree_rmap_item)
+ cond_resched();
+ stable_node = rb_entry(node, struct stable_node, node);
+ tree_page = get_ksm_page(stable_node);
+ if (!tree_page)
return NULL;
- ret = memcmp_pages(page, page2[0]);
+ ret = memcmp_pages(page, tree_page);
if (ret < 0) {
- put_page(page2[0]);
+ put_page(tree_page);
node = node->rb_left;
} else if (ret > 0) {
- put_page(page2[0]);
+ put_page(tree_page);
node = node->rb_right;
- } else {
- return tree_rmap_item;
- }
+ } else
+ return tree_page;
}
return NULL;
@@ -925,38 +1003,26 @@ static struct rmap_item *stable_tree_search(struct page *page,
* stable_tree_insert - insert rmap_item pointing to new ksm page
* into the stable tree.
*
- * @page: the page that we are searching identical page to inside the stable
- * tree.
- * @rmap_item: pointer to the reverse mapping item.
- *
- * This function returns rmap_item if success, NULL otherwise.
+ * This function returns the stable tree node just allocated on success,
+ * NULL otherwise.
*/
-static struct rmap_item *stable_tree_insert(struct page *page,
- struct rmap_item *rmap_item)
+static struct stable_node *stable_tree_insert(struct page *kpage)
{
struct rb_node **new = &root_stable_tree.rb_node;
struct rb_node *parent = NULL;
+ struct stable_node *stable_node;
while (*new) {
- struct rmap_item *tree_rmap_item, *next_rmap_item;
struct page *tree_page;
int ret;
- tree_rmap_item = rb_entry(*new, struct rmap_item, node);
- while (tree_rmap_item) {
- BUG_ON(!in_stable_tree(tree_rmap_item));
- cond_resched();
- tree_page = get_ksm_page(tree_rmap_item);
- if (tree_page)
- break;
- next_rmap_item = tree_rmap_item->next;
- remove_rmap_item_from_tree(tree_rmap_item);
- tree_rmap_item = next_rmap_item;
- }
- if (!tree_rmap_item)
+ cond_resched();
+ stable_node = rb_entry(*new, struct stable_node, node);
+ tree_page = get_ksm_page(stable_node);
+ if (!tree_page)
return NULL;
- ret = memcmp_pages(page, tree_page);
+ ret = memcmp_pages(kpage, tree_page);
put_page(tree_page);
parent = *new;
@@ -974,22 +1040,24 @@ static struct rmap_item *stable_tree_insert(struct page *page,
}
}
- rmap_item->address |= NODE_FLAG | STABLE_FLAG;
- rmap_item->next = NULL;
- rb_link_node(&rmap_item->node, parent, new);
- rb_insert_color(&rmap_item->node, &root_stable_tree);
+ stable_node = alloc_stable_node();
+ if (!stable_node)
+ return NULL;
- ksm_pages_shared++;
- return rmap_item;
+ rb_link_node(&stable_node->node, parent, new);
+ rb_insert_color(&stable_node->node, &root_stable_tree);
+
+ INIT_HLIST_HEAD(&stable_node->hlist);
+
+ stable_node->kpfn = page_to_pfn(kpage);
+ set_page_stable_node(kpage, stable_node);
+
+ return stable_node;
}
/*
- * unstable_tree_search_insert - search and insert items into the unstable tree.
- *
- * @page: the page that we are going to search for identical page or to insert
- * into the unstable tree
- * @page2: pointer into identical page that was found inside the unstable tree
- * @rmap_item: the reverse mapping item of page
+ * unstable_tree_search_insert - search for identical page,
+ * else insert rmap_item into the unstable tree.
*
* This function searches for a page in the unstable tree identical to the
* page currently being scanned; and if no identical page is found in the
@@ -1001,47 +1069,50 @@ static struct rmap_item *stable_tree_insert(struct page *page,
* This function does both searching and inserting, because they share
* the same walking algorithm in an rbtree.
*/
-static struct rmap_item *unstable_tree_search_insert(struct page *page,
- struct page **page2,
- struct rmap_item *rmap_item)
+static
+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
+ struct page *page,
+ struct page **tree_pagep)
+
{
struct rb_node **new = &root_unstable_tree.rb_node;
struct rb_node *parent = NULL;
while (*new) {
struct rmap_item *tree_rmap_item;
+ struct page *tree_page;
int ret;
cond_resched();
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
- page2[0] = get_mergeable_page(tree_rmap_item);
- if (!page2[0])
+ tree_page = get_mergeable_page(tree_rmap_item);
+ if (!tree_page)
return NULL;
/*
- * Don't substitute an unswappable ksm page
- * just for one good swappable forked page.
+ * Don't substitute a ksm page for a forked page.
*/
- if (page == page2[0]) {
- put_page(page2[0]);
+ if (page == tree_page) {
+ put_page(tree_page);
return NULL;
}
- ret = memcmp_pages(page, page2[0]);
+ ret = memcmp_pages(page, tree_page);
parent = *new;
if (ret < 0) {
- put_page(page2[0]);
+ put_page(tree_page);
new = &parent->rb_left;
} else if (ret > 0) {
- put_page(page2[0]);
+ put_page(tree_page);
new = &parent->rb_right;
} else {
+ *tree_pagep = tree_page;
return tree_rmap_item;
}
}
- rmap_item->address |= NODE_FLAG;
+ rmap_item->address |= UNSTABLE_FLAG;
rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
rb_link_node(&rmap_item->node, parent, new);
rb_insert_color(&rmap_item->node, &root_unstable_tree);
@@ -1056,18 +1127,16 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
* the same ksm page.
*/
static void stable_tree_append(struct rmap_item *rmap_item,
- struct rmap_item *tree_rmap_item)
+ struct stable_node *stable_node)
{
- rmap_item->next = tree_rmap_item->next;
- rmap_item->prev = tree_rmap_item;
-
- if (tree_rmap_item->next)
- tree_rmap_item->next->prev = rmap_item;
-
- tree_rmap_item->next = rmap_item;
+ rmap_item->head = stable_node;
rmap_item->address |= STABLE_FLAG;
+ hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
- ksm_pages_sharing++;
+ if (rmap_item->hlist.next)
+ ksm_pages_sharing++;
+ else
+ ksm_pages_shared++;
}
/*
@@ -1081,49 +1150,37 @@ static void stable_tree_append(struct rmap_item *rmap_item,
*/
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
- struct page *page2[1];
struct rmap_item *tree_rmap_item;
+ struct page *tree_page = NULL;
+ struct stable_node *stable_node;
+ struct page *kpage;
unsigned int checksum;
int err;
- if (in_stable_tree(rmap_item))
- remove_rmap_item_from_tree(rmap_item);
+ remove_rmap_item_from_tree(rmap_item);
/* We first start with searching the page inside the stable tree */
- tree_rmap_item = stable_tree_search(page, page2, rmap_item);
- if (tree_rmap_item) {
- if (page == page2[0]) /* forked */
- err = 0;
- else
- err = try_to_merge_with_ksm_page(rmap_item->mm,
- rmap_item->address,
- page, page2[0]);
- put_page(page2[0]);
-
+ kpage = stable_tree_search(page);
+ if (kpage) {
+ err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
if (!err) {
/*
* The page was successfully merged:
* add its rmap_item to the stable tree.
*/
- stable_tree_append(rmap_item, tree_rmap_item);
+ lock_page(kpage);
+ stable_tree_append(rmap_item, page_stable_node(kpage));
+ unlock_page(kpage);
}
+ put_page(kpage);
return;
}
/*
- * A ksm page might have got here by fork, but its other
- * references have already been removed from the stable tree.
- * Or it might be left over from a break_ksm which failed
- * when the mem_cgroup had reached its limit: try again now.
- */
- if (PageKsm(page))
- break_cow(rmap_item->mm, rmap_item->address);
-
- /*
- * In case the hash value of the page was changed from the last time we
- * have calculated it, this page to be changed frequely, therefore we
- * don't want to insert it to the unstable tree, and we don't want to
- * waste our time to search if there is something identical to it there.
+ * If the hash value of the page has changed from the last time
+ * we calculated it, this page is changing frequently: therefore we
+ * don't want to insert it in the unstable tree, and we don't want
+ * to waste our time searching for something identical to it there.
*/
checksum = calc_checksum(page);
if (rmap_item->oldchecksum != checksum) {
@@ -1131,21 +1188,27 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
return;
}
- tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item);
+ tree_rmap_item =
+ unstable_tree_search_insert(rmap_item, page, &tree_page);
if (tree_rmap_item) {
- err = try_to_merge_two_pages(rmap_item->mm,
- rmap_item->address, page,
- tree_rmap_item->mm,
- tree_rmap_item->address, page2[0]);
+ kpage = try_to_merge_two_pages(rmap_item, page,
+ tree_rmap_item, tree_page);
+ put_page(tree_page);
/*
* As soon as we merge this page, we want to remove the
* rmap_item of the page we have merged with from the unstable
* tree, and insert it instead as new node in the stable tree.
*/
- if (!err) {
- rb_erase(&tree_rmap_item->node, &root_unstable_tree);
- tree_rmap_item->address &= ~NODE_FLAG;
- ksm_pages_unshared--;
+ if (kpage) {
+ remove_rmap_item_from_tree(tree_rmap_item);
+
+ lock_page(kpage);
+ stable_node = stable_tree_insert(kpage);
+ if (stable_node) {
+ stable_tree_append(tree_rmap_item, stable_node);
+ stable_tree_append(rmap_item, stable_node);
+ }
+ unlock_page(kpage);
/*
* If we fail to insert the page into the stable tree,
@@ -1153,37 +1216,28 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
* to a ksm page left outside the stable tree,
* in which case we need to break_cow on both.
*/
- if (stable_tree_insert(page2[0], tree_rmap_item))
- stable_tree_append(rmap_item, tree_rmap_item);
- else {
- break_cow(tree_rmap_item->mm,
- tree_rmap_item->address);
- break_cow(rmap_item->mm, rmap_item->address);
+ if (!stable_node) {
+ break_cow(tree_rmap_item);
+ break_cow(rmap_item);
}
}
-
- put_page(page2[0]);
}
}
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
- struct list_head *cur,
+ struct rmap_item **rmap_list,
unsigned long addr)
{
struct rmap_item *rmap_item;
- while (cur != &mm_slot->rmap_list) {
- rmap_item = list_entry(cur, struct rmap_item, link);
- if ((rmap_item->address & PAGE_MASK) == addr) {
- if (!in_stable_tree(rmap_item))
- remove_rmap_item_from_tree(rmap_item);
+ while (*rmap_list) {
+ rmap_item = *rmap_list;
+ if ((rmap_item->address & PAGE_MASK) == addr)
return rmap_item;
- }
if (rmap_item->address > addr)
break;
- cur = cur->next;
+ *rmap_list = rmap_item->rmap_list;
remove_rmap_item_from_tree(rmap_item);
- list_del(&rmap_item->link);
free_rmap_item(rmap_item);
}
@@ -1192,7 +1246,8 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
/* It has already been zeroed */
rmap_item->mm = mm_slot->mm;
rmap_item->address = addr;
- list_add_tail(&rmap_item->link, cur);
+ rmap_item->rmap_list = *rmap_list;
+ *rmap_list = rmap_item;
}
return rmap_item;
}
@@ -1217,8 +1272,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
spin_unlock(&ksm_mmlist_lock);
next_mm:
ksm_scan.address = 0;
- ksm_scan.rmap_item = list_entry(&slot->rmap_list,
- struct rmap_item, link);
+ ksm_scan.rmap_list = &slot->rmap_list;
}
mm = slot->mm;
@@ -1244,10 +1298,10 @@ next_mm:
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
- ksm_scan.rmap_item->link.next,
- ksm_scan.address);
+ ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
- ksm_scan.rmap_item = rmap_item;
+ ksm_scan.rmap_list =
+ &rmap_item->rmap_list;
ksm_scan.address += PAGE_SIZE;
} else
put_page(*page);
@@ -1263,14 +1317,13 @@ next_mm:
if (ksm_test_exit(mm)) {
ksm_scan.address = 0;
- ksm_scan.rmap_item = list_entry(&slot->rmap_list,
- struct rmap_item, link);
+ ksm_scan.rmap_list = &slot->rmap_list;
}
/*
* Nuke all the rmap_items that are above this current rmap:
* because there were no VM_MERGEABLE vmas with such addresses.
*/
- remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
+ remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(slot->mm_list.next,
@@ -1323,14 +1376,6 @@ static void ksm_do_scan(unsigned int scan_npages)
return;
if (!PageKsm(page) || !in_stable_tree(rmap_item))
cmp_and_merge_page(page, rmap_item);
- else if (page_mapcount(page) == 1) {
- /*
- * Replace now-unshared ksm page by ordinary page.
- */
- break_cow(rmap_item->mm, rmap_item->address);
- remove_rmap_item_from_tree(rmap_item);
- rmap_item->oldchecksum = calc_checksum(page);
- }
put_page(page);
}
}
@@ -1375,7 +1420,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
VM_PFNMAP | VM_IO | VM_DONTEXPAND |
VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
- VM_MIXEDMAP | VM_SAO))
+ VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
return 0; /* just ignore the advice */
if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
@@ -1452,7 +1497,7 @@ void __ksm_exit(struct mm_struct *mm)
spin_lock(&ksm_mmlist_lock);
mm_slot = get_mm_slot(mm);
if (mm_slot && ksm_scan.mm_slot != mm_slot) {
- if (list_empty(&mm_slot->rmap_list)) {
+ if (!mm_slot->rmap_list) {
hlist_del(&mm_slot->link);
list_del(&mm_slot->mm_list);
easy_to_free = 1;
@@ -1473,6 +1518,249 @@ void __ksm_exit(struct mm_struct *mm)
}
}
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct page *new_page;
+
+ unlock_page(page); /* any racers will COW it, not modify it */
+
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (new_page) {
+ copy_user_highpage(new_page, page, address, vma);
+
+ SetPageDirty(new_page);
+ __SetPageUptodate(new_page);
+ SetPageSwapBacked(new_page);
+ __set_page_locked(new_page);
+
+ if (page_evictable(new_page, vma))
+ lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
+ else
+ add_page_to_unevictable_list(new_page);
+ }
+
+ page_cache_release(page);
+ return new_page;
+}
+
+int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
+ unsigned long *vm_flags)
+{
+ struct stable_node *stable_node;
+ struct rmap_item *rmap_item;
+ struct hlist_node *hlist;
+ unsigned int mapcount = page_mapcount(page);
+ int referenced = 0;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return 0;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
+ continue;
+
+ referenced += page_referenced_one(page, vma,
+ rmap_item->address, &mapcount, vm_flags);
+ if (!search_new_forks || !mapcount)
+ break;
+ }
+ spin_unlock(&anon_vma->lock);
+ if (!mapcount)
+ goto out;
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return referenced;
+}
+
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+{
+ struct stable_node *stable_node;
+ struct hlist_node *hlist;
+ struct rmap_item *rmap_item;
+ int ret = SWAP_AGAIN;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return SWAP_FAIL;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ ret = try_to_unmap_one(page, vma,
+ rmap_item->address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page)) {
+ spin_unlock(&anon_vma->lock);
+ goto out;
+ }
+ }
+ spin_unlock(&anon_vma->lock);
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return ret;
+}
+
+#ifdef CONFIG_MIGRATION
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct stable_node *stable_node;
+ struct hlist_node *hlist;
+ struct rmap_item *rmap_item;
+ int ret = SWAP_AGAIN;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return ret;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ ret = rmap_one(page, vma, rmap_item->address, arg);
+ if (ret != SWAP_AGAIN) {
+ spin_unlock(&anon_vma->lock);
+ goto out;
+ }
+ }
+ spin_unlock(&anon_vma->lock);
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return ret;
+}
+
+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+ struct stable_node *stable_node;
+
+ VM_BUG_ON(!PageLocked(oldpage));
+ VM_BUG_ON(!PageLocked(newpage));
+ VM_BUG_ON(newpage->mapping != oldpage->mapping);
+
+ stable_node = page_stable_node(newpage);
+ if (stable_node) {
+ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+ stable_node->kpfn = page_to_pfn(newpage);
+ }
+}
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct rb_node *node;
+
+ for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
+ struct stable_node *stable_node;
+
+ stable_node = rb_entry(node, struct stable_node, node);
+ if (stable_node->kpfn >= start_pfn &&
+ stable_node->kpfn < end_pfn)
+ return stable_node;
+ }
+ return NULL;
+}
+
+static int ksm_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mn = arg;
+ struct stable_node *stable_node;
+
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ /*
+ * Keep it very simple for now: just lock out ksmd and
+ * MADV_UNMERGEABLE while any memory is going offline.
+ */
+ mutex_lock(&ksm_thread_mutex);
+ break;
+
+ case MEM_OFFLINE:
+ /*
+ * Most of the work is done by page migration; but there might
+ * be a few stable_nodes left over, still pointing to struct
+ * pages which have been offlined: prune those from the tree.
+ */
+ while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
+ mn->start_pfn + mn->nr_pages)) != NULL)
+ remove_node_from_stable_tree(stable_node);
+ /* fallthrough */
+
+ case MEM_CANCEL_OFFLINE:
+ mutex_unlock(&ksm_thread_mutex);
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
#ifdef CONFIG_SYSFS
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
@@ -1551,8 +1839,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
/*
* KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
* KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
- * breaking COW to free the unswappable pages_shared (but leaves
- * mm_slots on the list for when ksmd may be set running again).
+ * breaking COW to free the pages_shared (but leaves mm_slots
+ * on the list for when ksmd may be set running again).
*/
mutex_lock(&ksm_thread_mutex);
@@ -1577,29 +1865,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
}
KSM_ATTR(run);
-static ssize_t max_kernel_pages_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- int err;
- unsigned long nr_pages;
-
- err = strict_strtoul(buf, 10, &nr_pages);
- if (err)
- return -EINVAL;
-
- ksm_max_kernel_pages = nr_pages;
-
- return count;
-}
-
-static ssize_t max_kernel_pages_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
-}
-KSM_ATTR(max_kernel_pages);
-
static ssize_t pages_shared_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -1649,7 +1914,6 @@ static struct attribute *ksm_attrs[] = {
&sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr,
&run_attr.attr,
- &max_kernel_pages_attr.attr,
&pages_shared_attr.attr,
&pages_sharing_attr.attr,
&pages_unshared_attr.attr,
@@ -1669,8 +1933,6 @@ static int __init ksm_init(void)
struct task_struct *ksm_thread;
int err;
- ksm_max_kernel_pages = totalram_pages / 4;
-
err = ksm_slab_init();
if (err)
goto out;
@@ -1698,6 +1960,13 @@ static int __init ksm_init(void)
#endif /* CONFIG_SYSFS */
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ /*
+ * Choose a high priority since the callback takes ksm_thread_mutex:
+ * later callbacks could only be taking locks which nest within that.
+ */
+ hotplug_memory_notifier(ksm_memory_callback, 100);
+#endif
return 0;
out_free2:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c31a310aa146..e0c2066495e3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1737,11 +1737,12 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
goto charge_cur_mm;
/*
* A racing thread's fault, or swapoff, may have already updated
- * the pte, and even removed page from swap cache: return success
- * to go on to do_swap_page()'s pte_same() test, which should fail.
+ * the pte, and even removed page from swap cache: in those cases
+ * do_swap_page()'s pte_same() test will fail; but there's also a
+ * KSM case which does need to charge the page.
*/
if (!PageSwapCache(page))
- return 0;
+ goto charge_cur_mm;
mem = try_get_mem_cgroup_from_swapcache(page);
if (!mem)
goto charge_cur_mm;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ac49fef95ab..50d4f8d7024a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -582,10 +582,8 @@ static struct page_state {
{ unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
{ unevict, unevict, "unevictable LRU", me_pagecache_clean},
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
{ mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
{ mlock, mlock, "mlocked LRU", me_pagecache_clean },
-#endif
{ lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
{ lru|dirty, lru, "clean LRU", me_pagecache_clean },
diff --git a/mm/memory.c b/mm/memory.c
index 6ab19dd4a199..a54b2c498444 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -572,7 +572,7 @@ out:
* covered by this vma.
*/
-static inline void
+static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
@@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- swap_duplicate(entry);
+ if (swap_duplicate(entry) < 0)
+ return entry.val;
+
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
@@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
+ return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[2];
+ swp_entry_t entry = (swp_entry_t){0};
again:
rss[1] = rss[0] = 0;
@@ -674,7 +678,10 @@ again:
progress++;
continue;
}
- copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+ entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+ vma, addr, rss);
+ if (entry.val)
+ break;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
@@ -684,6 +691,12 @@ again:
add_mm_rss(dst_mm, rss[0], rss[1]);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
+
+ if (entry.val) {
+ if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+ return -ENOMEM;
+ progress = 0;
+ }
if (addr != end)
goto again;
return 0;
@@ -2514,7 +2527,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_HWPOISON;
} else {
print_bad_pte(vma, address, orig_pte, NULL);
- ret = VM_FAULT_OOM;
+ ret = VM_FAULT_SIGBUS;
}
goto out;
}
@@ -2548,6 +2561,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ page = ksm_might_need_to_copy(page, vma, address);
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
goto out_page;
@@ -2910,7 +2929,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* Page table corrupted: show pte and kill process.
*/
print_bad_pte(vma, address, orig_pte, NULL);
- return VM_FAULT_OOM;
+ return VM_FAULT_SIGBUS;
}
pgoff = pte_to_pgoff(orig_pte);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2047465cd27c..030ce8a5bb0e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -27,6 +27,7 @@
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
+#include <linux/mm_inline.h>
#include <asm/tlbflush.h>
@@ -71,7 +72,9 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type)
atomic_inc(&page->_count);
}
-void put_page_bootmem(struct page *page)
+/* The reference to __meminit __free_pages_bootmem() is valid,
+ * so use __ref to tell modpost not to generate a warning. */
+void __ref put_page_bootmem(struct page *page)
{
int type;
@@ -672,6 +675,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (!ret) { /* Success */
list_add_tail(&page->lru, &source);
move_pages--;
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+
} else {
/* Because we don't have the big zone->lock, we should
check this again here. */
@@ -694,7 +700,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (list_empty(&source))
goto out;
/* this function returns # of failed pages */
- ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
out:
return ret;
@@ -747,7 +753,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
return offlined;
}
-int offline_pages(unsigned long start_pfn,
+static int offline_pages(unsigned long start_pfn,
unsigned long end_pfn, unsigned long timeout)
{
unsigned long pfn, nr_pages, expire;
@@ -849,6 +855,10 @@ repeat:
setup_per_zone_wmarks();
calculate_zone_inactive_ratio(zone);
+ if (!node_present_pages(node)) {
+ node_clear_state(node, N_HIGH_MEMORY);
+ kswapd_stop(node);
+ }
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4545d5944243..290fb5bf0440 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -85,10 +85,12 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
+#include <linux/mm_inline.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -412,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!page)
continue;
/*
- * The check for PageReserved here is important to avoid
- * handling zero pages and other pages that may have been
- * marked special by the system.
- *
- * If the PageReserved would not be checked here then f.e.
- * the location of the zero page could have an influence
- * on MPOL_MF_STRICT, zero pages would be counted for
- * the per node stats, and there would be useless attempts
- * to put zero pages on the migration list.
+ * vm_normal_page() filters out zero pages, but there might
+ * still be PageReserved pages to skip, perhaps in a VDSO.
+ * And we cannot move PageKsm pages sensibly or safely yet.
*/
- if (PageReserved(page))
+ if (PageReserved(page) || PageKsm(page))
continue;
nid = page_to_nid(page);
if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -809,6 +805,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
if (!isolate_lru_page(page)) {
list_add_tail(&page->lru, pagelist);
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
}
}
}
@@ -836,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist))
- err = migrate_pages(&pagelist, new_node_page, dest);
+ err = migrate_pages(&pagelist, new_node_page, dest, 0);
return err;
}
@@ -1053,7 +1051,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist))
nr_failed = migrate_pages(&pagelist, new_vma_page,
- (unsigned long)vma);
+ (unsigned long)vma, 0);
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
@@ -1565,6 +1563,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
}
return zl;
}
+
+/*
+ * init_nodemask_of_mempolicy
+ *
+ * If the current task's mempolicy is "default" [NULL], return 'false'
+ * to indicate default policy. Otherwise, extract the policy nodemask
+ * for 'bind' or 'interleave' policy into the argument nodemask, or
+ * initialize the argument nodemask to contain the single node for
+ * 'preferred' or 'local' policy and return 'true' to indicate presence
+ * of non-default mempolicy.
+ *
+ * We don't bother with reference counting the mempolicy [mpol_get/put]
+ * because the current task is examining its own mempolicy and a task's
+ * mempolicy is only ever changed by the task itself.
+ *
+ * N.B., it is the caller's responsibility to free a returned nodemask.
+ */
+bool init_nodemask_of_mempolicy(nodemask_t *mask)
+{
+ struct mempolicy *mempolicy;
+ int nid;
+
+ if (!(mask && current->mempolicy))
+ return false;
+
+ mempolicy = current->mempolicy;
+ switch (mempolicy->mode) {
+ case MPOL_PREFERRED:
+ if (mempolicy->flags & MPOL_F_LOCAL)
+ nid = numa_node_id();
+ else
+ nid = mempolicy->v.preferred_node;
+ init_nodemask_of_node(mask, nid);
+ break;
+
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+ *mask = mempolicy->v.nodes;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return true;
+}
#endif
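
As a rough usage sketch of the new helper (the loop body is purely illustrative and
error handling is minimal), a caller that wants to restrict per-node work to the
task's mempolicy might do:

	nodemask_t *mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	int nid;

	if (mask && init_nodemask_of_mempolicy(mask)) {
		/* non-default policy: visit only the nodes it permits */
		for_each_node_mask(nid, *mask)
			pr_info("mempolicy permits node %d\n", nid);
	}
	/* !mask or default policy: fall back to all online nodes */
	kfree(mask);
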
/* Allocate a page in interleaved policy.
diff --git a/mm/migrate.c b/mm/migrate.c
index 7dbcb22316d2..efddbf0926b2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -21,6 +21,7 @@
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
@@ -78,8 +79,8 @@ int putback_lru_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
-static void remove_migration_pte(struct vm_area_struct *vma,
- struct page *old, struct page *new)
+static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ unsigned long addr, void *old)
{
struct mm_struct *mm = vma->vm_mm;
swp_entry_t entry;
@@ -88,40 +89,37 @@ static void remove_migration_pte(struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
- unsigned long addr = page_address_in_vma(new, vma);
-
- if (addr == -EFAULT)
- return;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
- return;
+ goto out;
pud = pud_offset(pgd, addr);
if (!pud_present(*pud))
- return;
+ goto out;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
- return;
+ goto out;
ptep = pte_offset_map(pmd, addr);
if (!is_swap_pte(*ptep)) {
pte_unmap(ptep);
- return;
+ goto out;
}
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
- goto out;
+ goto unlock;
entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
- goto out;
+ if (!is_migration_entry(entry) ||
+ migration_entry_to_page(entry) != old)
+ goto unlock;
get_page(new);
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
@@ -137,58 +135,10 @@ static void remove_migration_pte(struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, pte);
-
-out:
+unlock:
pte_unmap_unlock(ptep, ptl);
-}
-
-/*
- * Note that remove_file_migration_ptes will only work on regular mappings,
- * Nonlinear mappings do not use migration entries.
- */
-static void remove_file_migration_ptes(struct page *old, struct page *new)
-{
- struct vm_area_struct *vma;
- struct address_space *mapping = new->mapping;
- struct prio_tree_iter iter;
- pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- if (!mapping)
- return;
-
- spin_lock(&mapping->i_mmap_lock);
-
- vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
- remove_migration_pte(vma, old, new);
-
- spin_unlock(&mapping->i_mmap_lock);
-}
-
-/*
- * Must hold mmap_sem lock on at least one of the vmas containing
- * the page so that the anon_vma cannot vanish.
- */
-static void remove_anon_migration_ptes(struct page *old, struct page *new)
-{
- struct anon_vma *anon_vma;
- struct vm_area_struct *vma;
- unsigned long mapping;
-
- mapping = (unsigned long)new->mapping;
-
- if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
- return;
-
- /*
- * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
- */
- anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
- spin_lock(&anon_vma->lock);
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
- remove_migration_pte(vma, old, new);
-
- spin_unlock(&anon_vma->lock);
+out:
+ return SWAP_AGAIN;
}
/*
@@ -197,10 +147,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
*/
static void remove_migration_ptes(struct page *old, struct page *new)
{
- if (PageAnon(new))
- remove_anon_migration_ptes(old, new);
- else
- remove_file_migration_ptes(old, new);
+ rmap_walk(new, remove_migration_pte, old);
}
/*
@@ -341,8 +288,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
if (TestClearPageActive(page)) {
VM_BUG_ON(PageUnevictable(page));
SetPageActive(newpage);
- } else
- unevictable_migrate_page(newpage, page);
+ } else if (TestClearPageUnevictable(page))
+ SetPageUnevictable(newpage);
if (PageChecked(page))
SetPageChecked(newpage);
if (PageMappedToDisk(page))
@@ -361,6 +308,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
}
mlock_migrate_page(newpage, page);
+ ksm_migrate_page(newpage, page);
ClearPageSwapCache(page);
ClearPagePrivate(page);
@@ -580,9 +528,9 @@ static int move_to_new_page(struct page *newpage, struct page *page)
else
rc = fallback_migrate_page(mapping, newpage, page);
- if (!rc) {
+ if (!rc)
remove_migration_ptes(page, newpage);
- } else
+ else
newpage->mapping = NULL;
unlock_page(newpage);
@@ -595,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
* to the newly allocated page in newpage.
*/
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force)
+ struct page *page, int force, int offlining)
{
int rc = 0;
int *result = NULL;
@@ -621,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
lock_page(page);
}
+ /*
+ * Only memory hotplug's offline_pages() caller has locked out KSM,
+ * and can safely migrate a KSM page. The other cases have skipped
+ * PageKsm along with PageReserved - but it is only now when we have
+ * the page lock that we can be certain it will not go KSM beneath us
+ * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
+ * its pagecount raised, but only here do we take the page lock which
+ * serializes that).
+ */
+ if (PageKsm(page) && !offlining) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
/* charge against new page */
charge = mem_cgroup_prepare_migration(page, &mem);
if (charge == -ENOMEM) {
@@ -737,7 +699,7 @@ move_newpage:
* Return: Number of pages not migrated or error code.
*/
int migrate_pages(struct list_head *from,
- new_page_t get_new_page, unsigned long private)
+ new_page_t get_new_page, unsigned long private, int offlining)
{
int retry = 1;
int nr_failed = 0;
@@ -746,13 +708,6 @@ int migrate_pages(struct list_head *from,
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
int rc;
- unsigned long flags;
-
- local_irq_save(flags);
- list_for_each_entry(page, from, lru)
- __inc_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- local_irq_restore(flags);
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
@@ -764,7 +719,7 @@ int migrate_pages(struct list_head *from,
cond_resched();
rc = unmap_and_move(get_new_page, private,
- page, pass > 2);
+ page, pass > 2, offlining);
switch(rc) {
case -ENOMEM:
@@ -860,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
if (!page)
goto set_status;
- if (PageReserved(page)) /* Check for zero page */
+ /* Use PageReserved to check for zero page */
+ if (PageReserved(page) || PageKsm(page))
goto put_and_set;
pp->page = page;
@@ -878,8 +834,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto put_and_set;
err = isolate_lru_page(page);
- if (!err)
+ if (!err) {
list_add_tail(&page->lru, &pagelist);
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ }
put_and_set:
/*
* Either remove the duplicate refcount from
@@ -894,7 +853,7 @@ set_status:
err = 0;
if (!list_empty(&pagelist))
err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm);
+ (unsigned long)pm, 0);
up_read(&mm->mmap_sem);
return err;
@@ -1015,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
err = -ENOENT;
/* Use PageReserved to check for zero page */
- if (!page || PageReserved(page))
+ if (!page || PageReserved(page) || PageKsm(page))
goto set_status;
err = page_to_nid(page);
@@ -1044,7 +1003,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
int err;
for (i = 0; i < nr_pages; i += chunk_nr) {
- if (chunk_nr + i > nr_pages)
+ if (chunk_nr > nr_pages - i)
chunk_nr = nr_pages - i;
err = copy_from_user(chunk_pages, &pages[i],
diff --git a/mm/mincore.c b/mm/mincore.c
index 8cb508f84ea4..7a3436ef39eb 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,6 +14,7 @@
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -72,6 +73,42 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
if (!vma || addr < vma->vm_start)
return -ENOMEM;
+#ifdef CONFIG_HUGETLB_PAGE
+ if (is_vm_hugetlb_page(vma)) {
+ struct hstate *h;
+ unsigned long nr_huge;
+ unsigned char present;
+
+ i = 0;
+ nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
+ h = hstate_vma(vma);
+ nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
+ - (addr >> huge_page_shift(h)) + 1;
+ nr_huge = min(nr_huge,
+ (vma->vm_end - addr) >> huge_page_shift(h));
+ while (1) {
+ /* hugepage is always in RAM for now,
+ * but generally it needs to be checked */
+ ptep = huge_pte_offset(current->mm,
+ addr & huge_page_mask(h));
+ present = !!(ptep &&
+ !huge_pte_none(huge_ptep_get(ptep)));
+ while (1) {
+ vec[i++] = present;
+ addr += PAGE_SIZE;
+ /* reach buffer limit */
+ if (i == nr)
+ return nr;
+ /* check hugepage border */
+ if (!((addr & ~huge_page_mask(h))
+ >> PAGE_SHIFT))
+ break;
+ }
+ }
+ return nr;
+ }
+#endif
+
/*
* Calculate how many pages there are left in the last level of the
* PTE array for our address.
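
Seen from userspace, the hugetlb branch above means mincore() now reports residency
for each small page inside a hugetlb mapping. A minimal sketch, assuming 2MB huge
pages are configured and reserved; the MAP_HUGETLB fallback value is an assumption
for x86 in case your libc headers predate the flag:

#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* assumption: x86 value; check your headers */
#endif

int main(void)
{
	size_t len = 2UL << 20;			/* assumption: 2MB huge page */
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char *vec = malloc(len / psz);
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED || !vec) {
		perror("mmap/malloc");
		return 1;
	}
	p[0] = 1;				/* fault the huge page in */
	if (mincore(p, len, vec) == 0)
		printf("first small page resident: %d\n", vec[0] & 1);
	munmap(p, len);
	free(vec);
	return 0;
}
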
diff --git a/mm/mlock.c b/mm/mlock.c
index bd6f0e466f6c..2b8335a89400 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page)
}
}
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page: page to be unlocked
*
- * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page. We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock. However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked. If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page mlocked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it. However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU. If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear PageMlocked as we might not get another chance. If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
*/
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
{
BUG_ON(!PageLocked(page));
@@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page)
/*
* did try_to_munlock() succeed or punt?
*/
- if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+ if (ret != SWAP_MLOCK)
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
putback_lru_page(page);
} else {
/*
- * We lost the race. let try_to_unmap() deal
- * with it. At least we get the page state and
- * mlock stats right. However, page is still on
- * the noreclaim list. We'll fix that up when
- * the page is eventually freed or we scan the
- * noreclaim list.
+ * Some other task has removed the page from the LRU.
+ * putback_lru_page() will take care of removing the
+ * page from the unevictable list, if necessary.
+ * vmscan [page_referenced()] will move the page back
+ * to the unevictable list if some other vma has it
+ * mlocked.
*/
if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
diff --git a/mm/mmap.c b/mm/mmap.c
index ed70a68e882a..d9c77b2dbe9d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1198,8 +1198,20 @@ munmap_back:
goto free_vma;
}
- if (vma_wants_writenotify(vma))
+ if (vma_wants_writenotify(vma)) {
+ pgprot_t pprot = vma->vm_page_prot;
+
+ /* Can vma->vm_page_prot have changed??
+ *
+ * Answer: Yes, drivers may have changed it in their
+ * f_op->mmap method.
+ *
+ * This ensures that vmas marked as uncached stay that way.
+ */
vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
+ if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ }
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
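
The situation the comment above guards against is a driver's ->mmap handler having
already marked the vma uncached; a hedged sketch of such a handler (MYDRV_REG_BASE
is a made-up MMIO base, not something defined by this patch):

#define MYDRV_REG_BASE	0xfe000000UL	/* hypothetical MMIO base */

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* mark the mapping uncached before handing out the pfns */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       MYDRV_REG_BASE >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

With the hunk above, mmap_region() notices that vm_page_prot already matches its
noncached variant and keeps it noncached even after rewriting the protection for
write notification.
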
@@ -1811,10 +1823,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+ * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
+ * munmap path where it doesn't make sense to fail.
*/
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below)
{
struct mempolicy *pol;
@@ -1824,9 +1836,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
~(huge_page_mask(hstate_vma(vma)))))
return -EINVAL;
- if (mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
-
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new)
return -ENOMEM;
@@ -1866,6 +1875,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
return 0;
}
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ return __split_vma(mm, vma, addr, new_below);
+}
+
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
@@ -1901,7 +1923,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
* places tmp vma above, and higher split_vma places tmp vma below.
*/
if (start > vma->vm_start) {
- int error = split_vma(mm, vma, start, 0);
+ int error;
+
+ /*
+ * Make sure that map_count on return from munmap() will
+ * not exceed its limit; but let map_count go just above
+ * its limit temporarily, to help free resources as expected.
+ */
+ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ error = __split_vma(mm, vma, start, 0);
if (error)
return error;
prev = vma;
@@ -1910,7 +1942,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
- int error = split_vma(mm, last, end, 1);
+ int error = __split_vma(mm, last, end, 1);
if (error)
return error;
}
diff --git a/mm/nommu.c b/mm/nommu.c
index 9876fa0c3ad3..8687973462bb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1143,9 +1143,6 @@ static int do_mmap_private(struct vm_area_struct *vma,
if (ret < rlen)
memset(base + ret, 0, rlen - ret);
- } else {
- /* if it's an anonymous mapping, then just clear it */
- memset(base, 0, rlen);
}
return 0;
@@ -1343,6 +1340,11 @@ unsigned long do_mmap_pgoff(struct file *file,
goto error_just_free;
add_nommu_region(region);
+ /* clear anonymous mappings that don't ask for uninitialized data */
+ if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
+ memset((void *)region->vm_start, 0,
+ region->vm_end - region->vm_start);
+
/* okay... we have a mapping; now we have to register it */
result = vma->vm_start;
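
For reference, the flag tested here reaches the kernel from an mmap() call roughly
like the one below; the numeric fallback for MAP_UNINITIALIZED is an assumption
taken from the asm-generic mman header, and kernels that do not allow uninitialized
anonymous mappings define the flag to 0, so such mappings stay zeroed:

#include <sys/mman.h>
#include <stdio.h>

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0x4000000	/* assumption: asm-generic value */
#endif

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* contents are unspecified on nommu kernels that honour the flag */
	munmap(p, 4096);
	return 0;
}
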
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ea2147dabba6..492c98624fc1 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -337,6 +337,21 @@ static void dump_tasks(const struct mem_cgroup *mem)
} while_each_thread(g, p);
}
+static void dump_header(gfp_t gfp_mask, int order, struct mem_cgroup *mem)
+{
+ pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
+ "oom_adj=%d\n",
+ current->comm, gfp_mask, order, current->signal->oom_adj);
+ task_lock(current);
+ cpuset_print_task_mems_allowed(current);
+ task_unlock(current);
+ dump_stack();
+ mem_cgroup_print_oom_info(mem, current);
+ show_mem();
+ if (sysctl_oom_dump_tasks)
+ dump_tasks(mem);
+}
+
/*
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
@@ -395,20 +410,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
{
struct task_struct *c;
- if (printk_ratelimit()) {
- printk(KERN_WARNING "%s invoked oom-killer: "
- "gfp_mask=0x%x, order=%d, oom_adj=%d\n",
- current->comm, gfp_mask, order,
- current->signal->oom_adj);
- task_lock(current);
- cpuset_print_task_mems_allowed(current);
- task_unlock(current);
- dump_stack();
- mem_cgroup_print_oom_info(mem, current);
- show_mem();
- if (sysctl_oom_dump_tasks)
- dump_tasks(mem);
- }
+ if (printk_ratelimit())
+ dump_header(gfp_mask, order, mem);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
@@ -544,6 +547,7 @@ retry:
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
read_unlock(&tasklist_lock);
+ dump_header(gfp_mask, order, NULL);
panic("Out of memory and no killable processes...\n");
}
@@ -609,8 +613,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
/* Got some memory back in the last second. */
return;
- if (sysctl_panic_on_oom == 2)
+ if (sysctl_panic_on_oom == 2) {
+ dump_header(gfp_mask, order, NULL);
panic("out of memory. Compulsory panic_on_oom is selected.\n");
+ }
/*
* Check if there were limitations on the allocation (only relevant for
@@ -626,8 +632,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
break;
case CONSTRAINT_NONE:
- if (sysctl_panic_on_oom)
+ if (sysctl_panic_on_oom) {
+ dump_header(gfp_mask, order, NULL);
panic("out of memory. panic_on_oom is selected\n");
+ }
/* Fall-through */
case CONSTRAINT_CPUSET:
__out_of_memory(gfp_mask, order);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bc2ac63f41e..59d2e88fb47c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page,
zone->free_area[order].nr_free++;
}
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* free_page_mlock() -- clean up attempts to free an mlocked() page.
* Page should not be on lru, so no need to fix that up.
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page)
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
static inline int free_pages_check(struct page *page)
{
diff --git a/mm/page_io.c b/mm/page_io.c
index c6f3e5071de3..a19af956ee1b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,20 +19,15 @@
#include <linux/writeback.h>
#include <asm/pgtable.h>
-static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags,
struct page *page, bio_end_io_t end_io)
{
struct bio *bio;
bio = bio_alloc(gfp_flags, 1);
if (bio) {
- struct swap_info_struct *sis;
- swp_entry_t entry = { .val = index, };
-
- sis = get_swap_info_struct(swp_type(entry));
- bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
- (PAGE_SIZE >> 9);
- bio->bi_bdev = sis->bdev;
+ bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
+ bio->bi_sector <<= PAGE_SHIFT - 9;
bio->bi_io_vec[0].bv_page = page;
bio->bi_io_vec[0].bv_len = PAGE_SIZE;
bio->bi_io_vec[0].bv_offset = 0;
@@ -102,8 +97,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
unlock_page(page);
goto out;
}
- bio = get_swap_bio(GFP_NOIO, page_private(page), page,
- end_swap_bio_write);
+ bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
if (bio == NULL) {
set_page_dirty(page);
unlock_page(page);
@@ -127,8 +121,7 @@ int swap_readpage(struct page *page)
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageUptodate(page));
- bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
- end_swap_bio_read);
+ bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
if (bio == NULL) {
unlock_page(page);
ret = -ENOMEM;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index d5878bed7841..7b47a57b6646 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1,6 +1,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
+#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -107,6 +108,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
pgd_t *pgd;
unsigned long next;
int err = 0;
+ struct vm_area_struct *vma;
if (addr >= end)
return err;
@@ -117,11 +119,38 @@ int walk_page_range(unsigned long addr, unsigned long end,
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
+
+ /*
+ * Handle hugetlb vmas individually, because the pagetable walk for
+ * a hugetlb page is architecture dependent and we can't handle it
+ * in the same manner as non-huge pages.
+ */
+ vma = find_vma(walk->mm, addr);
+#ifdef CONFIG_HUGETLB_PAGE
+ if (vma && is_vm_hugetlb_page(vma)) {
+ pte_t *pte;
+ struct hstate *hs;
+
+ if (vma->vm_end < next)
+ next = vma->vm_end;
+ hs = hstate_vma(vma);
+ pte = huge_pte_offset(walk->mm,
+ addr & huge_page_mask(hs));
+ if (pte && !huge_pte_none(huge_ptep_get(pte))
+ && walk->hugetlb_entry)
+ err = walk->hugetlb_entry(pte, addr,
+ next, walk);
+ if (err)
+ break;
+ continue;
+ }
+#endif
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
+ pgd++;
continue;
}
if (walk->pgd_entry)
@@ -131,7 +160,8 @@ int walk_page_range(unsigned long addr, unsigned long end,
err = walk_pud_range(pgd, addr, next, walk);
if (err)
break;
- } while (pgd++, addr = next, addr != end);
+ pgd++;
+ } while (addr = next, addr != end);
return err;
}
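
A caller opts in to the new hugetlb handling by filling in the hugetlb_entry member
of its mm_walk. A minimal sketch, assuming the callback signature used above and
that the caller takes mmap_sem itself:

static int note_huge_range(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	pr_info("hugetlb pte covers %lx-%lx\n", addr, next);
	return 0;
}

static void dump_huge_ranges(struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.hugetlb_entry	= note_huge_range,
		.mm		= mm,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
}
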
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b408..442010cc91c6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
*
* To use this allocator, arch code should do the following.
*
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
* regular address to percpu pointer and back if they need to be
* different from the default
@@ -74,6 +72,7 @@
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
+#include <asm/io.h>
#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr)
}
EXPORT_SYMBOL_GPL(free_percpu);
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr, which is a dereferenceable address obtained via one of the
+ * percpu access macros, this function translates it into its physical
+ * address. The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ if ((unsigned long)addr < VMALLOC_START ||
+ (unsigned long)addr >= VMALLOC_END)
+ return __pa(addr);
+ else
+ return page_to_phys(vmalloc_to_page(addr));
+}
+
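
A quick consumer sketch for the new helper; the percpu counter is illustrative, the
point is only that the address handed in must come from a percpu accessor such as
per_cpu_ptr():

	u32 *counters = alloc_percpu(u32);
	int cpu;

	if (counters) {
		for_each_possible_cpu(cpu) {
			phys_addr_t phys =
				per_cpu_ptr_to_phys(per_cpu_ptr(counters, cpu));

			pr_info("cpu%d copy lives at %llx\n",
				cpu, (unsigned long long)phys);
		}
		free_percpu(counters);
	}
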
static inline size_t pcpu_calc_fc_sizes(size_t static_size,
size_t reserved_size,
ssize_t *dyn_sizep)
diff --git a/mm/rmap.c b/mm/rmap.c
index dd43373a483f..98135dbd25ba 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
@@ -67,7 +68,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}
-static inline void anon_vma_free(struct anon_vma *anon_vma)
+void anon_vma_free(struct anon_vma *anon_vma)
{
kmem_cache_free(anon_vma_cachep, anon_vma);
}
@@ -171,7 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
list_del(&vma->anon_vma_node);
/* We must garbage collect the anon_vma if it's empty */
- empty = list_empty(&anon_vma->head);
+ empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
spin_unlock(&anon_vma->lock);
if (empty)
@@ -183,6 +184,7 @@ static void anon_vma_ctor(void *data)
struct anon_vma *anon_vma = data;
spin_lock_init(&anon_vma->lock);
+ ksm_refcount_init(anon_vma);
INIT_LIST_HEAD(&anon_vma->head);
}
@@ -202,8 +204,8 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
unsigned long anon_mapping;
rcu_read_lock();
- anon_mapping = (unsigned long) page->mapping;
- if (!(anon_mapping & PAGE_MAPPING_ANON))
+ anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+ if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
@@ -248,8 +250,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
if (PageAnon(page)) {
- if ((void *)vma->anon_vma !=
- (void *)page->mapping - PAGE_MAPPING_ANON)
+ if (vma->anon_vma != page_anon_vma(page))
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
if (!vma->vm_file ||
@@ -337,21 +338,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
* Subfunctions of page_referenced: page_referenced_one called
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
-static int page_referenced_one(struct page *page,
- struct vm_area_struct *vma,
- unsigned int *mapcount,
- unsigned long *vm_flags)
+int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, unsigned int *mapcount,
+ unsigned long *vm_flags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
spinlock_t *ptl;
int referenced = 0;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -388,9 +383,10 @@ static int page_referenced_one(struct page *page,
out_unmap:
(*mapcount)--;
pte_unmap_unlock(pte, ptl);
-out:
+
if (referenced)
*vm_flags |= vma->vm_flags;
+out:
return referenced;
}
@@ -409,6 +405,9 @@ static int page_referenced_anon(struct page *page,
mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
@@ -416,7 +415,7 @@ static int page_referenced_anon(struct page *page,
*/
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
- referenced += page_referenced_one(page, vma,
+ referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
@@ -474,6 +473,9 @@ static int page_referenced_file(struct page *page,
mapcount = page_mapcount(page);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
@@ -481,7 +483,7 @@ static int page_referenced_file(struct page *page,
*/
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
- referenced += page_referenced_one(page, vma,
+ referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
@@ -507,46 +509,47 @@ int page_referenced(struct page *page,
unsigned long *vm_flags)
{
int referenced = 0;
+ int we_locked = 0;
if (TestClearPageReferenced(page))
referenced++;
*vm_flags = 0;
- if (page_mapped(page) && page->mapping) {
- if (PageAnon(page))
+ if (page_mapped(page) && page_rmapping(page)) {
+ if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+ we_locked = trylock_page(page);
+ if (!we_locked) {
+ referenced++;
+ goto out;
+ }
+ }
+ if (unlikely(PageKsm(page)))
+ referenced += page_referenced_ksm(page, mem_cont,
+ vm_flags);
+ else if (PageAnon(page))
referenced += page_referenced_anon(page, mem_cont,
vm_flags);
- else if (is_locked)
+ else if (page->mapping)
referenced += page_referenced_file(page, mem_cont,
vm_flags);
- else if (!trylock_page(page))
- referenced++;
- else {
- if (page->mapping)
- referenced += page_referenced_file(page,
- mem_cont, vm_flags);
+ if (we_locked)
unlock_page(page);
- }
}
-
+out:
if (page_test_and_clear_young(page))
referenced++;
return referenced;
}
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
goto out;
@@ -578,8 +581,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (vma->vm_flags & VM_SHARED)
- ret += page_mkclean_one(page, vma);
+ if (vma->vm_flags & VM_SHARED) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret += page_mkclean_one(page, vma, address);
+ }
}
spin_unlock(&mapping->i_mmap_lock);
return ret;
@@ -620,14 +627,7 @@ static void __page_set_anon_rmap(struct page *page,
BUG_ON(!anon_vma);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
-
page->index = linear_page_index(vma, address);
-
- /*
- * nr_mapped state can be updated without turning off
- * interrupts because it is not modified via interrupt.
- */
- __inc_zone_page_state(page, NR_ANON_PAGES);
}
/**
@@ -665,14 +665,23 @@ static void __page_check_anon_rmap(struct page *page,
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that PageAnon is not being upgraded racily to PageKsm
+ * (but PageKsm is never downgraded to PageAnon).
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ int first = atomic_inc_and_test(&page->_mapcount);
+ if (first)
+ __inc_zone_page_state(page, NR_ANON_PAGES);
+ if (unlikely(PageKsm(page)))
+ return;
+
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- if (atomic_inc_and_test(&page->_mapcount))
+ if (first)
__page_set_anon_rmap(page, vma, address);
else
__page_check_anon_rmap(page, vma, address);
@@ -694,6 +703,7 @@ void page_add_new_anon_rmap(struct page *page,
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
+ __inc_zone_page_state(page, NR_ANON_PAGES);
__page_set_anon_rmap(page, vma, address);
if (page_evictable(page, vma))
lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -760,20 +770,15 @@ void page_remove_rmap(struct page *page)
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- enum ttu_flags flags)
+int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, enum ttu_flags flags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -784,10 +789,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* skipped over this mm) then we should reactivate it.
*/
if (!(flags & TTU_IGNORE_MLOCK)) {
- if (vma->vm_flags & VM_LOCKED) {
- ret = SWAP_MLOCK;
+ if (vma->vm_flags & VM_LOCKED)
+ goto out_mlock;
+
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out_unmap;
- }
}
if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -822,7 +828,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
- swap_duplicate(entry);
+ if (swap_duplicate(entry) < 0) {
+ set_pte_at(mm, address, pte, pteval);
+ ret = SWAP_FAIL;
+ goto out_unmap;
+ }
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
@@ -849,7 +859,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
} else
dec_mm_counter(mm, file_rss);
-
page_remove_rmap(page);
page_cache_release(page);
@@ -857,6 +866,27 @@ out_unmap:
pte_unmap_unlock(pte, ptl);
out:
return ret;
+
+out_mlock:
+ pte_unmap_unlock(pte, ptl);
+
+ /*
+ * We need mmap_sem locking; otherwise the VM_LOCKED check is racy
+ * and gives an unstable result. We also can't wait here, because we
+ * now hold anon_vma->lock or mapping->i_mmap_lock.
+ * If the trylock fails, the page remains on the evictable lru, and
+ * vmscan can later retry moving it to the unevictable lru if the
+ * page really is mlocked.
+ */
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (vma->vm_flags & VM_LOCKED) {
+ mlock_vma_page(page);
+ ret = SWAP_MLOCK;
+ }
+ up_read(&vma->vm_mm->mmap_sem);
+ }
+ return ret;
}
/*
@@ -922,11 +952,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
/*
- * MLOCK_PAGES => feature is configured.
- * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+ * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
- if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
@@ -976,29 +1005,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
}
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
- int mlocked = 0;
-
- if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
- if (vma->vm_flags & VM_LOCKED) {
- mlock_vma_page(page);
- mlocked++; /* really mlocked the page */
- }
- up_read(&vma->vm_mm->mmap_sem);
- }
- return mlocked;
-}
-
/**
* try_to_unmap_anon - unmap or unlock anonymous page using the object-based
* rmap method
* @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
@@ -1014,42 +1025,22 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
- unsigned int mlocked = 0;
int ret = SWAP_AGAIN;
- int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
- if (MLOCK_PAGES && unlikely(unlock))
- ret = SWAP_SUCCESS; /* default for try_to_munlock() */
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!((vma->vm_flags & VM_LOCKED) &&
- page_mapped_in_vma(page, vma)))
- continue; /* must visit all unlocked vmas */
- ret = SWAP_MLOCK; /* saw at least one mlocked vma */
- } else {
- ret = try_to_unmap_one(page, vma, flags);
- if (ret == SWAP_FAIL || !page_mapped(page))
- break;
- }
- if (ret == SWAP_MLOCK) {
- mlocked = try_to_mlock_page(page, vma);
- if (mlocked)
- break; /* stop if actually mlocked page */
- }
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = try_to_unmap_one(page, vma, address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page))
+ break;
}
page_unlock_anon_vma(anon_vma);
-
- if (mlocked)
- ret = SWAP_MLOCK; /* actually mlocked the page */
- else if (ret == SWAP_MLOCK)
- ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
-
return ret;
}
@@ -1079,48 +1070,30 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
unsigned int mapcount;
- unsigned int mlocked = 0;
- int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
- if (MLOCK_PAGES && unlikely(unlock))
- ret = SWAP_SUCCESS; /* default for try_to_munlock() */
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!((vma->vm_flags & VM_LOCKED) &&
- page_mapped_in_vma(page, vma)))
- continue; /* must visit all vmas */
- ret = SWAP_MLOCK;
- } else {
- ret = try_to_unmap_one(page, vma, flags);
- if (ret == SWAP_FAIL || !page_mapped(page))
- goto out;
- }
- if (ret == SWAP_MLOCK) {
- mlocked = try_to_mlock_page(page, vma);
- if (mlocked)
- break; /* stop if actually mlocked page */
- }
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = try_to_unmap_one(page, vma, address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page))
+ goto out;
}
- if (mlocked)
+ if (list_empty(&mapping->i_mmap_nonlinear))
goto out;
- if (list_empty(&mapping->i_mmap_nonlinear))
+ /*
+ * We don't bother to try to find the munlocked page in nonlinears.
+ * It's costly. Instead, later, page reclaim logic may call
+ * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+ */
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out;
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!(vma->vm_flags & VM_LOCKED))
- continue; /* must visit all vmas */
- ret = SWAP_MLOCK; /* leave mlocked == 0 */
- goto out; /* no need to look further */
- }
- if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
@@ -1153,16 +1126,12 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
- ret = try_to_unmap_cluster(cursor, &mapcount,
- vma, page);
- if (ret == SWAP_MLOCK)
- mlocked = 2; /* to return below */
+ if (try_to_unmap_cluster(cursor, &mapcount,
+ vma, page) == SWAP_MLOCK)
+ ret = SWAP_MLOCK;
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
@@ -1183,10 +1152,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
vma->vm_private_data = NULL;
out:
spin_unlock(&mapping->i_mmap_lock);
- if (mlocked)
- ret = SWAP_MLOCK; /* actually mlocked the page */
- else if (ret == SWAP_MLOCK)
- ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
return ret;
}
@@ -1210,7 +1175,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
BUG_ON(!PageLocked(page));
- if (PageAnon(page))
+ if (unlikely(PageKsm(page)))
+ ret = try_to_unmap_ksm(page, flags);
+ else if (PageAnon(page))
ret = try_to_unmap_anon(page, flags);
else
ret = try_to_unmap_file(page, flags);
@@ -1229,17 +1196,98 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
*
* Return values are:
*
- * SWAP_SUCCESS - no vma's holding page mlocked.
+ * SWAP_AGAIN - no vma is holding page mlocked, or,
* SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_FAIL - page cannot be located at present
* SWAP_MLOCK - page is now mlocked.
*/
int try_to_munlock(struct page *page)
{
VM_BUG_ON(!PageLocked(page) || PageLRU(page));
- if (PageAnon(page))
+ if (unlikely(PageKsm(page)))
+ return try_to_unmap_ksm(page, TTU_MUNLOCK);
+ else if (PageAnon(page))
return try_to_unmap_anon(page, TTU_MUNLOCK);
else
return try_to_unmap_file(page, TTU_MUNLOCK);
}
+#ifdef CONFIG_MIGRATION
+/*
+ * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+ int ret = SWAP_AGAIN;
+
+ /*
+ * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+ * because that depends on page_mapped(); but not all its usages
+ * are holding mmap_sem, which also gave the necessary guarantee
+ * (that this anon_vma's slab has not already been destroyed).
+ * This needs to be reviewed later: avoiding page_lock_anon_vma()
+ * is risky, and currently limits the usefulness of rmap_walk().
+ */
+ anon_vma = page_anon_vma(page);
+ if (!anon_vma)
+ return ret;
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = rmap_one(page, vma, address, arg);
+ if (ret != SWAP_AGAIN)
+ break;
+ }
+ spin_unlock(&anon_vma->lock);
+ return ret;
+}
+
+static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct address_space *mapping = page->mapping;
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ int ret = SWAP_AGAIN;
+
+ if (!mapping)
+ return ret;
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = rmap_one(page, vma, address, arg);
+ if (ret != SWAP_AGAIN)
+ break;
+ }
+ /*
+ * No nonlinear handling: being always shared, nonlinear vmas
+ * never contain migration ptes. Decide what to do about this
+ * limitation to linear when we need rmap_walk() on nonlinear.
+ */
+ spin_unlock(&mapping->i_mmap_lock);
+ return ret;
+}
+
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ VM_BUG_ON(!PageLocked(page));
+
+ if (unlikely(PageKsm(page)))
+ return rmap_walk_ksm(page, rmap_one, arg);
+ else if (PageAnon(page))
+ return rmap_walk_anon(page, rmap_one, arg);
+ else
+ return rmap_walk_file(page, rmap_one, arg);
+}
+#endif /* CONFIG_MIGRATION */
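
remove_migration_ptes() above is the only rmap_walk() user so far; as a hedged
sketch of the calling convention, a walker that merely counts the vmas covering a
locked page might look like this (both functions are illustrative, not part of the
patch):

static int count_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;
	return SWAP_AGAIN;		/* SWAP_AGAIN keeps the walk going */
}

static int count_covering_vmas(struct page *page)
{
	int count = 0;

	/* caller must hold the page lock; rmap_walk() is CONFIG_MIGRATION only */
	rmap_walk(page, count_one, &count);
	return count;
}
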
diff --git a/mm/shmem.c b/mm/shmem.c
index 356dd99566ec..4fb41c83daca 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
goto out;
}
mutex_unlock(&shmem_swaplist_mutex);
-out: return found; /* 0 or 1 or -ENOMEM */
+ /*
+ * Can some race bring us here? We've been holding page lock,
+ * so I think not; but would rather try again later than BUG()
+ */
+ unlock_page(page);
+ page_cache_release(page);
+out:
+ return (found < 0) ? found : 0;
}
/*
@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
else
inode = NULL;
spin_unlock(&info->lock);
- swap_duplicate(swap);
+ swap_shmem_alloc(swap);
BUG_ON(page_mapped(page));
page_cache_release(page); /* pagecache ref */
swap_writepage(page, wbc);
diff --git a/mm/slab.c b/mm/slab.c
index a6c9166996a9..3f4822938f46 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
size_t slab_buffer_size(struct kmem_cache *cachep)
{
return cachep->buffer_size;
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
* objects freed on different nodes from which they were allocated) and the
* flushing of remote pcps by calling drain_node_pages.
*/
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
static void init_reap_node(int cpu)
{
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
if (node == MAX_NUMNODES)
node = first_node(node_online_map);
- per_cpu(reap_node, cpu) = node;
+ per_cpu(slab_reap_node, cpu) = node;
}
static void next_reap_node(void)
{
- int node = __get_cpu_var(reap_node);
+ int node = __get_cpu_var(slab_reap_node);
node = next_node(node, node_online_map);
if (unlikely(node >= MAX_NUMNODES))
node = first_node(node_online_map);
- __get_cpu_var(reap_node) = node;
+ __get_cpu_var(slab_reap_node) = node;
}
#else
@@ -875,7 +875,7 @@ static void next_reap_node(void)
*/
static void __cpuinit start_cpu_timer(int cpu)
{
- struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+ struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
*/
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
- int node = __get_cpu_var(reap_node);
+ int node = __get_cpu_var(slab_reap_node);
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
* anything expensive but will only modify reap_work
* and reschedule the timer.
*/
- cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+ cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
/* Now the cache_reaper is guaranteed to be not running. */
- per_cpu(reap_work, cpu).work.func = NULL;
+ per_cpu(slab_reap_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
@@ -3578,7 +3578,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_alloc);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3641,7 +3641,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid)
@@ -3669,7 +3669,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
return ret;
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node,
@@ -3689,7 +3689,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return __do_kmalloc_node(size, flags, node, NULL);
}
EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
#endif /* CONFIG_NUMA */
/**
@@ -3721,7 +3721,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, __builtin_return_address(0));
diff --git a/mm/slub.c b/mm/slub.c
index da0ce55965dc..8d71aaf888d7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1754,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
}
EXPORT_SYMBOL(kmem_cache_alloc);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
return slab_alloc(s, gfpflags, -1, _RET_IP_);
@@ -1775,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9c590eef7912..6c0585b16418 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
@@ -35,11 +36,15 @@
#include <linux/swapops.h>
#include <linux/page_cgroup.h>
+static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
+ unsigned char);
+static void free_swap_count_continuations(struct swap_info_struct *);
+static sector_t map_swap_entry(swp_entry_t, struct block_device**);
+
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
-static int swap_overflow;
static int least_priority;
static const char Bad_file[] = "Bad swap file entry ";
@@ -49,42 +54,20 @@ static const char Unused_offset[] = "Unused swap offset entry ";
static struct swap_list_t swap_list = {-1, -1};
-static struct swap_info_struct swap_info[MAX_SWAPFILES];
+static struct swap_info_struct *swap_info[MAX_SWAPFILES];
static DEFINE_MUTEX(swapon_mutex);
-/* For reference count accounting in swap_map */
-/* enum for swap_map[] handling. internal use only */
-enum {
- SWAP_MAP = 0, /* ops for reference from swap users */
- SWAP_CACHE, /* ops for reference from swap cache */
-};
-
-static inline int swap_count(unsigned short ent)
-{
- return ent & SWAP_COUNT_MASK;
-}
-
-static inline bool swap_has_cache(unsigned short ent)
+static inline unsigned char swap_count(unsigned char ent)
{
- return !!(ent & SWAP_HAS_CACHE);
+ return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
-static inline unsigned short encode_swapmap(int count, bool has_cache)
-{
- unsigned short ret = count;
-
- if (has_cache)
- return SWAP_HAS_CACHE | ret;
- return ret;
-}
-
-/* returnes 1 if swap entry is freed */
+/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
- int type = si - swap_info;
- swp_entry_t entry = swp_entry(type, offset);
+ swp_entry_t entry = swp_entry(si->type, offset);
struct page *page;
int ret = 0;
@@ -120,7 +103,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
down_read(&swap_unplug_sem);
entry.val = page_private(page);
if (PageSwapCache(page)) {
- struct block_device *bdev = swap_info[swp_type(entry)].bdev;
+ struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
struct backing_dev_info *bdi;
/*
@@ -146,23 +129,28 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
static int discard_swap(struct swap_info_struct *si)
{
struct swap_extent *se;
+ sector_t start_block;
+ sector_t nr_blocks;
int err = 0;
- list_for_each_entry(se, &si->extent_list, list) {
- sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
- sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
+ /* Do not discard the swap header page! */
+ se = &si->first_swap_extent;
+ start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
+ nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
+ if (nr_blocks) {
+ err = blkdev_issue_discard(si->bdev, start_block,
+ nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+ if (err)
+ return err;
+ cond_resched();
+ }
- if (se->start_page == 0) {
- /* Do not discard the swap header page! */
- start_block += 1 << (PAGE_SHIFT - 9);
- nr_blocks -= 1 << (PAGE_SHIFT - 9);
- if (!nr_blocks)
- continue;
- }
+ list_for_each_entry(se, &si->first_swap_extent.list, list) {
+ start_block = se->start_block << (PAGE_SHIFT - 9);
+ nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL,
- DISCARD_FL_BARRIER);
+ nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
if (err)
break;
@@ -201,14 +189,11 @@ static void discard_swap_cluster(struct swap_info_struct *si,
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO,
- DISCARD_FL_BARRIER))
+ nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
break;
}
lh = se->list.next;
- if (lh == &si->extent_list)
- lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
}
}
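As an aside not in the patch itself: the (PAGE_SHIFT - 9) shifts above convert page numbers into 512-byte sector numbers. Assuming the common 4KB page size (PAGE_SHIFT = 12, which the patch does not guarantee):

	1 page = 1 << (12 - 9) = 8 sectors

so skipping the swap header page in discard_swap() advances the discard start to (se->start_block + 1) * 8 sectors and shortens the first extent's discard by the same 8 sectors.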
@@ -223,7 +208,7 @@ static int wait_for_discard(void *word)
#define LATENCY_LIMIT 256
static inline unsigned long scan_swap_map(struct swap_info_struct *si,
- int cache)
+ unsigned char usage)
{
unsigned long offset;
unsigned long scan_base;
@@ -354,10 +339,7 @@ checks:
si->lowest_bit = si->max;
si->highest_bit = 0;
}
- if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
- si->swap_map[offset] = encode_swapmap(0, true);
- else /* at suspend */
- si->swap_map[offset] = encode_swapmap(1, false);
+ si->swap_map[offset] = usage;
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
@@ -467,10 +449,10 @@ swp_entry_t get_swap_page(void)
nr_swap_pages--;
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
- si = swap_info + type;
+ si = swap_info[type];
next = si->next;
if (next < 0 ||
- (!wrapped && si->prio != swap_info[next].prio)) {
+ (!wrapped && si->prio != swap_info[next]->prio)) {
next = swap_list.head;
wrapped++;
}
@@ -482,7 +464,7 @@ swp_entry_t get_swap_page(void)
swap_list.next = next;
/* This is called for allocating swap entry for cache */
- offset = scan_swap_map(si, SWAP_CACHE);
+ offset = scan_swap_map(si, SWAP_HAS_CACHE);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
@@ -503,11 +485,11 @@ swp_entry_t get_swap_page_of_type(int type)
pgoff_t offset;
spin_lock(&swap_lock);
- si = swap_info + type;
- if (si->flags & SWP_WRITEOK) {
+ si = swap_info[type];
+ if (si && (si->flags & SWP_WRITEOK)) {
nr_swap_pages--;
/* This is called for allocating swap entry, not cache */
- offset = scan_swap_map(si, SWAP_MAP);
+ offset = scan_swap_map(si, 1);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
@@ -518,9 +500,9 @@ swp_entry_t get_swap_page_of_type(int type)
return (swp_entry_t) {0};
}
-static struct swap_info_struct * swap_info_get(swp_entry_t entry)
+static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
unsigned long offset, type;
if (!entry.val)
@@ -528,7 +510,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry)
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_nofile;
- p = & swap_info[type];
+ p = swap_info[type];
if (!(p->flags & SWP_USED))
goto bad_device;
offset = swp_offset(entry);
@@ -554,41 +536,56 @@ out:
return NULL;
}
-static int swap_entry_free(struct swap_info_struct *p,
- swp_entry_t ent, int cache)
+static unsigned char swap_entry_free(struct swap_info_struct *p,
+ swp_entry_t entry, unsigned char usage)
{
- unsigned long offset = swp_offset(ent);
- int count = swap_count(p->swap_map[offset]);
- bool has_cache;
+ unsigned long offset = swp_offset(entry);
+ unsigned char count;
+ unsigned char has_cache;
- has_cache = swap_has_cache(p->swap_map[offset]);
+ count = p->swap_map[offset];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
- if (cache == SWAP_MAP) { /* dropping usage count of swap */
- if (count < SWAP_MAP_MAX) {
- count--;
- p->swap_map[offset] = encode_swapmap(count, has_cache);
- }
- } else { /* dropping swap cache flag */
+ if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
- p->swap_map[offset] = encode_swapmap(count, false);
-
+ has_cache = 0;
+ } else if (count == SWAP_MAP_SHMEM) {
+ /*
+ * Or we could insist on shmem.c using a special
+ * swap_shmem_free() and free_shmem_swap_and_cache()...
+ */
+ count = 0;
+ } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
+ if (count == COUNT_CONTINUED) {
+ if (swap_count_continued(p, offset, count))
+ count = SWAP_MAP_MAX | COUNT_CONTINUED;
+ else
+ count = SWAP_MAP_MAX;
+ } else
+ count--;
}
- /* return code. */
- count = p->swap_map[offset];
+
+ if (!count)
+ mem_cgroup_uncharge_swap(entry);
+
+ usage = count | has_cache;
+ p->swap_map[offset] = usage;
+
/* free if no reference */
- if (!count) {
+ if (!usage) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
- if (p->prio > swap_info[swap_list.next].prio)
- swap_list.next = p - swap_info;
+ if (swap_list.next >= 0 &&
+ p->prio > swap_info[swap_list.next]->prio)
+ swap_list.next = p->type;
nr_swap_pages++;
p->inuse_pages--;
}
- if (!swap_count(count))
- mem_cgroup_uncharge_swap(ent);
- return count;
+
+ return usage;
}
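As an aside not in the patch itself: with swap_map[] now an array of unsigned char, each byte carries the SWAP_HAS_CACHE flag plus a count, exactly as split apart at the top of swap_entry_free() above. A minimal sketch of that decoding (the helper name is invented for illustration):

/* Illustrative only: split one swap_map byte the way swap_entry_free() does. */
static void decode_swap_map(unsigned char ent,
			    unsigned char *has_cache, unsigned char *count)
{
	*has_cache = ent & SWAP_HAS_CACHE;	/* swap cache holds a reference */
	*count = ent & ~SWAP_HAS_CACHE;		/* pte references, SWAP_MAP_SHMEM,
						 * or a count with COUNT_CONTINUED */
}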
/*
@@ -597,11 +594,11 @@ static int swap_entry_free(struct swap_info_struct *p,
*/
void swap_free(swp_entry_t entry)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
p = swap_info_get(entry);
if (p) {
- swap_entry_free(p, entry, SWAP_MAP);
+ swap_entry_free(p, entry, 1);
spin_unlock(&swap_lock);
}
}
@@ -612,26 +609,21 @@ void swap_free(swp_entry_t entry)
void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
- int ret;
+ unsigned char count;
p = swap_info_get(entry);
if (p) {
- ret = swap_entry_free(p, entry, SWAP_CACHE);
- if (page) {
- bool swapout;
- if (ret)
- swapout = true; /* the end of swap out */
- else
- swapout = false; /* no more swap users! */
- mem_cgroup_uncharge_swapcache(page, entry, swapout);
- }
+ count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
+ if (page)
+ mem_cgroup_uncharge_swapcache(page, entry, count != 0);
spin_unlock(&swap_lock);
}
- return;
}
/*
* How many references to page are currently swapped out?
+ * This does not give an exact answer when swap count is continued,
+ * but does include the high COUNT_CONTINUED flag to allow for that.
*/
static inline int page_swapcount(struct page *page)
{
@@ -659,6 +651,8 @@ int reuse_swap_page(struct page *page)
int count;
VM_BUG_ON(!PageLocked(page));
+ if (unlikely(PageKsm(page)))
+ return 0;
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
@@ -667,7 +661,7 @@ int reuse_swap_page(struct page *page)
SetPageDirty(page);
}
}
- return count == 1;
+ return count <= 1;
}
/*
@@ -704,7 +698,7 @@ int free_swap_and_cache(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
+ if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
@@ -741,14 +735,14 @@ int free_swap_and_cache(swp_entry_t entry)
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
- int i;
+ int type;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
- for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *sis = swap_info + i;
+ for (type = 0; type < nr_swapfiles; type++) {
+ struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
@@ -758,20 +752,18 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
- return i;
+ return type;
}
if (bdev == sis->bdev) {
- struct swap_extent *se;
+ struct swap_extent *se = &sis->first_swap_extent;
- se = list_entry(sis->extent_list.next,
- struct swap_extent, list);
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
- return i;
+ return type;
}
}
}
@@ -783,6 +775,21 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
}
/*
+ * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
+ * corresponding to given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int type, pgoff_t offset)
+{
+ struct block_device *bdev;
+
+ if ((unsigned int)type >= nr_swapfiles)
+ return 0;
+ if (!(swap_info[type]->flags & SWP_WRITEOK))
+ return 0;
+ return map_swap_entry(swp_entry(type, offset), &bdev);
+}
+
+/*
* Return either the total number of swap pages of given type, or the number
* of free pages of that type (depending on @free)
*
@@ -792,18 +799,20 @@ unsigned int count_swap_pages(int type, int free)
{
unsigned int n = 0;
- if (type < nr_swapfiles) {
- spin_lock(&swap_lock);
- if (swap_info[type].flags & SWP_WRITEOK) {
- n = swap_info[type].pages;
+ spin_lock(&swap_lock);
+ if ((unsigned int)type < nr_swapfiles) {
+ struct swap_info_struct *sis = swap_info[type];
+
+ if (sis->flags & SWP_WRITEOK) {
+ n = sis->pages;
if (free)
- n -= swap_info[type].inuse_pages;
+ n -= sis->inuse_pages;
}
- spin_unlock(&swap_lock);
}
+ spin_unlock(&swap_lock);
return n;
}
-#endif
+#endif /* CONFIG_HIBERNATION */
/*
* No need to decide whether this PTE shares the swap entry with others,
@@ -932,7 +941,7 @@ static int unuse_vma(struct vm_area_struct *vma,
unsigned long addr, end, next;
int ret;
- if (page->mapping) {
+ if (page_anon_vma(page)) {
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
return 0;
@@ -988,7 +997,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
{
unsigned int max = si->max;
unsigned int i = prev;
- int count;
+ unsigned char count;
/*
* No need for swap_lock here: we're just looking
@@ -1024,16 +1033,14 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
*/
static int try_to_unuse(unsigned int type)
{
- struct swap_info_struct * si = &swap_info[type];
+ struct swap_info_struct *si = swap_info[type];
struct mm_struct *start_mm;
- unsigned short *swap_map;
- unsigned short swcount;
+ unsigned char *swap_map;
+ unsigned char swcount;
struct page *page;
swp_entry_t entry;
unsigned int i = 0;
int retval = 0;
- int reset_overflow = 0;
- int shmem;
/*
* When searching mms for an entry, a good strategy is to
@@ -1047,8 +1054,7 @@ static int try_to_unuse(unsigned int type)
* together, child after parent. If we race with dup_mmap(), we
* prefer to resolve parent before child, lest we miss entries
* duplicated after we scanned child: using last mm would invert
- * that. Though it's only a serious concern when an overflowed
- * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
+ * that.
*/
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
@@ -1110,17 +1116,18 @@ static int try_to_unuse(unsigned int type)
/*
* Remove all references to entry.
- * Whenever we reach init_mm, there's no address space
- * to search, but use it as a reminder to search shmem.
*/
- shmem = 0;
swcount = *swap_map;
- if (swap_count(swcount)) {
- if (start_mm == &init_mm)
- shmem = shmem_unuse(entry, page);
- else
- retval = unuse_mm(start_mm, entry, page);
+ if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+ retval = shmem_unuse(entry, page);
+ /* page has already been unlocked and released */
+ if (retval < 0)
+ break;
+ continue;
}
+ if (swap_count(swcount) && start_mm != &init_mm)
+ retval = unuse_mm(start_mm, entry, page);
+
if (swap_count(*swap_map)) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
@@ -1131,7 +1138,7 @@ static int try_to_unuse(unsigned int type)
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
- while (swap_count(*swap_map) && !retval && !shmem &&
+ while (swap_count(*swap_map) && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1145,10 +1152,9 @@ static int try_to_unuse(unsigned int type)
swcount = *swap_map;
if (!swap_count(swcount)) /* any usage ? */
;
- else if (mm == &init_mm) {
+ else if (mm == &init_mm)
set_start_mm = 1;
- shmem = shmem_unuse(entry, page);
- } else
+ else
retval = unuse_mm(mm, entry, page);
if (set_start_mm && *swap_map < swcount) {
@@ -1164,13 +1170,6 @@ static int try_to_unuse(unsigned int type)
mmput(start_mm);
start_mm = new_start_mm;
}
- if (shmem) {
- /* page has already been unlocked and released */
- if (shmem > 0)
- continue;
- retval = shmem;
- break;
- }
if (retval) {
unlock_page(page);
page_cache_release(page);
@@ -1178,30 +1177,6 @@ static int try_to_unuse(unsigned int type)
}
/*
- * How could swap count reach 0x7ffe ?
- * There's no way to repeat a swap page within an mm
- * (except in shmem, where it's the shared object which takes
- * the reference count)?
- * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned
- * short is too small....)
- * If that's wrong, then we should worry more about
- * exit_mmap() and do_munmap() cases described above:
- * we might be resetting SWAP_MAP_MAX too early here.
- * We know "Undead"s can happen, they're okay, so don't
- * report them; but do report if we reset SWAP_MAP_MAX.
- */
- /* We might release the lock_page() in unuse_mm(). */
- if (!PageSwapCache(page) || page_private(page) != entry.val)
- goto retry;
-
- if (swap_count(*swap_map) == SWAP_MAP_MAX) {
- spin_lock(&swap_lock);
- *swap_map = encode_swapmap(0, true);
- spin_unlock(&swap_lock);
- reset_overflow = 1;
- }
-
- /*
* If a reference remains (rare), we would like to leave
* the page in the swap cache; but try_to_unmap could
* then re-duplicate the entry once we drop page lock,
@@ -1213,6 +1188,12 @@ static int try_to_unuse(unsigned int type)
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
+ *
+ * Given how unuse_vma() targets one particular offset
+ * in an anon_vma, once the anon_vma has been determined,
+ * this splitting happens to be just what is needed to
+ * handle the case where KSM pages have been swapped out: re-reading
+ * is unnecessarily slow, but we can fix that later on.
*/
if (swap_count(*swap_map) &&
PageDirty(page) && PageSwapCache(page)) {
@@ -1242,7 +1223,6 @@ static int try_to_unuse(unsigned int type)
* mark page dirty so shrink_page_list will preserve it.
*/
SetPageDirty(page);
-retry:
unlock_page(page);
page_cache_release(page);
@@ -1254,10 +1234,6 @@ retry:
}
mmput(start_mm);
- if (reset_overflow) {
- printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
- swap_overflow = 0;
- }
return retval;
}
@@ -1270,10 +1246,10 @@ retry:
static void drain_mmlist(void)
{
struct list_head *p, *next;
- unsigned int i;
+ unsigned int type;
- for (i = 0; i < nr_swapfiles; i++)
- if (swap_info[i].inuse_pages)
+ for (type = 0; type < nr_swapfiles; type++)
+ if (swap_info[type]->inuse_pages)
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
@@ -1283,12 +1259,23 @@ static void drain_mmlist(void)
/*
* Use this swapdev's extent info to locate the (PAGE_SIZE) block which
- * corresponds to page offset `offset'.
+ * corresponds to the page offset of the specified swap entry.
+ * Note that the return type of this function is sector_t, but it actually
+ * returns the page offset into the bdev, not a sector offset.
*/
-sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
+static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
- struct swap_extent *se = sis->curr_swap_extent;
- struct swap_extent *start_se = se;
+ struct swap_info_struct *sis;
+ struct swap_extent *start_se;
+ struct swap_extent *se;
+ pgoff_t offset;
+
+ sis = swap_info[swp_type(entry)];
+ *bdev = sis->bdev;
+
+ offset = swp_offset(entry);
+ start_se = sis->curr_swap_extent;
+ se = start_se;
for ( ; ; ) {
struct list_head *lh;
@@ -1298,40 +1285,31 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
return se->start_block + (offset - se->start_page);
}
lh = se->list.next;
- if (lh == &sis->extent_list)
- lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
sis->curr_swap_extent = se;
BUG_ON(se == start_se); /* It *must* be present */
}
}
-#ifdef CONFIG_HIBERNATION
/*
- * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
- * corresponding to given index in swap_info (swap type).
+ * Returns the page offset into bdev for the specified page's swap entry.
*/
-sector_t swapdev_block(int swap_type, pgoff_t offset)
+sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
- struct swap_info_struct *sis;
-
- if (swap_type >= nr_swapfiles)
- return 0;
-
- sis = swap_info + swap_type;
- return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+ swp_entry_t entry;
+ entry.val = page_private(page);
+ return map_swap_entry(entry, bdev);
}
-#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
- while (!list_empty(&sis->extent_list)) {
+ while (!list_empty(&sis->first_swap_extent.list)) {
struct swap_extent *se;
- se = list_entry(sis->extent_list.next,
+ se = list_entry(sis->first_swap_extent.list.next,
struct swap_extent, list);
list_del(&se->list);
kfree(se);
@@ -1352,8 +1330,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
struct swap_extent *new_se;
struct list_head *lh;
- lh = sis->extent_list.prev; /* The highest page extent */
- if (lh != &sis->extent_list) {
+ if (start_page == 0) {
+ se = &sis->first_swap_extent;
+ sis->curr_swap_extent = se;
+ se->start_page = 0;
+ se->nr_pages = nr_pages;
+ se->start_block = start_block;
+ return 1;
+ } else {
+ lh = sis->first_swap_extent.list.prev; /* Highest extent */
se = list_entry(lh, struct swap_extent, list);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
@@ -1373,7 +1358,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
- list_add_tail(&new_se->list, &sis->extent_list);
+ list_add_tail(&new_se->list, &sis->first_swap_extent.list);
return 1;
}
@@ -1425,7 +1410,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
- goto done;
+ goto out;
}
blkbits = inode->i_blkbits;
@@ -1496,25 +1481,22 @@ reprobe:
sis->max = page_no;
sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
-done:
- sis->curr_swap_extent = list_entry(sis->extent_list.prev,
- struct swap_extent, list);
- goto out;
+out:
+ return ret;
bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n");
ret = -EINVAL;
-out:
- return ret;
+ goto out;
}
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
- struct swap_info_struct * p = NULL;
- unsigned short *swap_map;
+ struct swap_info_struct *p = NULL;
+ unsigned char *swap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
- char * pathname;
+ char *pathname;
int i, type, prev;
int err;
@@ -1535,8 +1517,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mapping = victim->f_mapping;
prev = -1;
spin_lock(&swap_lock);
- for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
- p = swap_info + type;
+ for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
+ p = swap_info[type];
if (p->flags & SWP_WRITEOK) {
if (p->swap_file->f_mapping == mapping)
break;
@@ -1555,18 +1537,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
- if (prev < 0) {
+ if (prev < 0)
swap_list.head = p->next;
- } else {
- swap_info[prev].next = p->next;
- }
+ else
+ swap_info[prev]->next = p->next;
if (type == swap_list.next) {
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
if (p->prio < 0) {
- for (i = p->next; i >= 0; i = swap_info[i].next)
- swap_info[i].prio = p->prio--;
+ for (i = p->next; i >= 0; i = swap_info[i]->next)
+ swap_info[i]->prio = p->prio--;
least_priority++;
}
nr_swap_pages -= p->pages;
@@ -1584,16 +1565,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
if (p->prio < 0)
p->prio = --least_priority;
prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio)
+ for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+ if (p->prio >= swap_info[i]->prio)
break;
prev = i;
}
p->next = i;
if (prev < 0)
- swap_list.head = swap_list.next = p - swap_info;
+ swap_list.head = swap_list.next = type;
else
- swap_info[prev].next = p - swap_info;
+ swap_info[prev]->next = type;
nr_swap_pages += p->pages;
total_swap_pages += p->pages;
p->flags |= SWP_WRITEOK;
@@ -1606,6 +1587,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
up_write(&swap_unplug_sem);
destroy_swap_extents(p);
+ if (p->flags & SWP_CONTINUED)
+ free_swap_count_continuations(p);
+
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
drain_mmlist();
@@ -1653,8 +1637,8 @@ out:
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
- struct swap_info_struct *ptr = swap_info;
- int i;
+ struct swap_info_struct *si;
+ int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
@@ -1662,11 +1646,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
if (!l)
return SEQ_START_TOKEN;
- for (i = 0; i < nr_swapfiles; i++, ptr++) {
- if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
+ for (type = 0; type < nr_swapfiles; type++) {
+ smp_rmb(); /* read nr_swapfiles before swap_info[type] */
+ si = swap_info[type];
+ if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
- return ptr;
+ return si;
}
return NULL;
@@ -1674,21 +1660,21 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
- struct swap_info_struct *ptr;
- struct swap_info_struct *endptr = swap_info + nr_swapfiles;
+ struct swap_info_struct *si = v;
+ int type;
if (v == SEQ_START_TOKEN)
- ptr = swap_info;
- else {
- ptr = v;
- ptr++;
- }
+ type = 0;
+ else
+ type = si->type + 1;
- for (; ptr < endptr; ptr++) {
- if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
+ for (; type < nr_swapfiles; type++) {
+ smp_rmb(); /* read nr_swapfiles before swap_info[type] */
+ si = swap_info[type];
+ if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
- return ptr;
+ return si;
}
return NULL;
@@ -1701,24 +1687,24 @@ static void swap_stop(struct seq_file *swap, void *v)
static int swap_show(struct seq_file *swap, void *v)
{
- struct swap_info_struct *ptr = v;
+ struct swap_info_struct *si = v;
struct file *file;
int len;
- if (ptr == SEQ_START_TOKEN) {
+ if (si == SEQ_START_TOKEN) {
seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
- file = ptr->swap_file;
+ file = si->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
- ptr->pages << (PAGE_SHIFT - 10),
- ptr->inuse_pages << (PAGE_SHIFT - 10),
- ptr->prio);
+ si->pages << (PAGE_SHIFT - 10),
+ si->inuse_pages << (PAGE_SHIFT - 10),
+ si->prio);
return 0;
}
@@ -1765,7 +1751,7 @@ late_initcall(max_swapfiles_check);
*/
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
char *name = NULL;
struct block_device *bdev = NULL;
struct file *swap_file = NULL;
@@ -1779,30 +1765,52 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages = 1;
unsigned long swapfilepages;
- unsigned short *swap_map = NULL;
+ unsigned char *swap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
int did_down = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
spin_lock(&swap_lock);
- p = swap_info;
- for (type = 0 ; type < nr_swapfiles ; type++,p++)
- if (!(p->flags & SWP_USED))
+ for (type = 0; type < nr_swapfiles; type++) {
+ if (!(swap_info[type]->flags & SWP_USED))
break;
+ }
error = -EPERM;
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
+ kfree(p);
goto out;
}
- if (type >= nr_swapfiles)
- nr_swapfiles = type+1;
- memset(p, 0, sizeof(*p));
- INIT_LIST_HEAD(&p->extent_list);
+ if (type >= nr_swapfiles) {
+ p->type = type;
+ swap_info[type] = p;
+ /*
+ * Write swap_info[type] before nr_swapfiles, in case a
+ * racing procfs swap_start() or swap_next() is reading them.
+ * (We never shrink nr_swapfiles, we never free this entry.)
+ */
+ smp_wmb();
+ nr_swapfiles++;
+ } else {
+ kfree(p);
+ p = swap_info[type];
+ /*
+ * Do not memset this entry: a racing procfs swap_next()
+ * would be relying on p->type to remain valid.
+ */
+ }
+ INIT_LIST_HEAD(&p->first_swap_extent.list);
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
+
name = getname(specialfile);
error = PTR_ERR(name);
if (IS_ERR(name)) {
@@ -1822,7 +1830,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EBUSY;
for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *q = &swap_info[i];
+ struct swap_info_struct *q = swap_info[i];
if (i == type || !q->swap_file)
continue;
@@ -1897,6 +1905,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
p->lowest_bit = 1;
p->cluster_next = 1;
+ p->cluster_nr = 0;
/*
* Find out how many pages are allowed for a single swap
@@ -1932,13 +1941,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap;
/* OK, set up the swap map and apply the bad block list */
- swap_map = vmalloc(maxpages * sizeof(short));
+ swap_map = vmalloc(maxpages);
if (!swap_map) {
error = -ENOMEM;
goto bad_swap;
}
- memset(swap_map, 0, maxpages * sizeof(short));
+ memset(swap_map, 0, maxpages);
for (i = 0; i < swap_header->info.nr_badpages; i++) {
int page_nr = swap_header->info.badpages[i];
if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
@@ -2003,18 +2012,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
/* insert swap space into swap_list: */
prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio) {
+ for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+ if (p->prio >= swap_info[i]->prio)
break;
- }
prev = i;
}
p->next = i;
- if (prev < 0) {
- swap_list.head = swap_list.next = p - swap_info;
- } else {
- swap_info[prev].next = p - swap_info;
- }
+ if (prev < 0)
+ swap_list.head = swap_list.next = type;
+ else
+ swap_info[prev]->next = type;
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
error = 0;
@@ -2051,15 +2058,15 @@ out:
void si_swapinfo(struct sysinfo *val)
{
- unsigned int i;
+ unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
- for (i = 0; i < nr_swapfiles; i++) {
- if (!(swap_info[i].flags & SWP_USED) ||
- (swap_info[i].flags & SWP_WRITEOK))
- continue;
- nr_to_be_unused += swap_info[i].inuse_pages;
+ for (type = 0; type < nr_swapfiles; type++) {
+ struct swap_info_struct *si = swap_info[type];
+
+ if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
+ nr_to_be_unused += si->inuse_pages;
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
@@ -2069,101 +2076,107 @@ void si_swapinfo(struct sysinfo *val)
/*
* Verify that a swap entry is valid and increment its swap map count.
*
- * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
- * "permanent", but will be reclaimed by the next swapoff.
* Returns error code in following case.
* - success -> 0
* - swp_entry is invalid -> EINVAL
* - swp_entry is migration entry -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
+ * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
-static int __swap_duplicate(swp_entry_t entry, bool cache)
+static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
unsigned long offset, type;
- int result = -EINVAL;
- int count;
- bool has_cache;
+ unsigned char count;
+ unsigned char has_cache;
+ int err = -EINVAL;
if (non_swap_entry(entry))
- return -EINVAL;
+ goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
- p = type + swap_info;
+ p = swap_info[type];
offset = swp_offset(entry);
spin_lock(&swap_lock);
-
if (unlikely(offset >= p->max))
goto unlock_out;
- count = swap_count(p->swap_map[offset]);
- has_cache = swap_has_cache(p->swap_map[offset]);
+ count = p->swap_map[offset];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+ err = 0;
- if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
+ if (usage == SWAP_HAS_CACHE) {
/* set SWAP_HAS_CACHE if there is no cache and entry is used */
- if (!has_cache && count) {
- p->swap_map[offset] = encode_swapmap(count, true);
- result = 0;
- } else if (has_cache) /* someone added cache */
- result = -EEXIST;
- else if (!count) /* no users */
- result = -ENOENT;
+ if (!has_cache && count)
+ has_cache = SWAP_HAS_CACHE;
+ else if (has_cache) /* someone else added cache */
+ err = -EEXIST;
+ else /* no users remaining */
+ err = -ENOENT;
} else if (count || has_cache) {
- if (count < SWAP_MAP_MAX - 1) {
- p->swap_map[offset] = encode_swapmap(count + 1,
- has_cache);
- result = 0;
- } else if (count <= SWAP_MAP_MAX) {
- if (swap_overflow++ < 5)
- printk(KERN_WARNING
- "swap_dup: swap entry overflow\n");
- p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
- has_cache);
- result = 0;
- }
+
+ if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
+ count += usage;
+ else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
+ err = -EINVAL;
+ else if (swap_count_continued(p, offset, count))
+ count = COUNT_CONTINUED;
+ else
+ err = -ENOMEM;
} else
- result = -ENOENT; /* unused swap entry */
+ err = -ENOENT; /* unused swap entry */
+
+ p->swap_map[offset] = count | has_cache;
+
unlock_out:
spin_unlock(&swap_lock);
out:
- return result;
+ return err;
bad_file:
printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
+
+/*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+ __swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
/*
* increase reference count of swap entry by 1.
*/
-void swap_duplicate(swp_entry_t entry)
+int swap_duplicate(swp_entry_t entry)
{
- __swap_duplicate(entry, SWAP_MAP);
+ int err = 0;
+
+ while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
+ err = add_swap_count_continuation(entry, GFP_ATOMIC);
+ return err;
}
/*
* @entry: swap entry for which we allocate swap cache.
*
- * Called when allocating swap cache for exising swap entry,
+ * Called when allocating swap cache for existing swap entry,
* This can return error codes. Returns 0 at success.
* -EBUSY means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry)
{
- return __swap_duplicate(entry, SWAP_CACHE);
-}
-
-
-struct swap_info_struct *
-get_swap_info_struct(unsigned type)
-{
- return &swap_info[type];
+ return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
/*
@@ -2181,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
if (!our_page_cluster) /* no readahead */
return 0;
- si = &swap_info[swp_type(entry)];
+ si = swap_info[swp_type(entry)];
target = swp_offset(entry);
base = (target >> our_page_cluster) << our_page_cluster;
end = base + (1 << our_page_cluster);
@@ -2217,3 +2230,219 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
*offset = ++toff;
return nr_pages? ++nr_pages: 0;
}
+
+/*
+ * add_swap_count_continuation - called when a swap count is duplicated
+ * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
+ * page of the original vmalloc'ed swap_map, to hold the continuation count
+ * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
+ * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
+ *
+ * These continuation pages are seldom referenced: the common paths all work
+ * on the original swap_map, only referring to a continuation page when the
+ * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
+ *
+ * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
+ * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
+ * can be called after dropping locks.
+ */
+int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+{
+ struct swap_info_struct *si;
+ struct page *head;
+ struct page *page;
+ struct page *list_page;
+ pgoff_t offset;
+ unsigned char count;
+
+ /*
+ * When debugging, it's easier to use __GFP_ZERO here; but it's better
+ * for latency not to zero a page while GFP_ATOMIC and holding locks.
+ */
+ page = alloc_page(gfp_mask | __GFP_HIGHMEM);
+
+ si = swap_info_get(entry);
+ if (!si) {
+ /*
+ * An acceptable race has occurred since the failing
+ * __swap_duplicate(): the swap entry has been freed,
+ * perhaps even the whole swap_map cleared for swapoff.
+ */
+ goto outer;
+ }
+
+ offset = swp_offset(entry);
+ count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
+
+ if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
+ /*
+ * The higher the swap count, the more likely it is that tasks
+ * will race to add swap count continuation: we need to avoid
+ * over-provisioning.
+ */
+ goto out;
+ }
+
+ if (!page) {
+ spin_unlock(&swap_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * We are fortunate that although vmalloc_to_page uses pte_offset_map,
+ * no architecture is using highmem pages for kernel pagetables: so it
+ * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+ */
+ head = vmalloc_to_page(si->swap_map + offset);
+ offset &= ~PAGE_MASK;
+
+ /*
+ * Page allocation does not initialize the page's lru field,
+ * but it does always reset its private field.
+ */
+ if (!page_private(head)) {
+ BUG_ON(count & COUNT_CONTINUED);
+ INIT_LIST_HEAD(&head->lru);
+ set_page_private(head, SWP_CONTINUED);
+ si->flags |= SWP_CONTINUED;
+ }
+
+ list_for_each_entry(list_page, &head->lru, lru) {
+ unsigned char *map;
+
+ /*
+ * If the previous map said no continuation, but we've found
+ * a continuation page, free our allocation and use this one.
+ */
+ if (!(count & COUNT_CONTINUED))
+ goto out;
+
+ map = kmap_atomic(list_page, KM_USER0) + offset;
+ count = *map;
+ kunmap_atomic(map, KM_USER0);
+
+ /*
+ * If this continuation count now has some space in it,
+ * free our allocation and use this one.
+ */
+ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
+ goto out;
+ }
+
+ list_add_tail(&page->lru, &head->lru);
+ page = NULL; /* now it's attached, don't free it */
+out:
+ spin_unlock(&swap_lock);
+outer:
+ if (page)
+ __free_page(page);
+ return 0;
+}
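As an aside not in the patch itself: the comment above describes the intended calling pattern, namely try GFP_ATOMIC under locks and, if that returns -ENOMEM, drop the locks and retry with GFP_KERNEL. A hedged sketch of such a caller (the function, its lock argument and the retry policy are invented for illustration):

/* Illustrative only: duplicate a swap entry while holding a page table lock. */
static int dup_swap_entry_locked(swp_entry_t entry, spinlock_t *ptl)
{
	int err = swap_duplicate(entry);	/* uses GFP_ATOMIC internally */

	if (err == -ENOMEM) {
		spin_unlock(ptl);		/* safe to sleep now */
		err = add_swap_count_continuation(entry, GFP_KERNEL);
		spin_lock(ptl);
		if (!err)
			err = swap_duplicate(entry);	/* retry */
	}
	return err;
}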
+
+/*
+ * swap_count_continued - when the original swap_map count is incremented
+ * from SWAP_MAP_MAX, check if there is already a continuation page to carry
+ * into, carry if so, or else fail until a new continuation page is allocated;
+ * when the original swap_map count is decremented from 0 with continuation,
+ * borrow from the continuation and report whether it still holds more.
+ * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
+ */
+static bool swap_count_continued(struct swap_info_struct *si,
+ pgoff_t offset, unsigned char count)
+{
+ struct page *head;
+ struct page *page;
+ unsigned char *map;
+
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head) != SWP_CONTINUED) {
+ BUG_ON(count & COUNT_CONTINUED);
+ return false; /* need to add count continuation */
+ }
+
+ offset &= ~PAGE_MASK;
+ page = list_entry(head->lru.next, struct page, lru);
+ map = kmap_atomic(page, KM_USER0) + offset;
+
+ if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
+ goto init_map; /* jump over SWAP_CONT_MAX checks */
+
+ if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
+ /*
+ * Think of how you add 1 to 999
+ */
+ while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ BUG_ON(page == head);
+ map = kmap_atomic(page, KM_USER0) + offset;
+ }
+ if (*map == SWAP_CONT_MAX) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ if (page == head)
+ return false; /* add count continuation */
+ map = kmap_atomic(page, KM_USER0) + offset;
+init_map: *map = 0; /* we didn't zero the page */
+ }
+ *map += 1;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ while (page != head) {
+ map = kmap_atomic(page, KM_USER0) + offset;
+ *map = COUNT_CONTINUED;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+ return true; /* incremented */
+
+ } else { /* decrementing */
+ /*
+ * Think of how you subtract 1 from 1000
+ */
+ BUG_ON(count != COUNT_CONTINUED);
+ while (*map == COUNT_CONTINUED) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ BUG_ON(page == head);
+ map = kmap_atomic(page, KM_USER0) + offset;
+ }
+ BUG_ON(*map == 0);
+ *map -= 1;
+ if (*map == 0)
+ count = 0;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ while (page != head) {
+ map = kmap_atomic(page, KM_USER0) + offset;
+ *map = SWAP_CONT_MAX | count;
+ count = COUNT_CONTINUED;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+ return count == COUNT_CONTINUED;
+ }
+}
+
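As an aside not in the patch itself, and ignoring SWAP_HAS_CACHE for brevity: duplicating and then freeing a heavily shared entry moves through states like the following, reading swap_count_continued() together with __swap_duplicate() and swap_entry_free() above:

	references            swap_map[offset]       continuation byte
	SWAP_MAP_MAX          SWAP_MAP_MAX           (none)
	SWAP_MAP_MAX + 1      COUNT_CONTINUED        1    carry, "add 1 to 999"
	SWAP_MAP_MAX + 2      COUNT_CONTINUED | 1    1
	SWAP_MAP_MAX again    SWAP_MAP_MAX           0    borrow, "subtract 1 from 1000"

A further carry, once a continuation byte reaches SWAP_CONT_MAX, spills into the next page on head->lru in the same way.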
+/*
+ * free_swap_count_continuations - called by swapoff to free all the continuation
+ * pages appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
+ */
+static void free_swap_count_continuations(struct swap_info_struct *si)
+{
+ pgoff_t offset;
+
+ for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
+ struct page *head;
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head)) {
+ struct list_head *this, *next;
+ list_for_each_safe(this, next, &head->lru) {
+ struct page *page;
+ page = list_entry(this, struct page, lru);
+ list_del(this);
+ __free_page(page);
+ }
+ }
+ }
+}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f551a4a44cd..37e69295f250 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
spin_lock(&vbq->lock);
list_add(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
- put_cpu_var(vmap_cpu_blocks);
+ put_cpu_var(vmap_block_queue);
return vb;
}
@@ -826,7 +826,7 @@ again:
}
spin_unlock(&vb->lock);
}
- put_cpu_var(vmap_cpu_blocks);
+ put_cpu_var(vmap_block_queue);
rcu_read_unlock();
if (!addr) {
@@ -1411,6 +1411,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
{
struct page **pages;
unsigned int nr_pages, array_size, i;
+ gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
@@ -1418,13 +1419,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
+ pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
PAGE_KERNEL, node, caller);
area->flags |= VM_VPAGES;
} else {
- pages = kmalloc_node(array_size,
- (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
- node);
+ pages = kmalloc_node(array_size, nested_gfp, node);
}
area->pages = pages;
area->caller = caller;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 777af57fd8c8..885207a6b6b7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -55,6 +55,11 @@ struct scan_control {
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
+ /* How many pages shrink_list() should reclaim */
+ unsigned long nr_to_reclaim;
+
+ unsigned long hibernation_mode;
+
/* This context's GFP mask */
gfp_t gfp_mask;
@@ -66,12 +71,6 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
int may_swap;
- /* This context's SWAP_CLUSTER_MAX. If freeing memory for
- * suspend, we effectively ignore SWAP_CLUSTER_MAX.
- * In this context, it doesn't matter that we scan the
- * whole list at once. */
- int swap_cluster_max;
-
int swappiness;
int all_unreclaimable;
@@ -358,7 +357,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* stalls if we need to run get_block(). We could test
* PagePrivate for that.
*
- * If this process is currently in generic_file_write() against
+ * If this process is currently in __generic_file_aio_write() against
* this page's queue, we can perform writeback even if that
* will block.
*
@@ -1132,7 +1131,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
unsigned long nr_anon;
unsigned long nr_file;
- nr_taken = sc->isolate_pages(sc->swap_cluster_max,
+ nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
&page_list, &nr_scan, sc->order, mode,
zone, sc->mem_cgroup, 0, file);
@@ -1166,10 +1165,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
- reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
- reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
- reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
- reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+ reclaim_stat->recent_scanned[0] += nr_anon;
+ reclaim_stat->recent_scanned[1] += nr_file;
spin_unlock_irq(&zone->lru_lock);
@@ -1464,20 +1461,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
return low;
}
+static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
+ int file)
+{
+ if (file)
+ return inactive_file_is_low(zone, sc);
+ else
+ return inactive_anon_is_low(zone, sc);
+}
+
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct zone *zone, struct scan_control *sc, int priority)
{
int file = is_file_lru(lru);
- if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ if (is_active_lru(lru)) {
+ if (inactive_list_is_low(zone, sc, file))
+ shrink_active_list(nr_to_scan, zone, sc, priority, file);
return 0;
}
- if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
- return 0;
- }
return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}
@@ -1567,15 +1570,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
* until we collected @swap_cluster_max pages to scan.
*/
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
- unsigned long *nr_saved_scan,
- unsigned long swap_cluster_max)
+ unsigned long *nr_saved_scan)
{
unsigned long nr;
*nr_saved_scan += nr_to_scan;
nr = *nr_saved_scan;
- if (nr >= swap_cluster_max)
+ if (nr >= SWAP_CLUSTER_MAX)
*nr_saved_scan = 0;
else
nr = 0;
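As an aside not in the patch itself: nr_scan_try_batch() now batches against the fixed SWAP_CLUSTER_MAX. Assuming its usual value of 32 (an assumption, not stated in this hunk), four calls with nr_to_scan = 10 accumulate 10, 20, 30, 40 in *nr_saved_scan: the first three return 0, and the fourth returns 40 and resets the saved count, so shrink_zone() only scans once a batch of at least 32 pages has built up.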
@@ -1594,7 +1596,7 @@ static void shrink_zone(int priority, struct zone *zone,
unsigned long percent[2]; /* anon @ 0; file @ 1 */
enum lru_list l;
unsigned long nr_reclaimed = sc->nr_reclaimed;
- unsigned long swap_cluster_max = sc->swap_cluster_max;
+ unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
int noswap = 0;
@@ -1616,15 +1618,15 @@ static void shrink_zone(int priority, struct zone *zone,
scan = (scan * percent[file]) / 100;
}
nr[l] = nr_scan_try_batch(scan,
- &reclaim_stat->nr_saved_scan[l],
- swap_cluster_max);
+ &reclaim_stat->nr_saved_scan[l]);
}
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(l) {
if (nr[l]) {
- nr_to_scan = min(nr[l], swap_cluster_max);
+ nr_to_scan = min_t(unsigned long,
+ nr[l], SWAP_CLUSTER_MAX);
nr[l] -= nr_to_scan;
nr_reclaimed += shrink_list(l, nr_to_scan,
@@ -1639,8 +1641,7 @@ static void shrink_zone(int priority, struct zone *zone,
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed > swap_cluster_max &&
- priority < DEF_PRIORITY && !current_is_kswapd())
+ if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
break;
}
@@ -1738,6 +1739,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+ unsigned long writeback_threshold;
delayacct_freepages_start();
@@ -1773,7 +1775,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
}
}
total_scanned += sc->nr_scanned;
- if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+ if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
ret = sc->nr_reclaimed;
goto out;
}
@@ -1785,14 +1787,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
* that's undesirable in laptop mode, where we *want* lumpy
* writeout. So in laptop mode, write out the whole world.
*/
- if (total_scanned > sc->swap_cluster_max +
- sc->swap_cluster_max / 2) {
+ writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
+ if (total_scanned > writeback_threshold) {
wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
sc->may_writepage = 1;
}
/* Take a nap, wait for some writeback to complete */
- if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+ if (!sc->hibernation_mode && sc->nr_scanned &&
+ priority < DEF_PRIORITY - 2)
congestion_wait(BLK_RW_ASYNC, HZ/10);
}
/* top priority shrink_zones still had more to do? don't OOM, then */
@@ -1831,7 +1834,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
struct scan_control sc = {
.gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
@@ -1855,7 +1858,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !noswap,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem,
@@ -1889,7 +1891,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !noswap,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
@@ -1904,6 +1906,30 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
}
#endif
+/* is kswapd sleeping prematurely? */
+static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+{
+ int i;
+
+ /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
+ if (remaining)
+ return 1;
+
+ /* If after HZ/10, a zone is below the high mark, it's premature */
+ for (i = 0; i < pgdat->nr_zones; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+ 0, 0))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
@@ -1936,7 +1962,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
.may_swap = 1,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ /*
+ * kswapd doesn't want to bail out of reclaim early, because
+ * we want to put equal scanning pressure on each zone.
+ */
+ .nr_to_reclaim = ULONG_MAX,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
@@ -1961,6 +1991,7 @@ loop_again:
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
+ int has_under_min_watermark_zone = 0;
/* The swap token gets in the way of swapout... */
if (!priority)
@@ -2067,6 +2098,15 @@ loop_again:
if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
sc.may_writepage = 1;
+
+ /*
+ * We are still under the min watermark, which means we risk
+ * GFP_ATOMIC allocation failures. Hurry up!
+ */
+ if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
+ end_zone, 0))
+ has_under_min_watermark_zone = 1;
+
}
if (all_zones_ok)
break; /* kswapd: all done */
@@ -2074,8 +2114,12 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (has_under_min_watermark_zone)
+ count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
+ else
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+ }
/*
* We do this so kswapd doesn't build up large priorities for
@@ -2173,6 +2217,7 @@ static int kswapd(void *p)
order = 0;
for ( ; ; ) {
unsigned long new_order;
+ int ret;
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
new_order = pgdat->kswapd_max_order;
@@ -2184,19 +2229,45 @@ static int kswapd(void *p)
*/
order = new_order;
} else {
- if (!freezing(current))
- schedule();
+ if (!freezing(current) && !kthread_should_stop()) {
+ long remaining = 0;
+
+ /* Try to sleep for a short interval */
+ if (!sleeping_prematurely(pgdat, order, remaining)) {
+ remaining = schedule_timeout(HZ/10);
+ finish_wait(&pgdat->kswapd_wait, &wait);
+ prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+ }
+
+ /*
+ * After a short sleep, check if it was a
+ * premature sleep. If not, then go fully
+ * to sleep until explicitly woken up
+ */
+ if (!sleeping_prematurely(pgdat, order, remaining))
+ schedule();
+ else {
+ if (remaining)
+ count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+ else
+ count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+ }
+ }
order = pgdat->kswapd_max_order;
}
finish_wait(&pgdat->kswapd_wait, &wait);
- if (!try_to_freeze()) {
- /* We can speed up thawing tasks if we don't call
- * balance_pgdat after returning from the refrigerator
- */
+ ret = try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ /*
+ * We can speed up thawing tasks if we don't call balance_pgdat
+ * after returning from the refrigerator
+ */
+ if (!ret)
balance_pgdat(pgdat, order);
- }
}
return 0;
}
@@ -2260,148 +2331,43 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
#ifdef CONFIG_HIBERNATION
/*
- * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority.
- *
- * For pass > 3 we also try to shrink the LRU lists that contain a few pages
- */
-static void shrink_all_zones(unsigned long nr_pages, int prio,
- int pass, struct scan_control *sc)
-{
- struct zone *zone;
- unsigned long nr_reclaimed = 0;
- struct zone_reclaim_stat *reclaim_stat;
-
- for_each_populated_zone(zone) {
- enum lru_list l;
-
- if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
- continue;
-
- for_each_evictable_lru(l) {
- enum zone_stat_item ls = NR_LRU_BASE + l;
- unsigned long lru_pages = zone_page_state(zone, ls);
-
- /* For pass = 0, we don't shrink the active list */
- if (pass == 0 && (l == LRU_ACTIVE_ANON ||
- l == LRU_ACTIVE_FILE))
- continue;
-
- reclaim_stat = get_reclaim_stat(zone, sc);
- reclaim_stat->nr_saved_scan[l] +=
- (lru_pages >> prio) + 1;
- if (reclaim_stat->nr_saved_scan[l]
- >= nr_pages || pass > 3) {
- unsigned long nr_to_scan;
-
- reclaim_stat->nr_saved_scan[l] = 0;
- nr_to_scan = min(nr_pages, lru_pages);
- nr_reclaimed += shrink_list(l, nr_to_scan, zone,
- sc, prio);
- if (nr_reclaimed >= nr_pages) {
- sc->nr_reclaimed += nr_reclaimed;
- return;
- }
- }
- }
- }
- sc->nr_reclaimed += nr_reclaimed;
-}
-
-/*
- * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
* freed pages.
*
* Rather than trying to age LRUs the aim is to preserve the overall
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
-unsigned long shrink_all_memory(unsigned long nr_pages)
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
- unsigned long lru_pages, nr_slab;
- int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
- .gfp_mask = GFP_KERNEL,
- .may_unmap = 0,
+ .gfp_mask = GFP_HIGHUSER_MOVABLE,
+ .may_swap = 1,
+ .may_unmap = 1,
.may_writepage = 1,
+ .nr_to_reclaim = nr_to_reclaim,
+ .hibernation_mode = 1,
+ .swappiness = vm_swappiness,
+ .order = 0,
.isolate_pages = isolate_pages_global,
- .nr_reclaimed = 0,
};
+ struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+ struct task_struct *p = current;
+ unsigned long nr_reclaimed;
- current->reclaim_state = &reclaim_state;
-
- lru_pages = global_reclaimable_pages();
- nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
- /* If slab caches are huge, it's better to hit them first */
- while (nr_slab >= lru_pages) {
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
- if (!reclaim_state.reclaimed_slab)
- break;
-
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- nr_slab -= reclaim_state.reclaimed_slab;
- }
-
- /*
- * We try to shrink LRUs in 5 passes:
- * 0 = Reclaim from inactive_list only
- * 1 = Reclaim from active list but don't reclaim mapped
- * 2 = 2nd pass of type 1
- * 3 = Reclaim mapped (normal reclaim)
- * 4 = 2nd pass of type 3
- */
- for (pass = 0; pass < 5; pass++) {
- int prio;
-
- /* Force reclaiming mapped pages in the passes #3 and #4 */
- if (pass > 2)
- sc.may_unmap = 1;
-
- for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
-
- sc.nr_scanned = 0;
- sc.swap_cluster_max = nr_to_scan;
- shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(sc.nr_scanned, sc.gfp_mask,
- global_reclaimable_pages());
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ / 10);
- }
- }
-
- /*
- * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
- * something in slab caches
- */
- if (!sc.nr_reclaimed) {
- do {
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask,
- global_reclaimable_pages());
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- } while (sc.nr_reclaimed < nr_pages &&
- reclaim_state.reclaimed_slab > 0);
- }
+ p->flags |= PF_MEMALLOC;
+ lockdep_set_current_reclaim_state(sc.gfp_mask);
+ reclaim_state.reclaimed_slab = 0;
+ p->reclaim_state = &reclaim_state;
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
-out:
- current->reclaim_state = NULL;
+ p->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
+ p->flags &= ~PF_MEMALLOC;
- return sc.nr_reclaimed;
+ return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
@@ -2451,6 +2417,17 @@ int kswapd_run(int nid)
return ret;
}
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+ struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+ if (kswapd)
+ kthread_stop(kswapd);
+}
+
static int __init kswapd_init(void)
{
int nid;
@@ -2553,8 +2530,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.may_swap = 1,
- .swap_cluster_max = max_t(unsigned long, nr_pages,
- SWAP_CLUSTER_MAX),
+ .nr_to_reclaim = max_t(unsigned long, nr_pages,
+ SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
.order = order,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec..6051fbab67ba 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -683,6 +683,9 @@ static const char * const vmstat_text[] = {
"slabs_scanned",
"kswapd_steal",
"kswapd_inodesteal",
+ "kswapd_low_wmark_hit_quickly",
+ "kswapd_high_wmark_hit_quickly",
+ "kswapd_skip_congestion_wait",
"pageoutrun",
"allocstall",
@@ -883,11 +886,10 @@ static void vmstat_update(struct work_struct *w)
static void __cpuinit start_cpu_timer(int cpu)
{
- struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+ struct delayed_work *work = &per_cpu(vmstat_work, cpu);
- INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
- schedule_delayed_work_on(cpu, vmstat_work,
- __round_jiffies_relative(HZ, cpu));
+ INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+ schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
/*
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index b001c361ad30..4300df35d37d 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -249,6 +249,7 @@
#include <linux/poll.h>
#include <linux/capability.h>
#include <linux/ctype.h> /* isspace() */
+#include <linux/string.h> /* skip_spaces() */
#include <asm/uaccess.h>
#include <linux/init.h>
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7dea882dbb75..156020d138b5 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap,
/* Look at the next command */
start = next;
- /* Scrap whitespaces before the command */
- while(isspace(*start))
- start++;
+ /* Scrap whitespaces before the command */
+ start = skip_spaces(start);
/* ',' is our command separator */
next = strchr(start, ',');
@@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap,
char * endp;
/* Scrap whitespaces before the command */
- while(isspace(*begp))
- begp++;
+ begp = skip_spaces(begp);
/* Convert argument to a number (last arg is the base) */
addr = simple_strtoul(begp, &endp, 16);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e428863574f..c18286a2167b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops afiucv_pm_ops = {
+static const struct dev_pm_ops afiucv_pm_ops = {
.prepare = afiucv_pm_prepare,
.complete = afiucv_pm_complete,
.freeze = afiucv_pm_freeze,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3b1f5f5f8de7..fd8b28361a64 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);
-static struct dev_pm_ops iucv_pm_ops = {
+static const struct dev_pm_ops iucv_pm_ops = {
.prepare = iucv_pm_prepare,
.complete = iucv_pm_complete,
.freeze = iucv_pm_freeze,
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index eb0ceb846527..fc70a49c0afd 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file,
if (copy_from_user(buf, input, size))
return -EFAULT;
- while (isspace(*c))
- c++;
+ c = skip_spaces(c);
if (size - (c - buf) < 5)
return c - buf;
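This is another call site in the series converted from an open-coded while (isspace(*p)) p++; loop to the new skip_spaces() helper in lib/string.c. A user-space equivalent, shown only to illustrate the behaviour the converted callers rely on (advance past leading whitespace and return a pointer into the same buffer):

#include <ctype.h>
#include <stdio.h>

/* User-space sketch of skip_spaces(): skip leading whitespace in place. */
static char *skip_spaces_demo(const char *str)
{
        while (isspace((unsigned char)*str))
                ++str;
        return (char *)str;
}

int main(void)
{
        char line[] = "   +192.168.0.1 extra";

        printf("'%s'\n", skip_spaces_demo(line));       /* '+192.168.0.1 extra' */
        return 0;
}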
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index c7450c8f0a7c..6dcdd2517819 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
/*
* RFC 4291, Section 2.2.1
- *
- * To keep the result as short as possible, especially
- * since we don't shorthand, we don't want leading zeros
- * in each halfword, so avoid %pI6.
*/
- return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x",
- ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
- ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
- ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
- ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
+ return snprintf(buf, buflen, "%pI6c", addr);
}
static size_t rpc_ntop6(const struct sockaddr *sap,
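The open-coded eight-halfword snprintf() is replaced by the %pI6c printk specifier, which emits the compressed IPv6 text form. There is no %pI6c in user space, but inet_ntop() produces an equivalent compressed representation; a small sketch of the output the caller now gets (illustrative only):

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        struct in6_addr addr;
        char buf[INET6_ADDRSTRLEN];

        /* The loopback address: the old format string printed "0:0:0:0:0:0:0:1". */
        inet_pton(AF_INET6, "::1", &addr);

        /* inet_ntop(), like %pI6c, emits the compressed form "::1". */
        if (inet_ntop(AF_INET6, &addr, buf, sizeof(buf)))
                printf("%s\n", buf);
        return 0;
}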
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 7535a7bed2fa..f394fc190a49 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred)
clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
}
-static void
+static int
rpcauth_unhash_cred(struct rpc_cred *cred)
{
spinlock_t *cache_lock;
+ int ret;
cache_lock = &cred->cr_auth->au_credcache->lock;
spin_lock(cache_lock);
- if (atomic_read(&cred->cr_count) == 0)
+ ret = atomic_read(&cred->cr_count) == 0;
+ if (ret)
rpcauth_unhash_cred_locked(cred);
spin_unlock(cache_lock);
+ return ret;
}
/*
@@ -446,31 +449,35 @@ void
put_rpccred(struct rpc_cred *cred)
{
/* Fast path for unhashed credentials */
- if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
- goto need_lock;
-
- if (!atomic_dec_and_test(&cred->cr_count))
+ if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
+ if (atomic_dec_and_test(&cred->cr_count))
+ cred->cr_ops->crdestroy(cred);
return;
- goto out_destroy;
-need_lock:
+ }
+
if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
return;
if (!list_empty(&cred->cr_lru)) {
number_cred_unused--;
list_del_init(&cred->cr_lru);
}
- if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
- rpcauth_unhash_cred(cred);
if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
- cred->cr_expire = jiffies;
- list_add_tail(&cred->cr_lru, &cred_unused);
- number_cred_unused++;
- spin_unlock(&rpc_credcache_lock);
- return;
+ if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
+ cred->cr_expire = jiffies;
+ list_add_tail(&cred->cr_lru, &cred_unused);
+ number_cred_unused++;
+ goto out_nodestroy;
+ }
+ if (!rpcauth_unhash_cred(cred)) {
+ /* We were hashed and someone looked us up... */
+ goto out_nodestroy;
+ }
}
spin_unlock(&rpc_credcache_lock);
-out_destroy:
cred->cr_ops->crdestroy(cred);
+ return;
+out_nodestroy:
+ spin_unlock(&rpc_credcache_lock);
}
EXPORT_SYMBOL_GPL(put_rpccred);
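The reworked put_rpccred() keeps a lock-free fast path for credentials that were never hashed and only takes rpc_credcache_lock, via atomic_dec_and_lock(), when dropping what may be the last reference to a hashed credential. A hedged user-space sketch of that dec-and-lock idiom using C11 atomics and pthreads (dec_and_lock_demo is an illustrative name, not the SUNRPC or kernel API, and it omits the re-lookup race handled by rpcauth_unhash_cred()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Drop one reference; return true, with *lock held, only if it hit zero. */
static bool dec_and_lock_demo(atomic_int *count, pthread_mutex_t *lock)
{
        int old = atomic_load(count);

        /* Fast path: while more than one reference remains, decrement locklessly. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(count, &old, old - 1))
                        return false;
        }

        /* Slow path: the possibly-final reference is dropped under the lock. */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(count, 1) == 1)
                return true;            /* caller tears the object down, lock held */
        pthread_mutex_unlock(lock);
        return false;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        atomic_int refs = 2;

        (void)dec_and_lock_demo(&refs, &lock);  /* 2 -> 1: lockless fast path */
        if (dec_and_lock_demo(&refs, &lock))    /* 1 -> 0: returns with lock held */
                pthread_mutex_unlock(&lock);
        return 0;
}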
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fc6a43ccd950..3c3c50f38a1c 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
* to that upcall instead of adding the new upcall.
*/
static inline struct gss_upcall_msg *
-gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
+gss_add_msg(struct gss_upcall_msg *gss_msg)
{
struct rpc_inode *rpci = gss_msg->inode;
struct inode *inode = &rpci->vfs_inode;
@@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
if (IS_ERR(gss_new))
return gss_new;
- gss_msg = gss_add_msg(gss_auth, gss_new);
+ gss_msg = gss_add_msg(gss_new);
if (gss_msg == gss_new) {
struct inode *inode = &gss_new->inode->vfs_inode;
int res = rpc_queue_upcall(inode, &gss_new->msg);
@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task)
dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
cred->cr_uid);
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
- if (IS_ERR(gss_msg) == -EAGAIN) {
+ if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
* shouldn't normally hit this case on a refresh. */
warn_gssd();
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 38829e20500b..154034b675bd 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task);
static __be32 *rpc_encode_header(struct rpc_task *task);
static __be32 *rpc_verify_header(struct rpc_task *task);
-static int rpc_ping(struct rpc_clnt *clnt, int flags);
+static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
{
@@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
return clnt;
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
- int err = rpc_ping(clnt, RPC_TASK_SOFT);
+ int err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
@@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
- err = rpc_ping(clnt, RPC_TASK_SOFT);
+ err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
@@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task)
goto retry_timeout;
case -EPFNOSUPPORT:
/* server doesn't support any rpcbind version we know of */
- dprintk("RPC: %5u remote rpcbind service unavailable\n",
+ dprintk("RPC: %5u unrecognized remote rpcbind service\n",
task->tk_pid);
break;
case -EPROTONOSUPPORT:
@@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task)
task->tk_status = 0;
task->tk_action = call_bind;
return;
+ case -ECONNREFUSED: /* connection problems */
+ case -ECONNRESET:
+ case -ENOTCONN:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EPIPE:
+ dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
+ task->tk_pid, task->tk_status);
+ if (!RPC_IS_SOFTCONN(task)) {
+ rpc_delay(task, 5*HZ);
+ goto retry_timeout;
+ }
+ status = task->tk_status;
+ break;
default:
dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
task->tk_pid, -task->tk_status);
@@ -1180,11 +1195,25 @@ static void
call_transmit_status(struct rpc_task *task)
{
task->tk_action = call_status;
+
+ /*
+ * Common case: success. Force the compiler to put this
+ * test first.
+ */
+ if (task->tk_status == 0) {
+ xprt_end_transmit(task);
+ rpc_task_force_reencode(task);
+ return;
+ }
+
switch (task->tk_status) {
case -EAGAIN:
break;
default:
+ dprint_status(task);
xprt_end_transmit(task);
+ rpc_task_force_reencode(task);
+ break;
/*
* Special cases: if we've been waiting on the
* socket's write_space() callback, or if the
@@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task)
* then hold onto the transport lock.
*/
case -ECONNREFUSED:
- case -ECONNRESET:
- case -ENOTCONN:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
+ if (RPC_IS_SOFTCONN(task)) {
+ xprt_end_transmit(task);
+ rpc_exit(task, task->tk_status);
+ break;
+ }
+ case -ECONNRESET:
+ case -ENOTCONN:
case -EPIPE:
rpc_task_force_reencode(task);
}
@@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task)
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
task->tk_timeouts++;
+ if (RPC_IS_SOFTCONN(task)) {
+ rpc_exit(task, -ETIMEDOUT);
+ return;
+ }
if (RPC_IS_SOFT(task)) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
@@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = {
.p_decode = rpcproc_decode_null,
};
-static int rpc_ping(struct rpc_clnt *clnt, int flags)
+static int rpc_ping(struct rpc_clnt *clnt)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
};
int err;
msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
- err = rpc_call_sync(clnt, &msg, flags);
+ err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
put_rpccred(msg.rpc_cred);
return err;
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 830faf4d9997..3e3772d8eb92 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -20,6 +20,7 @@
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/mutex.h>
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
@@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *);
static void rpcb_map_release(void *data);
static struct rpc_program rpcb_program;
+static struct rpc_clnt * rpcb_local_clnt;
+static struct rpc_clnt * rpcb_local_clnt4;
+
struct rpcbind_args {
struct rpc_xprt * r_xprt;
@@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = {
.sin_port = htons(RPCBIND_PORT),
};
-static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
- size_t addrlen, u32 version)
+static DEFINE_MUTEX(rpcb_create_local_mutex);
+
+/*
+ * Returns zero on success, otherwise a negative errno value
+ * is returned.
+ */
+static int rpcb_create_local(void)
{
struct rpc_create_args args = {
- .protocol = XPRT_TRANSPORT_UDP,
- .address = addr,
- .addrsize = addrlen,
+ .protocol = XPRT_TRANSPORT_TCP,
+ .address = (struct sockaddr *)&rpcb_inaddr_loopback,
+ .addrsize = sizeof(rpcb_inaddr_loopback),
.servername = "localhost",
.program = &rpcb_program,
- .version = version,
+ .version = RPCBVERS_2,
.authflavor = RPC_AUTH_UNIX,
.flags = RPC_CLNT_CREATE_NOPING,
};
+ struct rpc_clnt *clnt, *clnt4;
+ int result = 0;
+
+ if (rpcb_local_clnt)
+ return result;
+
+ mutex_lock(&rpcb_create_local_mutex);
+ if (rpcb_local_clnt)
+ goto out;
+
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create local rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+ result = -PTR_ERR(clnt);
+ goto out;
+ }
- return rpc_create(&args);
+ /*
+ * This results in an RPC ping. On systems running portmapper,
+ * the v4 ping will fail. Proceed anyway, but disallow rpcb
+ * v4 upcalls.
+ */
+ clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
+ if (IS_ERR(clnt4)) {
+ dprintk("RPC: failed to create local rpcbind v4 "
+ "client (errno %ld).\n", PTR_ERR(clnt4));
+ clnt4 = NULL;
+ }
+
+ rpcb_local_clnt = clnt;
+ rpcb_local_clnt4 = clnt4;
+
+out:
+ mutex_unlock(&rpcb_create_local_mutex);
+ return result;
}
static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
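rpcb_create_local() now caches the local rpcbind clients in globals and serializes their creation with rpcb_create_local_mutex, using the check / lock / re-check pattern so that only one caller ever builds them. A stand-alone sketch of that pattern with pthreads (cached_clnt, make_client and get_shared_client are hypothetical stand-ins for the rpcbind globals and rpc_create(); the unlocked first check mirrors the patch and relies on pointer stores being atomic, as kernel code does):

#include <pthread.h>
#include <stdlib.h>

struct client { int dummy; };           /* stand-in for struct rpc_clnt */

static struct client *cached_clnt;
static pthread_mutex_t create_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct client *make_client(void) /* stand-in for rpc_create() */
{
        return calloc(1, sizeof(struct client));
}

/* Returns 0 on success, a negative errno-style value on failure. */
static int get_shared_client(void)
{
        int result = 0;

        if (cached_clnt)                /* unlocked fast path */
                return 0;

        pthread_mutex_lock(&create_mutex);
        if (cached_clnt)                /* re-check under the lock */
                goto out;

        cached_clnt = make_client();
        if (!cached_clnt)
                result = -12;           /* -ENOMEM */
out:
        pthread_mutex_unlock(&create_mutex);
        return result;
}

int main(void)
{
        return get_shared_client();
}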
@@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
return rpc_create(&args);
}
-static int rpcb_register_call(const u32 version, struct rpc_message *msg)
+static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
- struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback;
- size_t addrlen = sizeof(rpcb_inaddr_loopback);
- struct rpc_clnt *rpcb_clnt;
int result, error = 0;
msg->rpc_resp = &result;
- rpcb_clnt = rpcb_create_local(addr, addrlen, version);
- if (!IS_ERR(rpcb_clnt)) {
- error = rpc_call_sync(rpcb_clnt, msg, 0);
- rpc_shutdown_client(rpcb_clnt);
- } else
- error = PTR_ERR(rpcb_clnt);
-
+ error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
struct rpc_message msg = {
.rpc_argp = &map,
};
+ int error;
+
+ error = rpcb_create_local();
+ if (error)
+ return error;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
@@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
if (port)
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
- return rpcb_register_call(RPCBVERS_2, &msg);
+ return rpcb_register_call(rpcb_local_clnt, &msg);
}
/*
@@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- result = rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
@@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- result = rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
@@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(RPCBVERS_4, msg);
+ return rpcb_register_call(rpcb_local_clnt4, msg);
}
/**
@@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version,
struct rpc_message msg = {
.rpc_argp = &map,
};
+ int error;
+
+ error = rpcb_create_local();
+ if (error)
+ return error;
+ if (rpcb_local_clnt4 == NULL)
+ return -EPROTONOSUPPORT;
if (address == NULL)
return rpcb_unregister_all_protofamilies(&msg);
@@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
.rpc_message = &msg,
.callback_ops = &rpcb_getport_ops,
.callback_data = map,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
};
return rpc_run_task(&task_setup_data);
@@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = {
.version = rpcb_version,
.stats = &rpcb_stats,
};
+
+/**
+ * cleanup_rpcb_clnt - shut down the cached local rpcbind clients
+ *
+ */
+void cleanup_rpcb_clnt(void)
+{
+ if (rpcb_local_clnt4)
+ rpc_shutdown_client(rpcb_local_clnt4);
+ if (rpcb_local_clnt)
+ rpc_shutdown_client(rpcb_local_clnt);
+}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8cce92189019..f438347d817b 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -24,6 +24,8 @@
extern struct cache_detail ip_map_cache, unix_gid_cache;
+extern void cleanup_rpcb_clnt(void);
+
static int __init
init_sunrpc(void)
{
@@ -53,6 +55,7 @@ out:
static void __exit
cleanup_sunrpc(void)
{
+ cleanup_rpcb_clnt();
rpcauth_remove_module();
cleanup_socket_xprt();
svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index fd46d42afa89..469de292c23c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task)
}
if (!xprt_lock_write(xprt, task))
return;
+
+ if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ xprt->ops->close(xprt);
+
if (xprt_connected(xprt))
xprt_release_write(xprt, task);
else {
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04732d09013e..3d739e5d15d8 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task)
if (xprt_test_and_set_connecting(xprt))
return;
- if (transport->sock != NULL) {
+ if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
xprt, xprt->reestablish_timeout / HZ);
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 81a67a458e78..445e8845f0a4 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
use strict;
my $P = $0;
-my $V = '0.21';
+my $V = '0.23';
use Getopt::Long qw(:config no_auto_abbrev);
@@ -23,16 +23,19 @@ my $email_usename = 1;
my $email_maintainer = 1;
my $email_list = 1;
my $email_subscriber_list = 0;
-my $email_git = 1;
my $email_git_penguin_chiefs = 0;
+my $email_git = 1;
+my $email_git_blame = 0;
my $email_git_min_signatures = 1;
my $email_git_max_maintainers = 5;
my $email_git_min_percent = 5;
my $email_git_since = "1-year-ago";
-my $email_git_blame = 0;
+my $email_hg_since = "-365";
my $email_remove_duplicates = 1;
my $output_multiline = 1;
my $output_separator = ", ";
+my $output_roles = 0;
+my $output_rolestats = 0;
my $scm = 0;
my $web = 0;
my $subsystem = 0;
@@ -64,21 +67,52 @@ my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)";
my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
my $rfc822_char = '[\\000-\\377]';
+# VCS command support: class-like functions and strings
+
+my %VCS_cmds;
+
+my %VCS_cmds_git = (
+ "execute_cmd" => \&git_execute_cmd,
+ "available" => '(which("git") ne "") && (-d ".git")',
+ "find_signers_cmd" => "git log --since=\$email_git_since -- \$file",
+ "find_commit_signers_cmd" => "git log -1 \$commit",
+ "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
+ "blame_file_cmd" => "git blame -l \$file",
+ "commit_pattern" => "^commit [0-9a-f]{40,40}",
+ "blame_commit_pattern" => "^([0-9a-f]+) "
+);
+
+my %VCS_cmds_hg = (
+ "execute_cmd" => \&hg_execute_cmd,
+ "available" => '(which("hg") ne "") && (-d ".hg")',
+ "find_signers_cmd" =>
+ "hg log --date=\$email_hg_since" .
+ " --template='commit {node}\\n{desc}\\n' -- \$file",
+ "find_commit_signers_cmd" => "hg log --template='{desc}\\n' -r \$commit",
+ "blame_range_cmd" => "", # not supported
+ "blame_file_cmd" => "hg blame -c \$file",
+ "commit_pattern" => "^commit [0-9a-f]{40,40}",
+ "blame_commit_pattern" => "^([0-9a-f]+):"
+);
+
if (!GetOptions(
'email!' => \$email,
'git!' => \$email_git,
+ 'git-blame!' => \$email_git_blame,
'git-chief-penguins!' => \$email_git_penguin_chiefs,
'git-min-signatures=i' => \$email_git_min_signatures,
'git-max-maintainers=i' => \$email_git_max_maintainers,
'git-min-percent=i' => \$email_git_min_percent,
'git-since=s' => \$email_git_since,
- 'git-blame!' => \$email_git_blame,
+ 'hg-since=s' => \$email_hg_since,
'remove-duplicates!' => \$email_remove_duplicates,
'm!' => \$email_maintainer,
'n!' => \$email_usename,
'l!' => \$email_list,
's!' => \$email_subscriber_list,
'multiline!' => \$output_multiline,
+ 'roles!' => \$output_roles,
+ 'rolestats!' => \$output_rolestats,
'separator=s' => \$output_separator,
'subsystem!' => \$subsystem,
'status!' => \$status,
@@ -90,8 +124,7 @@ if (!GetOptions(
'v|version' => \$version,
'h|help' => \$help,
)) {
- usage();
- die "$P: invalid argument\n";
+ die "$P: invalid argument - use --help if necessary\n";
}
if ($help != 0) {
@@ -113,6 +146,10 @@ if ($output_separator ne ", ") {
$output_multiline = 0;
}
+if ($output_rolestats) {
+ $output_roles = 1;
+}
+
my $selections = $email + $scm + $status + $subsystem + $web;
if ($selections == 0) {
usage();
@@ -175,7 +212,7 @@ if ($email_remove_duplicates) {
next if ($line =~ m/^\s*$/);
my ($name, $address) = parse_email($line);
- $line = format_email($name, $address);
+ $line = format_email($name, $address, $email_usename);
next if ($line =~ m/^\s*$/);
@@ -207,12 +244,10 @@ foreach my $file (@ARGV) {
push(@files, $file);
if (-f $file && $keywords) {
open(FILE, "<$file") or die "$P: Can't open ${file}\n";
- while (<FILE>) {
- my $patch_line = $_;
- foreach my $line (keys %keyword_hash) {
- if ($patch_line =~ m/^.*$keyword_hash{$line}/x) {
- push(@keyword_tvi, $line);
- }
+ my $text = do { local($/) ; <FILE> };
+ foreach my $line (keys %keyword_hash) {
+ if ($text =~ m/$keyword_hash{$line}/x) {
+ push(@keyword_tvi, $line);
}
}
close(FILE);
@@ -304,11 +339,11 @@ foreach my $file (@files) {
}
if ($email && $email_git) {
- recent_git_signoffs($file);
+ vcs_file_signoffs($file);
}
if ($email && $email_git_blame) {
- git_assign_blame($file);
+ vcs_file_blame($file);
}
}
@@ -324,11 +359,11 @@ if ($email) {
if ($chief =~ m/^(.*):(.*)/) {
my $email_address;
- $email_address = format_email($1, $2);
+ $email_address = format_email($1, $2, $email_usename);
if ($email_git_penguin_chiefs) {
- push(@email_to, $email_address);
+ push(@email_to, [$email_address, 'chief penguin']);
} else {
- @email_to = grep(!/${email_address}/, @email_to);
+ @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
}
}
}
@@ -342,7 +377,7 @@ if ($email || $email_list) {
if ($email_list) {
@to = (@to, @list_to);
}
- output(uniq(@to));
+ output(merge_email(@to));
}
if ($scm) {
@@ -398,13 +433,16 @@ MAINTAINER field selection options:
--git-min-signatures => number of signatures required (default: 1)
--git-max-maintainers => maximum maintainers to add (default: 5)
--git-min-percent => minimum percentage of commits required (default: 5)
- --git-since => git history to use (default: 1-year-ago)
--git-blame => use git blame to find modified commits for patch or file
+ --git-since => git history to use (default: 1-year-ago)
+ --hg-since => hg history to use (default: -365)
--m => include maintainer(s) if any
--n => include name 'Full Name <addr\@domain.tld>'
--l => include list(s) if any
--s => include subscriber only list(s) if any
--remove-duplicates => minimize duplicate email names/addresses
+ --roles => show roles (status:subsystem, git-signer, list, etc...)
+ --rolestats => show roles and statistics (commits/total_commits, %)
--scm => print SCM tree(s) if any
--status => print status if any
--subsystem => print subsystem name if any
@@ -430,11 +468,24 @@ Notes:
directory are examined as git recurses directories.
Any specified X: (exclude) pattern matches are _not_ ignored.
Used with "--nogit", directory is used as a pattern match,
- no individual file within the directory or subdirectory
- is matched.
+ no individual file within the directory or subdirectory
+ is matched.
Used with "--git-blame", does not iterate all files in directory
Using "--git-blame" is slow and may add old committers and authors
that are no longer active maintainers to the output.
+ Using "--roles" or "--rolestats" with git send-email --cc-cmd or any
+ other automated tools that expect only ["name"] <email address>
+ may not work because of additional output after <email address>.
+ Using "--rolestats" and "--git-blame" shows the #/total=% commits,
+ not the percentage of the entire file authored. # of commits is
+ not a good measure of amount of code authored. 1 major commit may
+ contain a thousand lines, 5 trivial commits may modify a single line.
+ If git is not installed, but mercurial (hg) is installed and an .hg
+ repository exists, the following options apply to mercurial:
+ --git,
+ --git-min-signatures, --git-max-maintainers, --git-min-percent, and
+ --git-blame
+ Use --hg-since not --git-since to control date selection
EOT
}
@@ -493,7 +544,7 @@ sub parse_email {
}
sub format_email {
- my ($name, $address) = @_;
+ my ($name, $address, $usename) = @_;
my $formatted_email;
@@ -506,11 +557,11 @@ sub format_email {
$name = "\"$name\"";
}
- if ($email_usename) {
+ if ($usename) {
if ("$name" eq "") {
$formatted_email = "$address";
} else {
- $formatted_email = "$name <${address}>";
+ $formatted_email = "$name <$address>";
}
} else {
$formatted_email = $address;
@@ -547,6 +598,71 @@ sub find_ending_index {
return $index;
}
+sub get_maintainer_role {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ my $role;
+ my $subsystem = $typevalue[$start];
+ if (length($subsystem) > 20) {
+ $subsystem = substr($subsystem, 0, 17);
+ $subsystem =~ s/\s*$//;
+ $subsystem = $subsystem . "...";
+ }
+
+ for ($i = $start + 1; $i < $end; $i++) {
+ my $tv = $typevalue[$i];
+ if ($tv =~ m/^(\C):\s*(.*)/) {
+ my $ptype = $1;
+ my $pvalue = $2;
+ if ($ptype eq "S") {
+ $role = $pvalue;
+ }
+ }
+ }
+
+ $role = lc($role);
+ if ($role eq "supported") {
+ $role = "supporter";
+ } elsif ($role eq "maintained") {
+ $role = "maintainer";
+ } elsif ($role eq "odd fixes") {
+ $role = "odd fixer";
+ } elsif ($role eq "orphan") {
+ $role = "orphan minder";
+ } elsif ($role eq "obsolete") {
+ $role = "obsolete minder";
+ } elsif ($role eq "buried alive in reporters") {
+ $role = "chief penguin";
+ }
+
+ return $role . ":" . $subsystem;
+}
+
+sub get_list_role {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ my $subsystem = $typevalue[$start];
+ if (length($subsystem) > 20) {
+ $subsystem = substr($subsystem, 0, 17);
+ $subsystem =~ s/\s*$//;
+ $subsystem = $subsystem . "...";
+ }
+
+ if ($subsystem eq "THE REST") {
+ $subsystem = "";
+ }
+
+ return $subsystem;
+}
+
sub add_categories {
my ($index) = @_;
@@ -564,17 +680,22 @@ sub add_categories {
if ($ptype eq "L") {
my $list_address = $pvalue;
my $list_additional = "";
+ my $list_role = get_list_role($i);
+
+ if ($list_role ne "") {
+ $list_role = ":" . $list_role;
+ }
if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
$list_address = $1;
$list_additional = $2;
}
if ($list_additional =~ m/subscribers-only/) {
if ($email_subscriber_list) {
- push(@list_to, $list_address);
+ push(@list_to, [$list_address, "subscriber list${list_role}"]);
}
} else {
if ($email_list) {
- push(@list_to, $list_address);
+ push(@list_to, [$list_address, "open list${list_role}"]);
}
}
} elsif ($ptype eq "M") {
@@ -585,13 +706,14 @@ sub add_categories {
if ($tv =~ m/^(\C):\s*(.*)/) {
if ($1 eq "P") {
$name = $2;
- $pvalue = format_email($name, $address);
+ $pvalue = format_email($name, $address, $email_usename);
}
}
}
}
if ($email_maintainer) {
- push_email_addresses($pvalue);
+ my $role = get_maintainer_role($i);
+ push_email_addresses($pvalue, $role);
}
} elsif ($ptype eq "T") {
push(@scm, $pvalue);
@@ -618,7 +740,7 @@ sub email_inuse {
}
sub push_email_address {
- my ($line) = @_;
+ my ($line, $role) = @_;
my ($name, $address) = parse_email($line);
@@ -627,9 +749,9 @@ sub push_email_address {
}
if (!$email_remove_duplicates) {
- push(@email_to, format_email($name, $address));
+ push(@email_to, [format_email($name, $address, $email_usename), $role]);
} elsif (!email_inuse($name, $address)) {
- push(@email_to, format_email($name, $address));
+ push(@email_to, [format_email($name, $address, $email_usename), $role]);
$email_hash_name{$name}++;
$email_hash_address{$address}++;
}
@@ -638,24 +760,52 @@ sub push_email_address {
}
sub push_email_addresses {
- my ($address) = @_;
+ my ($address, $role) = @_;
my @address_list = ();
if (rfc822_valid($address)) {
- push_email_address($address);
+ push_email_address($address, $role);
} elsif (@address_list = rfc822_validlist($address)) {
my $array_count = shift(@address_list);
while (my $entry = shift(@address_list)) {
- push_email_address($entry);
+ push_email_address($entry, $role);
}
} else {
- if (!push_email_address($address)) {
+ if (!push_email_address($address, $role)) {
warn("Invalid MAINTAINERS address: '" . $address . "'\n");
}
}
}
+sub add_role {
+ my ($line, $role) = @_;
+
+ my ($name, $address) = parse_email($line);
+ my $email = format_email($name, $address, $email_usename);
+
+ foreach my $entry (@email_to) {
+ if ($email_remove_duplicates) {
+ my ($entry_name, $entry_address) = parse_email($entry->[0]);
+ if ($name eq $entry_name || $address eq $entry_address) {
+ if ($entry->[1] eq "") {
+ $entry->[1] = "$role";
+ } else {
+ $entry->[1] = "$entry->[1],$role";
+ }
+ }
+ } else {
+ if ($email eq $entry->[0]) {
+ if ($entry->[1] eq "") {
+ $entry->[1] = "$role";
+ } else {
+ $entry->[1] = "$entry->[1],$role";
+ }
+ }
+ }
+ }
+}
+
sub which {
my ($bin) = @_;
@@ -669,7 +819,7 @@ sub which {
}
sub mailmap {
- my @lines = @_;
+ my (@lines) = @_;
my %hash;
foreach my $line (@lines) {
@@ -678,14 +828,14 @@ sub mailmap {
$hash{$name} = $address;
} elsif ($address ne $hash{$name}) {
$address = $hash{$name};
- $line = format_email($name, $address);
+ $line = format_email($name, $address, $email_usename);
}
if (exists($mailmap{$name})) {
my $obj = $mailmap{$name};
foreach my $map_address (@$obj) {
if (($map_address eq $address) &&
($map_address ne $hash{$name})) {
- $line = format_email($name, $hash{$name});
+ $line = format_email($name, $hash{$name}, $email_usename);
}
}
}
@@ -694,34 +844,38 @@ sub mailmap {
return @lines;
}
-sub recent_git_signoffs {
- my ($file) = @_;
-
- my $sign_offs = "";
- my $cmd = "";
- my $output = "";
- my $count = 0;
+sub git_execute_cmd {
+ my ($cmd) = @_;
my @lines = ();
- my %hash;
- my $total_sign_offs;
- if (which("git") eq "") {
- warn("$P: git not found. Add --nogit to options?\n");
- return;
- }
- if (!(-d ".git")) {
- warn("$P: .git directory not found. Use a git repository for better results.\n");
- warn("$P: perhaps 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git'\n");
- return;
- }
+ my $output = `$cmd`;
+ $output =~ s/^\s*//gm;
+ @lines = split("\n", $output);
- $cmd = "git log --since=${email_git_since} -- ${file}";
+ return @lines;
+}
- $output = `${cmd}`;
- $output =~ s/^\s*//gm;
+sub hg_execute_cmd {
+ my ($cmd) = @_;
+ my @lines = ();
+ my $output = `$cmd`;
@lines = split("\n", $output);
+ return @lines;
+}
+
+sub vcs_find_signers {
+ my ($cmd) = @_;
+ my @lines = ();
+ my $commits;
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ my $pattern = $VCS_cmds{"commit_pattern"};
+
+ $commits = grep(/$pattern/, @lines); # of commits
+
@lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
if (!$email_git_penguin_chiefs) {
@lines = grep(!/${penguin_chiefs}/i, @lines);
@@ -729,111 +883,183 @@ sub recent_git_signoffs {
# cut -f2- -d":"
s/.*:\s*(.+)\s*/$1/ for (@lines);
- $total_sign_offs = @lines;
+## Reformat email addresses (with names) to avoid badly written signatures
- if ($email_remove_duplicates) {
- @lines = mailmap(@lines);
+ foreach my $line (@lines) {
+ my ($name, $address) = parse_email($line);
+ $line = format_email($name, $address, 1);
}
- @lines = sort(@lines);
-
- # uniq -c
- $hash{$_}++ for @lines;
-
- # sort -rn
- foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
- my $sign_offs = $hash{$line};
- $count++;
- last if ($sign_offs < $email_git_min_signatures ||
- $count > $email_git_max_maintainers ||
- $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
- push_email_address($line);
- }
+ return ($commits, @lines);
}
-sub save_commits {
- my ($cmd, @commits) = @_;
- my $output;
+sub vcs_save_commits {
+ my ($cmd) = @_;
my @lines = ();
+ my @commits = ();
- $output = `${cmd}`;
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
- @lines = split("\n", $output);
foreach my $line (@lines) {
- if ($line =~ m/^(\w+) /) {
- push (@commits, $1);
+ if ($line =~ m/$VCS_cmds{"blame_commit_pattern"}/) {
+ push(@commits, $1);
}
}
+
return @commits;
}
-sub git_assign_blame {
+sub vcs_blame {
my ($file) = @_;
-
- my @lines = ();
- my @commits = ();
my $cmd;
- my $output;
- my %hash;
- my $total_sign_offs;
- my $count;
+ my @commits = ();
+
+ return @commits if (!(-f $file));
+
+ if (@range && $VCS_cmds{"blame_range_cmd"} eq "") {
+ my @all_commits = ();
+
+ $cmd = $VCS_cmds{"blame_file_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ @all_commits = vcs_save_commits($cmd);
- if (@range) {
foreach my $file_range_diff (@range) {
next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
my $diff_file = $1;
my $diff_start = $2;
my $diff_length = $3;
- next if (!("$file" eq "$diff_file"));
- $cmd = "git blame -l -L $diff_start,+$diff_length $file";
- @commits = save_commits($cmd, @commits);
+ next if ("$file" ne "$diff_file");
+ for (my $i = $diff_start; $i < $diff_start + $diff_length; $i++) {
+ push(@commits, $all_commits[$i]);
+ }
}
- } else {
- if (-f $file) {
- $cmd = "git blame -l $file";
- @commits = save_commits($cmd, @commits);
+ } elsif (@range) {
+ foreach my $file_range_diff (@range) {
+ next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
+ my $diff_file = $1;
+ my $diff_start = $2;
+ my $diff_length = $3;
+ next if ("$file" ne "$diff_file");
+ $cmd = $VCS_cmds{"blame_range_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ push(@commits, vcs_save_commits($cmd));
}
+ } else {
+ $cmd = $VCS_cmds{"blame_file_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ @commits = vcs_save_commits($cmd);
}
- $total_sign_offs = 0;
- @commits = uniq(@commits);
- foreach my $commit (@commits) {
- $cmd = "git log -1 ${commit}";
+ return @commits;
+}
- $output = `${cmd}`;
- $output =~ s/^\s*//gm;
- @lines = split("\n", $output);
+my $printed_novcs = 0;
+sub vcs_exists {
+ %VCS_cmds = %VCS_cmds_git;
+ return 1 if eval $VCS_cmds{"available"};
+ %VCS_cmds = %VCS_cmds_hg;
+ return 1 if eval $VCS_cmds{"available"};
+ %VCS_cmds = ();
+ if (!$printed_novcs) {
+ warn("$P: No supported VCS found. Add --nogit to options?\n");
+ warn("Using a git repository produces better results.\n");
+ warn("Try Linus Torvalds' latest git repository using:\n");
+ warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n");
+ $printed_novcs = 1;
+ }
+ return 0;
+}
- @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
- if (!$email_git_penguin_chiefs) {
- @lines = grep(!/${penguin_chiefs}/i, @lines);
- }
+sub vcs_assign {
+ my ($role, $divisor, @lines) = @_;
- # cut -f2- -d":"
- s/.*:\s*(.+)\s*/$1/ for (@lines);
+ my %hash;
+ my $count = 0;
- $total_sign_offs += @lines;
+ return if (@lines <= 0);
- if ($email_remove_duplicates) {
- @lines = mailmap(@lines);
- }
+ if ($divisor <= 0) {
+ warn("Bad divisor in " . (caller(0))[3] . ": $divisor\n");
+ $divisor = 1;
+ }
- $hash{$_}++ for @lines;
+ if ($email_remove_duplicates) {
+ @lines = mailmap(@lines);
}
- $count = 0;
+ @lines = sort(@lines);
+
+ # uniq -c
+ $hash{$_}++ for @lines;
+
+ # sort -rn
foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
my $sign_offs = $hash{$line};
+ my $percent = $sign_offs * 100 / $divisor;
+
+ $percent = 100 if ($percent > 100);
$count++;
last if ($sign_offs < $email_git_min_signatures ||
$count > $email_git_max_maintainers ||
- $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
- push_email_address($line);
+ $percent < $email_git_min_percent);
+ push_email_address($line, '');
+ if ($output_rolestats) {
+ my $fmt_percent = sprintf("%.0f", $percent);
+ add_role($line, "$role:$sign_offs/$divisor=$fmt_percent%");
+ } else {
+ add_role($line, $role);
+ }
+ }
+}
+
+sub vcs_file_signoffs {
+ my ($file) = @_;
+
+ my @signers = ();
+ my $commits;
+
+ return if (!vcs_exists());
+
+ my $cmd = $VCS_cmds{"find_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
+
+ ($commits, @signers) = vcs_find_signers($cmd);
+ vcs_assign("commit_signer", $commits, @signers);
+}
+
+sub vcs_file_blame {
+ my ($file) = @_;
+
+ my @signers = ();
+ my @commits = ();
+ my $total_commits;
+
+ return if (!vcs_exists());
+
+ @commits = vcs_blame($file);
+ @commits = uniq(@commits);
+ $total_commits = @commits;
+
+ foreach my $commit (@commits) {
+ my $commit_count;
+ my @commit_signers = ();
+
+ my $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+
+ ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+ push(@signers, @commit_signers);
+ }
+
+ if ($from_filename) {
+ vcs_assign("commits", $total_commits, @signers);
+ } else {
+ vcs_assign("modified commits", $total_commits, @signers);
}
}
sub uniq {
- my @parms = @_;
+ my (@parms) = @_;
my %saw;
@parms = grep(!$saw{$_}++, @parms);
@@ -841,7 +1067,7 @@ sub uniq {
}
sub sort_and_uniq {
- my @parms = @_;
+ my (@parms) = @_;
my %saw;
@parms = sort @parms;
@@ -849,8 +1075,27 @@ sub sort_and_uniq {
return @parms;
}
+sub merge_email {
+ my @lines;
+ my %saw;
+
+ for (@_) {
+ my ($address, $role) = @$_;
+ if (!$saw{$address}) {
+ if ($output_roles) {
+ push(@lines, "$address ($role)");
+ } else {
+ push(@lines, $address);
+ }
+ $saw{$address} = 1;
+ }
+ }
+
+ return @lines;
+}
+
sub output {
- my @parms = @_;
+ my (@parms) = @_;
if ($output_multiline) {
foreach my $line (@parms) {
@@ -947,11 +1192,9 @@ sub rfc822_validlist ($) {
if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
$s =~ m/^$rfc822_char*$/) {
while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
- push @r, $1;
+ push(@r, $1);
}
return wantarray ? (scalar(@r), @r) : 1;
}
- else {
- return wantarray ? () : 0;
- }
+ return wantarray ? () : 0;
}
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index f0d14452632b..9cf0a6fad6ba 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -295,6 +295,9 @@ if ($arch eq "x86_64") {
$ld .= " -m elf64_sparc";
$cc .= " -m64";
$objcopy .= " -O elf64-sparc";
+} elsif ($arch eq "microblaze") {
+ # Microblaze calls '_mcount' instead of plain 'mcount'.
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
} else {
die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
}
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index b4b48afb6de6..5d9411839cd7 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -159,7 +159,7 @@ static int pxa2xx_ac97_resume(struct device *dev)
return ret;
}
-static struct dev_pm_ops pxa2xx_ac97_pm_ops = {
+static const struct dev_pm_ops pxa2xx_ac97_pm_ops = {
.suspend = pxa2xx_ac97_suspend,
.resume = pxa2xx_ac97_resume,
};
diff --git a/sound/isa/gus/gus_mem.c b/sound/isa/gus/gus_mem.c
index 661205c4dcea..af888a022fc0 100644
--- a/sound/isa/gus/gus_mem.c
+++ b/sound/isa/gus/gus_mem.c
@@ -127,7 +127,8 @@ static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
!share_id[2] && !share_id[3])
return NULL;
for (block = alloc->first; block; block = block->next)
- if (!memcmp(share_id, block->share_id, sizeof(share_id)))
+ if (!memcmp(share_id, block->share_id,
+ sizeof(block->share_id)))
return block;
return NULL;
}
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 20cb60afb200..c11920623009 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -2122,7 +2122,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
}
/* nothing should be in powerdown mode */
snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0);
- end_time = jiffies + msecs_to_jiffies(120);
+ end_time = jiffies + msecs_to_jiffies(5000);
do {
if ((snd_ac97_read(ac97, AC97_POWERDOWN) & 0x0f) == 0x0f)
goto __ready_ok;
diff --git a/sound/pci/cs5535audio/Makefile b/sound/pci/cs5535audio/Makefile
index fda7a94c992f..ccc642269b9e 100644
--- a/sound/pci/cs5535audio/Makefile
+++ b/sound/pci/cs5535audio/Makefile
@@ -4,9 +4,7 @@
snd-cs5535audio-y := cs5535audio.o cs5535audio_pcm.o
snd-cs5535audio-$(CONFIG_PM) += cs5535audio_pm.o
-ifdef CONFIG_MGEODE_LX
snd-cs5535audio-$(CONFIG_OLPC) += cs5535audio_olpc.o
-endif
# Toplevel Module Dependency
obj-$(CONFIG_SND_CS5535AUDIO) += snd-cs5535audio.o
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 05f56e04849b..91e7faf69bbb 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -389,6 +389,7 @@ probefail_out:
static void __devexit snd_cs5535audio_remove(struct pci_dev *pci)
{
+ olpc_quirks_cleanup();
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
}
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index 7a298ac662e3..51966d782a3c 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -99,10 +99,11 @@ int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state);
int snd_cs5535audio_resume(struct pci_dev *pci);
#endif
-#if defined(CONFIG_OLPC) && defined(CONFIG_MGEODE_LX)
+#ifdef CONFIG_OLPC
void __devinit olpc_prequirks(struct snd_card *card,
struct snd_ac97_template *ac97);
int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97);
+void __devexit olpc_quirks_cleanup(void);
void olpc_analog_input(struct snd_ac97 *ac97, int on);
void olpc_mic_bias(struct snd_ac97 *ac97, int on);
@@ -128,6 +129,7 @@ static inline int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
{
return 0;
}
+static inline void olpc_quirks_cleanup(void) { }
static inline void olpc_analog_input(struct snd_ac97 *ac97, int on) { }
static inline void olpc_mic_bias(struct snd_ac97 *ac97, int on) { }
static inline void olpc_capture_open(struct snd_ac97 *ac97) { }
diff --git a/sound/pci/cs5535audio/cs5535audio_olpc.c b/sound/pci/cs5535audio/cs5535audio_olpc.c
index 5c6814335cd7..50da49be9ae5 100644
--- a/sound/pci/cs5535audio/cs5535audio_olpc.c
+++ b/sound/pci/cs5535audio/cs5535audio_olpc.c
@@ -13,10 +13,13 @@
#include <sound/info.h>
#include <sound/control.h>
#include <sound/ac97_codec.h>
+#include <linux/gpio.h>
#include <asm/olpc.h>
#include "cs5535audio.h"
+#define DRV_NAME "cs5535audio-olpc"
+
/*
* OLPC has an additional feature on top of the regular AD1888 codec features.
* It has an Analog Input mode that is switched into (after disabling the
@@ -38,10 +41,7 @@ void olpc_analog_input(struct snd_ac97 *ac97, int on)
}
/* set Analog Input through GPIO */
- if (on)
- geode_gpio_set(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL);
- else
- geode_gpio_clear(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL);
+ gpio_set_value(OLPC_GPIO_MIC_AC, on);
}
/*
@@ -73,8 +73,7 @@ static int olpc_dc_info(struct snd_kcontrol *kctl,
static int olpc_dc_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *v)
{
- v->value.integer.value[0] = geode_gpio_isset(OLPC_GPIO_MIC_AC,
- GPIO_OUTPUT_VAL);
+ v->value.integer.value[0] = gpio_get_value(OLPC_GPIO_MIC_AC);
return 0;
}
@@ -153,6 +152,12 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
if (!machine_is_olpc())
return 0;
+ if (gpio_request(OLPC_GPIO_MIC_AC, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME ": unable to allocate MIC GPIO\n");
+ return -EIO;
+ }
+ gpio_direction_output(OLPC_GPIO_MIC_AC, 0);
+
/* drop the original AD1888 HPF control */
memset(&elem, 0, sizeof(elem));
elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -169,11 +174,18 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
for (i = 0; i < ARRAY_SIZE(olpc_cs5535audio_ctls); i++) {
err = snd_ctl_add(card, snd_ctl_new1(&olpc_cs5535audio_ctls[i],
ac97->private_data));
- if (err < 0)
+ if (err < 0) {
+ gpio_free(OLPC_GPIO_MIC_AC);
return err;
+ }
}
/* turn off the mic by default */
olpc_mic_bias(ac97, 0);
return 0;
}
+
+void __devexit olpc_quirks_cleanup(void)
+{
+ gpio_free(OLPC_GPIO_MIC_AC);
+}
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index d24328661c6a..40ccb419b6e9 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -24,6 +24,7 @@
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include "hda_codec.h"
@@ -428,8 +429,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf)
char *key, *val;
struct hda_hint *hint;
- while (isspace(*buf))
- buf++;
+ buf = skip_spaces(buf);
if (!*buf || *buf == '#' || *buf == '\n')
return 0;
if (*buf == '=')
@@ -444,8 +444,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf)
return -EINVAL;
}
*val++ = 0;
- while (isspace(*val))
- val++;
+ val = skip_spaces(val);
remove_trail_spaces(key);
remove_trail_spaces(val);
hint = get_hint(codec, key);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e54420e691ae..9b56f937913e 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2713,6 +2713,9 @@ static struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(0x10de, 0x0be2), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(0x10de, 0x0be3), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(0x10de, 0x0be4), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA },
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 447eda1f6770..1a36137e13ec 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1789,6 +1789,14 @@ static int patch_ad1981(struct hda_codec *codec)
codec->patch_ops.init = ad1981_hp_init;
codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
+ /* set the upper limit for the mixer amp to 0dB to avoid possible
+ * damage from overloading
+ */
+ snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
+ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
+ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+ (1 << AC_AMPCAP_MUTE_SHIFT));
break;
case AD1981_THINKPAD:
spec->mixers[0] = ad1981_thinkpad_mixers;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 888b6313eeca..aeed4cc5aa79 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6248,6 +6248,7 @@ static const char *alc260_models[ALC260_MODEL_LAST] = {
static struct snd_pci_quirk alc260_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
+ SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 7717e01fc071..edaa729126bb 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -143,7 +143,8 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE;
- // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ /* FIXME: This driver should be updated to allow for dynamic IRQ sharing */
+ /* link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; */
link->irq.Handler = pdacf_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 5f1681f6ca76..2a27f7b56726 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -26,7 +26,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -175,7 +175,7 @@ static int twl4030_write(struct snd_soc_codec *codec,
{
twl4030_write_reg_cache(codec, reg, value);
if (likely(reg < TWL4030_REG_SW_SHADOW))
- return twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value,
+ return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value,
reg);
else
return 0;
@@ -261,7 +261,7 @@ static void twl4030_power_up(struct snd_soc_codec *codec)
do {
/* this takes a little while, so don't slam i2c */
udelay(2000);
- twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
TWL4030_REG_ANAMICL);
} while ((i++ < 100) &&
((byte & TWL4030_CNCL_OFFSET_START) ==
@@ -542,7 +542,7 @@ static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \
break; \
case SND_SOC_DAPM_POST_PMD: \
reg_val = twl4030_read_reg_cache(w->codec, reg); \
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
reg_val & (~mask), \
reg); \
break; \
@@ -679,7 +679,7 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
twl4030->sysclk) + 1);
/* Bypass the reg_cache to mute the headset */
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
hs_gain & (~0x0f),
TWL4030_REG_HS_GAIN_SET);
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index f82125d9e85a..ebbf11b653a4 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1340,9 +1340,10 @@ static int wm8350_resume(struct platform_device *pdev)
return 0;
}
-static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data)
+static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
{
struct wm8350_data *priv = data;
+ struct wm8350 *wm8350 = priv->codec.control_data;
u16 reg;
int report;
int mask;
@@ -1365,7 +1366,7 @@ static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data)
if (!jack->jack) {
dev_warn(wm8350->dev, "Jack interrupt called with no jack\n");
- return;
+ return IRQ_NONE;
}
/* Debounce */
@@ -1378,6 +1379,8 @@ static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data)
report = 0;
snd_soc_jack_report(jack->jack, report, jack->report);
+
+ return IRQ_HANDLED;
}
/**
@@ -1421,9 +1424,7 @@ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena);
/* Sync status */
- wm8350_hp_jack_handler(wm8350, irq, priv);
-
- wm8350_unmask_irq(wm8350, irq);
+ wm8350_hp_jack_handler(irq, priv);
return 0;
}
@@ -1482,12 +1483,16 @@ static int wm8350_probe(struct platform_device *pdev)
wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME,
WM8350_OUT2_VU | WM8350_OUT2R_MUTE);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
+ /* Make sure jack detect is disabled to start off with */
+ wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
+ WM8350_JDL_ENA | WM8350_JDR_ENA);
+
wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L,
- wm8350_hp_jack_handler, priv);
+ wm8350_hp_jack_handler, 0, "Left jack detect",
+ priv);
wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R,
- wm8350_hp_jack_handler, priv);
+ wm8350_hp_jack_handler, 0, "Right jack detect",
+ priv);
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
@@ -1516,8 +1521,6 @@ static int wm8350_remove(struct platform_device *pdev)
WM8350_JDL_ENA | WM8350_JDR_ENA);
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index c9438dd62df3..dbc368c08263 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -199,7 +199,7 @@ static void wm8900_reset(struct snd_soc_codec *codec)
snd_soc_write(codec, WM8900_REG_RESET, 0);
memcpy(codec->reg_cache, wm8900_reg_defaults,
- sizeof(codec->reg_cache));
+ sizeof(wm8900_reg_defaults));
}
static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
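
The wm8900_reset() change above fixes a sizeof-of-a-pointer bug: reg_cache in
struct snd_soc_codec is a pointer, so sizeof(codec->reg_cache) evaluates to the
pointer size (4 or 8 bytes) rather than the size of the defaults table, leaving
most of the register cache uninitialised after a reset. A self-contained
illustration with stand-in types:

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins; the real table and codec structure live in the driver. */
    static const unsigned short wm8900_reg_defaults[64] = { 0 };

    struct fake_codec {
        void *reg_cache;
    };

    int main(void)
    {
        static unsigned short cache[64];
        struct fake_codec codec = { .reg_cache = cache };

        /* Buggy: copies only sizeof(void *) bytes of the defaults. */
        memcpy(codec.reg_cache, wm8900_reg_defaults, sizeof(codec.reg_cache));
        /* Fixed: copies the whole defaults table. */
        memcpy(codec.reg_cache, wm8900_reg_defaults, sizeof(wm8900_reg_defaults));

        printf("pointer: %zu bytes, table: %zu bytes\n",
               sizeof(codec.reg_cache), sizeof(wm8900_reg_defaults));
        return 0;
    }
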
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c
index d441c3b64631..4984754f3298 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.c
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.c
@@ -312,7 +312,7 @@ int simtec_audio_resume(struct device *dev)
return 0;
}
-struct dev_pm_ops simtec_audio_pmops = {
+const struct dev_pm_ops simtec_audio_pmops = {
.resume = simtec_audio_resume,
};
EXPORT_SYMBOL_GPL(simtec_audio_pmops);
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h
index 2714203af161..e18faee30cce 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.h
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.h
@@ -15,7 +15,7 @@ extern int simtec_audio_core_probe(struct platform_device *pdev,
extern int simtec_audio_remove(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern struct dev_pm_ops simtec_audio_pmops;
+extern const struct dev_pm_ops simtec_audio_pmops;
#define simtec_audio_pm &simtec_audio_pmops
#else
#define simtec_audio_pm NULL
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index ef8f28284cb9..0a6440c6f54a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1236,7 +1236,7 @@ static int soc_poweroff(struct device *dev)
return 0;
}
-static struct dev_pm_ops soc_pm_ops = {
+static const struct dev_pm_ops soc_pm_ops = {
.suspend = soc_suspend,
.resume = soc_resume,
.poweroff = soc_poweroff,
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 23ec66098bdc..406999668cab 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -237,8 +237,8 @@ lib = lib
export prefix bindir sharedir sysconfdir
-CC = gcc
-AR = ar
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
RM = rm -f
TAR = tar
FIND = find
@@ -356,7 +356,9 @@ LIB_H += util/parse-options.h
LIB_H += util/parse-events.h
LIB_H += util/quote.h
LIB_H += util/util.h
+LIB_H += util/header.h
LIB_H += util/help.h
+LIB_H += util/session.h
LIB_H += util/strbuf.h
LIB_H += util/string.h
LIB_H += util/strlist.h
@@ -405,6 +407,7 @@ LIB_OBJS += util/callchain.o
LIB_OBJS += util/values.o
LIB_OBJS += util/debug.o
LIB_OBJS += util/map.o
+LIB_OBJS += util/session.o
LIB_OBJS += util/thread.o
LIB_OBJS += util/trace-event-parse.o
LIB_OBJS += util/trace-event-read.o
@@ -492,8 +495,10 @@ else
LIB_OBJS += util/probe-finder.o
endif
+ifndef NO_LIBPERL
PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+endif
ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
BASIC_CFLAGS += -DNO_LIBPERL
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 605a2a959aa8..81cee78181fa 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -1,6 +1,6 @@
/*
*
- * builtin-bench-messaging.c
+ * sched-messaging.c
*
* messaging: Benchmark for scheduler and IPC mechanisms
*
@@ -320,10 +320,12 @@ int bench_sched_messaging(int argc, const char **argv,
num_groups, num_groups * 2 * num_fds,
thread_mode ? "threads" : "processes");
printf(" %14s: %lu.%03lu [sec]\n", "Total time",
- diff.tv_sec, diff.tv_usec/1000);
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
break;
case BENCH_FORMAT_SIMPLE:
- printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000);
+ printf("%lu.%03lu\n", diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
break;
default:
/* reaching here is something disaster */
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 238185f97977..4f77c7c27640 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -1,6 +1,6 @@
/*
*
- * builtin-bench-pipe.c
+ * sched-pipe.c
*
* pipe: Benchmark for pipe()
*
@@ -87,7 +87,8 @@ int bench_sched_pipe(int argc, const char **argv,
if (pid) {
retpid = waitpid(pid, &wait_stat, 0);
assert((retpid == pid) && WIFEXITED(wait_stat));
- return 0;
+ } else {
+ exit(0);
}
switch (bench_format) {
@@ -99,7 +100,8 @@ int bench_sched_pipe(int argc, const char **argv,
result_usec += diff.tv_usec;
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
- diff.tv_sec, diff.tv_usec/1000);
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
printf(" %14lf usecs/op\n",
(double)result_usec / (double)loops);
@@ -110,7 +112,8 @@ int bench_sched_pipe(int argc, const char **argv,
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n",
- diff.tv_sec, diff.tv_usec / 1000);
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec / 1000));
break;
default:
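
The casts added in the two benchmarks above address a printf portability issue:
tv_usec is a suseconds_t, whose width is implementation-defined, so passing it
straight to a "%lu" conversion can produce format warnings (or wrong output) on
some targets. A minimal sketch of the portable form; it casts both fields for
uniformity, while the patch itself only needed the tv_usec cast:

    #include <stdio.h>
    #include <sys/time.h>

    /* Print an elapsed time as seconds.milliseconds, as the benchmarks do. */
    static void print_elapsed(const struct timeval *diff)
    {
        printf("%lu.%03lu\n",
               (unsigned long) diff->tv_sec,
               (unsigned long) (diff->tv_usec / 1000));
    }

    int main(void)
    {
        struct timeval diff = { .tv_sec = 12, .tv_usec = 345678 };
        print_elapsed(&diff);    /* prints "12.345" */
        return 0;
    }
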
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 0bf2e8f9af57..21a78d30d53d 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -25,6 +25,7 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
+#include "util/session.h"
#include "util/data_map.h"
static char const *input_name = "perf.data";
@@ -462,21 +463,23 @@ static struct perf_file_handler file_handler = {
static int __cmd_annotate(void)
{
- struct perf_header *header;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, force);
struct thread *idle;
int ret;
+ if (session == NULL)
+ return -ENOMEM;
+
idle = register_idle_thread();
register_perf_file_handler(&file_handler);
- ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ ret = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd);
if (ret)
- return ret;
+ goto out_delete;
if (dump_trace) {
event__print_totals();
- return 0;
+ goto out_delete;
}
if (verbose > 3)
@@ -489,6 +492,8 @@ static int __cmd_annotate(void)
output__resort(event__total[0]);
find_annotations();
+out_delete:
+ perf_session__delete(session);
return ret;
}
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index e043eb83092a..46996774e559 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -31,6 +31,9 @@ struct bench_suite {
const char *summary;
int (*fn)(int, const char **, const char *);
};
+ \
+/* sentinel entry: simplifies generating the help listing */
+#define suite_all { "all", "test all suite (pseudo suite)", NULL }
static struct bench_suite sched_suites[] = {
{ "messaging",
@@ -39,6 +42,7 @@ static struct bench_suite sched_suites[] = {
{ "pipe",
"Flood of communication over pipe() between two processes",
bench_sched_pipe },
+ suite_all,
{ NULL,
NULL,
NULL }
@@ -48,6 +52,7 @@ static struct bench_suite mem_suites[] = {
{ "memcpy",
"Simple memory copy in various ways",
bench_mem_memcpy },
+ suite_all,
{ NULL,
NULL,
NULL }
@@ -66,6 +71,9 @@ static struct bench_subsys subsystems[] = {
{ "mem",
"memory access performance",
mem_suites },
+ { "all", /* sentinel entry: simplifies the help listing */
+ "test all subsystem (pseudo subsystem)",
+ NULL },
{ NULL,
NULL,
NULL }
@@ -75,11 +83,11 @@ static void dump_suites(int subsys_index)
{
int i;
- printf("List of available suites for %s...\n\n",
+ printf("# List of available suites for %s...\n\n",
subsystems[subsys_index].name);
for (i = 0; subsystems[subsys_index].suites[i].name; i++)
- printf("\t%s: %s\n",
+ printf("%14s: %s\n",
subsystems[subsys_index].suites[i].name,
subsystems[subsys_index].suites[i].summary);
@@ -110,10 +118,10 @@ static void print_usage(void)
printf("\t%s\n", bench_usage[i]);
printf("\n");
- printf("List of available subsystems...\n\n");
+ printf("# List of available subsystems...\n\n");
for (i = 0; subsystems[i].name; i++)
- printf("\t%s: %s\n",
+ printf("%14s: %s\n",
subsystems[i].name, subsystems[i].summary);
printf("\n");
}
@@ -131,6 +139,37 @@ static int bench_str2int(char *str)
return BENCH_FORMAT_UNKNOWN;
}
+static void all_suite(struct bench_subsys *subsys)
+{
+ int i;
+ const char *argv[2];
+ struct bench_suite *suites = subsys->suites;
+
+ argv[1] = NULL;
+ /*
+ * TODO:
+ * it would be helpful to provide preset parameters
+ * for embedded systems, ordinary PCs, HPC, etc.
+ */
+ for (i = 0; suites[i].fn; i++) {
+ printf("# Running %s/%s benchmark...\n",
+ subsys->name,
+ suites[i].name);
+
+ argv[1] = suites[i].name;
+ suites[i].fn(1, argv, NULL);
+ printf("\n");
+ }
+}
+
+static void all_subsystem(void)
+{
+ int i;
+ for (i = 0; subsystems[i].suites; i++)
+ all_suite(&subsystems[i]);
+}
+
int cmd_bench(int argc, const char **argv, const char *prefix __used)
{
int i, j, status = 0;
@@ -155,6 +194,11 @@ int cmd_bench(int argc, const char **argv, const char *prefix __used)
goto end;
}
+ if (!strcmp(argv[0], "all")) {
+ all_subsystem();
+ goto end;
+ }
+
for (i = 0; subsystems[i].name; i++) {
if (strcmp(subsystems[i].name, argv[0]))
continue;
@@ -165,6 +209,11 @@ int cmd_bench(int argc, const char **argv, const char *prefix __used)
goto end;
}
+ if (!strcmp(argv[1], "all")) {
+ all_suite(&subsystems[i]);
+ goto end;
+ }
+
for (j = 0; subsystems[i].suites[j].name; j++) {
if (strcmp(subsystems[i].suites[j].name, argv[1]))
continue;
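
A note on the two sentinels used above: the pseudo "all" entry has a NULL fn,
so the fn-terminated loop in all_suite() stops in front of it, while the
name-terminated loops used for help output and lookup still see it; the final
all-NULL entry terminates both kinds of loop. A small stand-alone sketch of
that interplay:

    #include <stdio.h>

    struct bench_suite {
        const char *name;
        const char *summary;
        int (*fn)(int, const char **, const char *);
    };

    static int run_messaging(int argc, const char **argv, const char *prefix)
    {
        (void)argc; (void)argv; (void)prefix;
        return 0;
    }

    static struct bench_suite suites[] = {
        { "messaging", "sched messaging benchmark",     run_messaging },
        { "all",       "test all suite (pseudo suite)", NULL },
        { NULL,        NULL,                            NULL },
    };

    int main(void)
    {
        int i;

        for (i = 0; suites[i].name; i++)      /* help: lists "all" too */
            printf("%14s: %s\n", suites[i].name, suites[i].summary);

        for (i = 0; suites[i].fn; i++)        /* runner: stops before "all" */
            suites[i].fn(1, NULL, NULL);

        return 0;
    }
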
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index dcb6143a0002..bfd16a1594e4 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -11,8 +11,8 @@
#include "util/cache.h"
#include "util/data_map.h"
#include "util/debug.h"
-#include "util/header.h"
#include "util/parse-options.h"
+#include "util/session.h"
#include "util/symbol.h"
static char const *input_name = "perf.data";
@@ -55,56 +55,17 @@ static int perf_file_section__process_buildids(struct perf_file_section *self,
static int __cmd_buildid_list(void)
{
int err = -1;
- struct perf_header *header;
- struct perf_file_header f_header;
- struct stat input_stat;
- int input = open(input_name, O_RDONLY);
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, force);
- if (input < 0) {
- pr_err("failed to open file: %s", input_name);
- if (!strcmp(input_name, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- goto out;
- }
-
- err = fstat(input, &input_stat);
- if (err < 0) {
- perror("failed to stat file");
- goto out_close;
- }
-
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file %s not owned by current user or root\n",
- input_name);
- goto out_close;
- }
-
- if (!input_stat.st_size) {
- pr_info("zero-sized file, nothing to do!\n");
- goto out_close;
- }
-
- err = -1;
- header = perf_header__new();
- if (header == NULL)
- goto out_close;
-
- if (perf_file_header__read(&f_header, header, input) < 0) {
- pr_warning("incompatible file format");
- goto out_close;
- }
+ if (session == NULL)
+ return -1;
- err = perf_header__process_sections(header, input,
+ err = perf_header__process_sections(&session->header, session->fd,
perf_file_section__process_buildids);
+ if (err >= 0)
+ dsos__fprintf_buildid(stdout);
- if (err < 0)
- goto out_close;
-
- dsos__fprintf_buildid(stdout);
-out_close:
- close(input);
-out:
+ perf_session__delete(session);
return err;
}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 5f209514f657..2071d2485913 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -6,6 +6,7 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
@@ -20,7 +21,6 @@ typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
static char const *input_name = "perf.data";
-static struct perf_header *header;
static u64 sample_type;
static int alloc_flag;
@@ -367,11 +367,18 @@ static struct perf_file_handler file_handler = {
static int read_events(void)
{
+ int err;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+
+ if (session == NULL)
+ return -ENOMEM;
+
register_idle_thread();
register_perf_file_handler(&file_handler);
- return mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd);
+ perf_session__delete(session);
+ return err;
}
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -403,7 +410,7 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller)
if (is_caller) {
addr = data->call_site;
if (!raw_ip)
- sym = thread__find_function(kthread, addr, NULL);
+ sym = map_groups__find_function(kmaps, addr, NULL);
} else
addr = data->ptr;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0e519c667e3a..4decbd14eaed 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -17,6 +17,7 @@
#include "util/header.h"
#include "util/event.h"
#include "util/debug.h"
+#include "util/session.h"
#include "util/symbol.h"
#include <unistd.h>
@@ -62,7 +63,7 @@ static int nr_cpu = 0;
static int file_new = 1;
-struct perf_header *header = NULL;
+static struct perf_session *session;
struct mmap_data {
int counter;
@@ -216,12 +217,12 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
{
struct perf_header_attr *h_attr;
- if (nr < header->attrs) {
- h_attr = header->attr[nr];
+ if (nr < session->header.attrs) {
+ h_attr = session->header.attr[nr];
} else {
h_attr = perf_header_attr__new(a);
if (h_attr != NULL)
- if (perf_header__add_attr(header, h_attr) < 0) {
+ if (perf_header__add_attr(&session->header, h_attr) < 0) {
perf_header_attr__delete(h_attr);
h_attr = NULL;
}
@@ -395,9 +396,9 @@ static void open_counters(int cpu, pid_t pid)
static void atexit_header(void)
{
- header->data_size += bytes_written;
+ session->header.data_size += bytes_written;
- perf_header__write(header, output, true);
+ perf_header__write(&session->header, output, true);
}
static int __cmd_record(int argc, const char **argv)
@@ -440,24 +441,24 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- header = perf_header__new();
- if (header == NULL) {
+ session = perf_session__new(output_name, O_WRONLY, force);
+ if (session == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
}
if (!file_new) {
- err = perf_header__read(header, output);
+ err = perf_header__read(&session->header, output);
if (err < 0)
return err;
}
if (raw_samples) {
- perf_header__set_feat(header, HEADER_TRACE_INFO);
+ perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
} else {
for (i = 0; i < nr_counters; i++) {
if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
- perf_header__set_feat(header, HEADER_TRACE_INFO);
+ perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
break;
}
}
@@ -481,7 +482,7 @@ static int __cmd_record(int argc, const char **argv)
}
if (file_new) {
- err = perf_header__write(header, output, false);
+ err = perf_header__write(&session->header, output, false);
if (err < 0)
return err;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2b9eb3a553ed..e2ec49a9b731 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -22,6 +22,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/header.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -52,7 +53,7 @@ static int exclude_other = 1;
static char callchain_default_opt[] = "fractal,0.5";
-static struct perf_header *header;
+static struct perf_session *session;
static u64 sample_type;
@@ -701,7 +702,7 @@ static int process_read_event(event_t *event)
{
struct perf_event_attr *attr;
- attr = perf_header__find_attr(event->read.id, header);
+ attr = perf_header__find_attr(event->read.id, &session->header);
if (show_threads) {
const char *name = attr ? __event_name(attr->type, attr->config)
@@ -766,6 +767,10 @@ static int __cmd_report(void)
struct thread *idle;
int ret;
+ session = perf_session__new(input_name, O_RDONLY, force);
+ if (session == NULL)
+ return -ENOMEM;
+
idle = register_idle_thread();
thread__comm_adjust(idle);
@@ -774,14 +779,14 @@ static int __cmd_report(void)
register_perf_file_handler(&file_handler);
- ret = mmap_dispatch_perf_file(&header, input_name, force,
- full_paths, &event__cwdlen, &event__cwd);
+ ret = perf_session__process_events(session, full_paths,
+ &event__cwdlen, &event__cwd);
if (ret)
- return ret;
+ goto out_delete;
if (dump_trace) {
event__print_totals();
- return 0;
+ goto out_delete;
}
if (verbose > 3)
@@ -796,7 +801,8 @@ static int __cmd_report(void)
if (show_threads)
perf_read_values_destroy(&show_threads_values);
-
+out_delete:
+ perf_session__delete(session);
return ret;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7cca7c15b40a..65021fe1361e 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -6,6 +6,7 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
@@ -21,7 +22,6 @@
static char const *input_name = "perf.data";
-static struct perf_header *header;
static u64 sample_type;
static char default_sort_order[] = "avg, max, switch, runtime";
@@ -1663,11 +1663,18 @@ static struct perf_file_handler file_handler = {
static int read_events(void)
{
+ int err;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+
+ if (session == NULL)
+ return -ENOMEM;
+
register_idle_thread();
register_perf_file_handler(&file_handler);
- return mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd);
+ perf_session__delete(session);
+ return err;
}
static void print_bad_events(void)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index f472df9561ee..759dd2b35fdb 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1059,15 +1059,17 @@ static struct perf_file_handler file_handler = {
static int __cmd_timechart(void)
{
- struct perf_header *header;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
int ret;
+ if (session == NULL)
+ return -ENOMEM;
+
register_perf_file_handler(&file_handler);
- ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ ret = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd);
if (ret)
- return EXIT_FAILURE;
+ goto out_delete;
process_samples();
@@ -1079,8 +1081,9 @@ static int __cmd_timechart(void)
pr_info("Written %2.1f seconds of trace to %s.\n",
(last_time - first_time) / 1000000000.0, output_name);
-
- return EXIT_SUCCESS;
+out_delete:
+ perf_session__delete(session);
+ return ret;
}
static const char * const timechart_usage[] = {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index c2fcc34486f5..0756664666f1 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -7,6 +7,7 @@
#include "util/header.h"
#include "util/exec_cmd.h"
#include "util/trace-event.h"
+#include "util/session.h"
static char const *script_name;
static char const *generate_script_lang;
@@ -61,7 +62,7 @@ static int cleanup_scripting(void)
static char const *input_name = "perf.data";
-static struct perf_header *header;
+static struct perf_session *session;
static u64 sample_type;
static int process_sample_event(event_t *event)
@@ -126,11 +127,18 @@ static struct perf_file_handler file_handler = {
static int __cmd_trace(void)
{
+ int err;
+
+ session = perf_session__new(input_name, O_RDONLY, 0);
+ if (session == NULL)
+ return -ENOMEM;
+
register_idle_thread();
register_perf_file_handler(&file_handler);
- return mmap_dispatch_perf_file(&header, input_name,
- 0, 0, &event__cwdlen, &event__cwd);
+ err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd);
+ perf_session__delete(session);
+ return err;
}
struct script_spec {
@@ -348,11 +356,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
return -1;
}
- header = perf_header__new();
- if (header == NULL)
- return -1;
-
- perf_header__read(header, input);
+ perf_header__read(&session->header, input);
err = scripting_ops->generate_script("perf-trace");
goto out;
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 454d5d55f32d..75f941bfba9e 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -59,6 +59,18 @@
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#endif
+#ifdef __arm__
+#include "../../arch/arm/include/asm/unistd.h"
+/*
+ * Use the __kuser_memory_barrier helper in the CPU helper page. See
+ * arch/arm/kernel/entry-armv.S in the kernel source for details.
+ */
+#define rmb() asm volatile("mov r0, #0xffff0fff; mov lr, pc;" \
+ "sub pc, r0, #95" ::: "r0", "lr", "cc", \
+ "memory")
+#define cpu_relax() asm volatile("":::"memory")
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
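
The ARM rmb() added above branches into the kernel's user-helper page:
0xffff0fff - 95 = 0xffff0fa0, the entry point of __kuser_memory_barrier. A
hypothetical equivalent through a function pointer rather than inline assembly
(illustration only, not what the patch uses):

    #include <stdint.h>

    /* Hypothetical sketch: invoke the kuser helper through a pointer. */
    typedef void (*kuser_memory_barrier_fn)(void);

    static inline void arm_user_rmb(void)
    {
        ((kuser_memory_barrier_fn)(uintptr_t)0xffff0fa0)();
    }
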
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
index 59b65d0bd7c1..6d46dda53a29 100644
--- a/tools/perf/util/data_map.c
+++ b/tools/perf/util/data_map.c
@@ -129,23 +129,16 @@ out:
return err;
}
-int mmap_dispatch_perf_file(struct perf_header **pheader,
- const char *input_name,
- int force,
- int full_paths,
- int *cwdlen,
- char **cwd)
+int perf_session__process_events(struct perf_session *self,
+ int full_paths, int *cwdlen, char **cwd)
{
int err;
- struct perf_header *header;
unsigned long head, shift;
unsigned long offset = 0;
- struct stat input_stat;
size_t page_size;
u64 sample_type;
event_t *event;
uint32_t size;
- int input;
char *buf;
if (curr_handler == NULL) {
@@ -155,56 +148,19 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
page_size = getpagesize();
- input = open(input_name, O_RDONLY);
- if (input < 0) {
- pr_err("Failed to open file: %s", input_name);
- if (!strcmp(input_name, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- return -errno;
- }
-
- if (fstat(input, &input_stat) < 0) {
- pr_err("failed to stat file");
- err = -errno;
- goto out_close;
- }
-
- err = -EACCES;
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file: %s not owned by current user or root\n",
- input_name);
- goto out_close;
- }
-
- if (input_stat.st_size == 0) {
- pr_info("zero-sized file, nothing to do!\n");
- goto done;
- }
-
- err = -ENOMEM;
- header = perf_header__new();
- if (header == NULL)
- goto out_close;
-
- err = perf_header__read(header, input);
- if (err < 0)
- goto out_delete;
- *pheader = header;
- head = header->data_offset;
-
- sample_type = perf_header__sample_type(header);
+ head = self->header.data_offset;
+ sample_type = perf_header__sample_type(&self->header);
err = -EINVAL;
if (curr_handler->sample_type_check &&
curr_handler->sample_type_check(sample_type) < 0)
- goto out_delete;
+ goto out_err;
if (!full_paths) {
if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
pr_err("failed to get the current directory\n");
err = -errno;
- goto out_delete;
+ goto out_err;
}
*cwd = __cwd;
*cwdlen = strlen(*cwd);
@@ -219,11 +175,11 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
remap:
buf = mmap(NULL, page_size * mmap_window, PROT_READ,
- MAP_SHARED, input, offset);
+ MAP_SHARED, self->fd, offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
- goto out_delete;
+ goto out_err;
}
more:
@@ -273,19 +229,14 @@ more:
head += size;
- if (offset + head >= header->data_offset + header->data_size)
+ if (offset + head >= self->header.data_offset + self->header.data_size)
goto done;
- if (offset + head < (unsigned long)input_stat.st_size)
+ if (offset + head < self->size)
goto more;
done:
err = 0;
-out_close:
- close(input);
-
+out_err:
return err;
-out_delete:
- perf_header__delete(header);
- goto out_close;
}
diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h
index 258a87bcc4fb..98c5b823388c 100644
--- a/tools/perf/util/data_map.h
+++ b/tools/perf/util/data_map.h
@@ -3,6 +3,7 @@
#include "event.h"
#include "header.h"
+#include "session.h"
typedef int (*event_type_handler_t)(event_t *);
@@ -21,12 +22,8 @@ struct perf_file_handler {
};
void register_perf_file_handler(struct perf_file_handler *handler);
-int mmap_dispatch_perf_file(struct perf_header **pheader,
- const char *input_name,
- int force,
- int full_paths,
- int *cwdlen,
- char **cwd);
+int perf_session__process_events(struct perf_session *self,
+ int full_paths, int *cwdlen, char **cwd);
int perf_header__read_build_ids(int input, u64 offset, u64 file_size);
#endif
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 4dcecafa85dc..ba0de90cd3d4 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -254,13 +254,14 @@ void thread__find_addr_location(struct thread *self, u8 cpumode,
struct addr_location *al,
symbol_filter_t filter)
{
- struct thread *thread = al->thread = self;
+ struct map_groups *mg = &self->mg;
+ al->thread = self;
al->addr = addr;
if (cpumode & PERF_RECORD_MISC_KERNEL) {
al->level = 'k';
- thread = kthread;
+ mg = kmaps;
} else if (cpumode & PERF_RECORD_MISC_USER)
al->level = '.';
else {
@@ -270,7 +271,7 @@ void thread__find_addr_location(struct thread *self, u8 cpumode,
return;
}
try_again:
- al->map = thread__find_map(thread, type, al->addr);
+ al->map = map_groups__find(mg, type, al->addr);
if (al->map == NULL) {
/*
* If this is outside of all known maps, and is a negative
@@ -281,8 +282,8 @@ try_again:
* "[vdso]" dso, but for now lets use the old trick of looking
* in the whole kernel symbol list.
*/
- if ((long long)al->addr < 0 && thread != kthread) {
- thread = kthread;
+ if ((long long)al->addr < 0 && mg != kmaps) {
+ mg = kmaps;
goto try_again;
}
al->sym = NULL;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c7a78eef8e52..51a96c2effde 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -103,10 +103,11 @@ void event__print_totals(void);
enum map_type {
MAP__FUNCTION = 0,
-
- MAP__NR_TYPES,
+ MAP__VARIABLE,
};
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
struct map {
union {
struct rb_node rb_node;
@@ -150,6 +151,8 @@ int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *self, FILE *fp);
struct symbol *map__find_symbol(struct map *self, u64 addr,
symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter);
void map__fixup_start(struct map *self);
void map__fixup_end(struct map *self);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 59a9c0b3033e..f2e8d8715111 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -58,35 +58,19 @@ int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
return 0;
}
-/*
- * Create new perf.data header:
- */
-struct perf_header *perf_header__new(void)
+int perf_header__init(struct perf_header *self)
{
- struct perf_header *self = zalloc(sizeof(*self));
-
- if (self != NULL) {
- self->size = 1;
- self->attr = malloc(sizeof(void *));
-
- if (self->attr == NULL) {
- free(self);
- self = NULL;
- }
- }
-
- return self;
+ self->size = 1;
+ self->attr = malloc(sizeof(void *));
+ return self->attr == NULL ? -ENOMEM : 0;
}
-void perf_header__delete(struct perf_header *self)
+void perf_header__exit(struct perf_header *self)
{
int i;
-
for (i = 0; i < self->attrs; ++i)
- perf_header_attr__delete(self->attr[i]);
-
+ perf_header_attr__delete(self->attr[i]);
free(self->attr);
- free(self);
}
int perf_header__add_attr(struct perf_header *self,
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d1dbe2b79c42..d118d05d3abe 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -55,8 +55,8 @@ struct perf_header {
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
-struct perf_header *perf_header__new(void);
-void perf_header__delete(struct perf_header *self);
+int perf_header__init(struct perf_header *self);
+void perf_header__exit(struct perf_header *self);
int perf_header__read(struct perf_header *self, int fd);
int perf_header__write(struct perf_header *self, int fd, bool at_exit);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 69f94fe9db20..76bdca640a9b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -104,43 +104,64 @@ void map__fixup_end(struct map *self)
#define DSO__DELETED "(deleted)"
-struct symbol *map__find_symbol(struct map *self, u64 addr,
- symbol_filter_t filter)
+static int map__load(struct map *self, symbol_filter_t filter)
{
- if (!dso__loaded(self->dso, self->type)) {
- int nr = dso__load(self->dso, self, filter);
-
- if (nr < 0) {
- if (self->dso->has_build_id) {
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(self->dso->build_id,
- sizeof(self->dso->build_id),
- sbuild_id);
- pr_warning("%s with build id %s not found",
- self->dso->long_name, sbuild_id);
- } else
- pr_warning("Failed to open %s",
- self->dso->long_name);
- pr_warning(", continuing without symbols\n");
- return NULL;
- } else if (nr == 0) {
- const char *name = self->dso->long_name;
- const size_t len = strlen(name);
- const size_t real_len = len - sizeof(DSO__DELETED);
-
- if (len > sizeof(DSO__DELETED) &&
- strcmp(name + real_len + 1, DSO__DELETED) == 0) {
- pr_warning("%.*s was updated, restart the long running apps that use it!\n",
- (int)real_len, name);
- } else {
- pr_warning("no symbols found in %s, maybe install a debug package?\n", name);
- }
- return NULL;
+ const char *name = self->dso->long_name;
+ int nr = dso__load(self->dso, self, filter);
+
+ if (nr < 0) {
+ if (self->dso->has_build_id) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(self->dso->build_id,
+ sizeof(self->dso->build_id),
+ sbuild_id);
+ pr_warning("%s with build id %s not found",
+ name, sbuild_id);
+ } else
+ pr_warning("Failed to open %s", name);
+
+ pr_warning(", continuing without symbols\n");
+ return -1;
+ } else if (nr == 0) {
+ const size_t len = strlen(name);
+ const size_t real_len = len - sizeof(DSO__DELETED);
+
+ if (len > sizeof(DSO__DELETED) &&
+ strcmp(name + real_len + 1, DSO__DELETED) == 0) {
+ pr_warning("%.*s was updated, restart the long "
+ "running apps that use it!\n",
+ (int)real_len, name);
+ } else {
+ pr_warning("no symbols found in %s, maybe install "
+ "a debug package?\n", name);
}
+
+ return -1;
}
- return self->dso->find_symbol(self->dso, self->type, addr);
+ return 0;
+}
+
+struct symbol *map__find_symbol(struct map *self, u64 addr,
+ symbol_filter_t filter)
+{
+ if (!dso__loaded(self->dso, self->type) && map__load(self, filter) < 0)
+ return NULL;
+
+ return dso__find_symbol(self->dso, self->type, addr);
+}
+
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter)
+{
+ if (!dso__loaded(self->dso, self->type) && map__load(self, filter) < 0)
+ return NULL;
+
+ if (!dso__sorted_by_name(self->dso, self->type))
+ dso__sort_by_name(self->dso, self->type);
+
+ return dso__find_symbol_by_name(self->dso, self->type, name);
}
struct map *map__clone(struct map *self)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
new file mode 100644
index 000000000000..707ce1cb1621
--- /dev/null
+++ b/tools/perf/util/session.c
@@ -0,0 +1,80 @@
+#include <linux/kernel.h>
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "session.h"
+#include "util.h"
+
+static int perf_session__open(struct perf_session *self, bool force)
+{
+ struct stat input_stat;
+
+ self->fd = open(self->filename, O_RDONLY);
+ if (self->fd < 0) {
+ pr_err("failed to open file: %s", self->filename);
+ if (!strcmp(self->filename, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -errno;
+ }
+
+ if (fstat(self->fd, &input_stat) < 0)
+ goto out_close;
+
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (!input_stat.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (perf_header__read(&self->header, self->fd) < 0) {
+ pr_err("incompatible file format");
+ goto out_close;
+ }
+
+ self->size = input_stat.st_size;
+ return 0;
+
+out_close:
+ close(self->fd);
+ self->fd = -1;
+ return -1;
+}
+
+struct perf_session *perf_session__new(const char *filename, int mode, bool force)
+{
+ size_t len = strlen(filename) + 1;
+ struct perf_session *self = zalloc(sizeof(*self) + len);
+
+ if (self == NULL)
+ goto out;
+
+ if (perf_header__init(&self->header) < 0)
+ goto out_delete;
+
+ memcpy(self->filename, filename, len);
+
+ if (mode == O_RDONLY && perf_session__open(self, force) < 0) {
+ perf_session__delete(self);
+ self = NULL;
+ }
+out:
+ return self;
+out_delete:
+ free(self);
+ return NULL;
+}
+
+void perf_session__delete(struct perf_session *self)
+{
+ perf_header__exit(&self->header);
+ close(self->fd);
+ free(self);
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
new file mode 100644
index 000000000000..f3699c8c8ed4
--- /dev/null
+++ b/tools/perf/util/session.h
@@ -0,0 +1,16 @@
+#ifndef __PERF_SESSION_H
+#define __PERF_SESSION_H
+
+#include "header.h"
+
+struct perf_session {
+ struct perf_header header;
+ unsigned long size;
+ int fd;
+ char filename[0];
+};
+
+struct perf_session *perf_session__new(const char *filename, int mode, bool force);
+void perf_session__delete(struct perf_session *self);
+
+#endif /* __PERF_SESSION_H */
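
Taken together, the builtin conversions above all follow the same lifetime for
the new object. This sketch mirrors the read_events() added to builtin-kmem.c
and builtin-sched.c earlier in this patch and assumes the perf tree context
(input_name, file_handler, event__cwdlen and event__cwd are the per-builtin
globals shown there):

    #include "util/session.h"
    #include "util/data_map.h"

    static int read_events(void)
    {
        int err;
        struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);

        if (session == NULL)
            return -ENOMEM;

        register_idle_thread();
        register_perf_file_handler(&file_handler);

        err = perf_session__process_events(session, 0,
                                           &event__cwdlen, &event__cwd);
        perf_session__delete(session);
        return err;
    }
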
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index e7508ad3450f..d3d9fed74f1d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -29,11 +29,9 @@ enum dso_origin {
};
static void dsos__add(struct list_head *head, struct dso *dso);
-static struct map *thread__find_map_by_name(struct thread *self, char *name);
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter);
+ struct map_groups *mg, symbol_filter_t filter);
unsigned int symbol__priv_size;
static int vmlinux_path__nr_entries;
static char **vmlinux_path;
@@ -43,19 +41,41 @@ static struct symbol_conf symbol_conf__defaults = {
.try_vmlinux_path = true,
};
-static struct thread kthread_mem;
-struct thread *kthread = &kthread_mem;
+static struct map_groups kmaps_mem;
+struct map_groups *kmaps = &kmaps_mem;
bool dso__loaded(const struct dso *self, enum map_type type)
{
return self->loaded & (1 << type);
}
+bool dso__sorted_by_name(const struct dso *self, enum map_type type)
+{
+ return self->sorted_by_name & (1 << type);
+}
+
static void dso__set_loaded(struct dso *self, enum map_type type)
{
self->loaded |= (1 << type);
}
+static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
+{
+ self->sorted_by_name |= (1 << type);
+}
+
+static bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+{
+ switch (map_type) {
+ case MAP__FUNCTION:
+ return symbol_type == 'T' || symbol_type == 'W';
+ case MAP__VARIABLE:
+ return symbol_type == 'D' || symbol_type == 'd';
+ default:
+ return false;
+ }
+}
+
static void symbols__fixup_end(struct rb_root *self)
{
struct rb_node *nd, *prevnd = rb_first(self);
@@ -79,7 +99,7 @@ static void symbols__fixup_end(struct rb_root *self)
curr->end = roundup(curr->start, 4096);
}
-static void __thread__fixup_maps_end(struct thread *self, enum map_type type)
+static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
{
struct map *prev, *curr;
struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
@@ -102,11 +122,11 @@ static void __thread__fixup_maps_end(struct thread *self, enum map_type type)
curr->end = ~0UL;
}
-static void thread__fixup_maps_end(struct thread *self)
+static void map_groups__fixup_end(struct map_groups *self)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- __thread__fixup_maps_end(self, i);
+ __map_groups__fixup_end(self, i);
}
static struct symbol *symbol__new(u64 start, u64 len, const char *name)
@@ -164,11 +184,11 @@ struct dso *dso__new(const char *name)
dso__set_long_name(self, self->name);
self->short_name = self->name;
for (i = 0; i < MAP__NR_TYPES; ++i)
- self->symbols[i] = RB_ROOT;
- self->find_symbol = dso__find_symbol;
+ self->symbols[i] = self->symbol_names[i] = RB_ROOT;
self->slen_calculated = 0;
self->origin = DSO__ORIG_NOT_FOUND;
self->loaded = 0;
+ self->sorted_by_name = 0;
self->has_build_id = 0;
}
@@ -246,11 +266,85 @@ static struct symbol *symbols__find(struct rb_root *self, u64 ip)
return NULL;
}
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr)
+struct symbol_name_rb_node {
+ struct rb_node rb_node;
+ struct symbol sym;
+};
+
+static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
+ if (strcmp(sym->name, s->sym.name) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&symn->rb_node, parent, p);
+ rb_insert_color(&symn->rb_node, self);
+}
+
+static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ symbols__insert_by_name(self, pos);
+ }
+}
+
+static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
+{
+ struct rb_node *n;
+
+ if (self == NULL)
+ return NULL;
+
+ n = self->rb_node;
+
+ while (n) {
+ struct symbol_name_rb_node *s;
+ int cmp;
+
+ s = rb_entry(n, struct symbol_name_rb_node, rb_node);
+ cmp = strcmp(name, s->sym.name);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return &s->sym;
+ }
+
+ return NULL;
+}
+
+struct symbol *dso__find_symbol(struct dso *self,
+ enum map_type type, u64 addr)
{
return symbols__find(&self->symbols[type], addr);
}
+struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+ const char *name)
+{
+ return symbols__find_by_name(&self->symbol_names[type], name);
+}
+
+void dso__sort_by_name(struct dso *self, enum map_type type)
+{
+ dso__set_sorted_by_name(self, type);
+ return symbols__sort_by_name(&self->symbol_names[type],
+ &self->symbols[type]);
+}
+
int build_id__sprintf(u8 *self, int len, char *bf)
{
char *bid = bf;
@@ -327,10 +421,7 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map)
continue;
symbol_type = toupper(line[len]);
- /*
- * We're interested only in code ('T'ext)
- */
- if (symbol_type != 'T' && symbol_type != 'W')
+ if (!symbol_type__is_a(symbol_type, map->type))
continue;
symbol_name = line + len + 2;
@@ -364,8 +455,8 @@ out_failure:
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
-static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread *thread,
- symbol_filter_t filter)
+static int dso__split_kallsyms(struct dso *self, struct map *map,
+ struct map_groups *mg, symbol_filter_t filter)
{
struct map *curr_map = map;
struct symbol *pos;
@@ -382,13 +473,13 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
module = strchr(pos->name, '\t');
if (module) {
- if (!thread->use_modules)
+ if (!mg->use_modules)
goto discard_symbol;
*module++ = '\0';
if (strcmp(self->name, module)) {
- curr_map = thread__find_map_by_name(thread, module);
+ curr_map = map_groups__find_by_name(mg, map->type, module);
if (curr_map == NULL) {
pr_debug("/proc/{kallsyms,modules} "
"inconsistency!\n");
@@ -419,7 +510,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
}
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
- __thread__insert_map(thread, curr_map);
+ map_groups__insert(mg, curr_map);
++kernel_range;
}
@@ -440,7 +531,7 @@ discard_symbol: rb_erase(&pos->rb_node, root);
static int dso__load_kallsyms(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter)
+ struct map_groups *mg, symbol_filter_t filter)
{
if (dso__load_all_kallsyms(self, map) < 0)
return -1;
@@ -448,13 +539,13 @@ static int dso__load_kallsyms(struct dso *self, struct map *map,
symbols__fixup_end(&self->symbols[map->type]);
self->origin = DSO__ORIG_KERNEL;
- return dso__split_kallsyms(self, map, thread, filter);
+ return dso__split_kallsyms(self, map, mg, filter);
}
size_t kernel_maps__fprintf(FILE *fp)
{
size_t printed = fprintf(fp, "Kernel maps:\n");
- printed += thread__fprintf_maps(kthread, fp);
+ printed += map_groups__fprintf_maps(kmaps, fp);
return printed + fprintf(fp, "END kernel maps\n");
}
@@ -544,6 +635,13 @@ static inline int elf_sym__is_function(const GElf_Sym *sym)
sym->st_shndx != SHN_UNDEF;
}
+static inline bool elf_sym__is_object(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_OBJECT &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
static inline int elf_sym__is_label(const GElf_Sym *sym)
{
return elf_sym__type(sym) == STT_NOTYPE &&
@@ -564,6 +662,12 @@ static inline int elf_sec__is_text(const GElf_Shdr *shdr,
return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}
+static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
+}
+
static inline const char *elf_sym__name(const GElf_Sym *sym,
const Elf_Data *symstrs)
{
@@ -744,8 +848,32 @@ out:
return 0;
}
+static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sym__is_function(self);
+ case MAP__VARIABLE:
+ return elf_sym__is_object(self);
+ default:
+ return false;
+ }
+}
+
+static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sec__is_text(self, secstrs);
+ case MAP__VARIABLE:
+ return elf_sec__is_data(self, secstrs);
+ default:
+ return false;
+ }
+}
+
static int dso__load_sym(struct dso *self, struct map *map,
- struct thread *thread, const char *name, int fd,
+ struct map_groups *mg, const char *name, int fd,
symbol_filter_t filter, int kernel, int kmodule)
{
struct map *curr_map = map;
@@ -818,7 +946,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
int is_label = elf_sym__is_label(&sym);
const char *section_name;
- if (!is_label && !elf_sym__is_function(&sym))
+ if (!is_label && !elf_sym__is_a(&sym, map->type))
continue;
sec = elf_getscn(elf, sym.st_shndx);
@@ -827,7 +955,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
gelf_getshdr(sec, &shdr);
- if (is_label && !elf_sec__is_text(&shdr, secstrs))
+ if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
continue;
elf_name = elf_sym__name(&sym, symstrs);
@@ -849,7 +977,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
snprintf(dso_name, sizeof(dso_name),
"%s%s", self->short_name, section_name);
- curr_map = thread__find_map_by_name(thread, dso_name);
+ curr_map = map_groups__find_by_name(mg, map->type, dso_name);
if (curr_map == NULL) {
u64 start = sym.st_value;
@@ -868,7 +996,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
curr_dso->origin = DSO__ORIG_KERNEL;
- __thread__insert_map(kthread, curr_map);
+ map_groups__insert(kmaps, curr_map);
dsos__add(&dsos__kernel, curr_dso);
} else
curr_dso = curr_map->dso;
@@ -1094,7 +1222,7 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
dso__set_loaded(self, map->type);
if (self->kernel)
- return dso__load_kernel_sym(self, map, kthread, filter);
+ return dso__load_kernel_sym(self, map, kmaps, filter);
name = malloc(size);
if (!name)
@@ -1180,11 +1308,12 @@ out:
return ret;
}
-static struct map *thread__find_map_by_name(struct thread *self, char *name)
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name)
{
struct rb_node *nd;
- for (nd = rb_first(&self->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
if (map->dso && strcmp(map->dso->name, name) == 0)
@@ -1228,7 +1357,7 @@ static int dsos__set_modules_path_dir(char *dirname)
(int)(dot - dent->d_name), dent->d_name);
strxfrchar(dso_name, '-', '_');
- map = thread__find_map_by_name(kthread, dso_name);
+ map = map_groups__find_by_name(kmaps, MAP__FUNCTION, dso_name);
if (map == NULL)
continue;
@@ -1281,7 +1410,7 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
return self;
}
-static int thread__create_module_maps(struct thread *self)
+static int map_groups__create_module_maps(struct map_groups *self)
{
char *line = NULL;
size_t n;
@@ -1338,7 +1467,7 @@ static int thread__create_module_maps(struct thread *self)
dso->has_build_id = true;
dso->origin = DSO__ORIG_KMODULE;
- __thread__insert_map(self, map);
+ map_groups__insert(self, map);
dsos__add(&dsos__kernel, dso);
}
@@ -1353,7 +1482,8 @@ out_failure:
return -1;
}
-static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *thread,
+static int dso__load_vmlinux(struct dso *self, struct map *map,
+ struct map_groups *mg,
const char *vmlinux, symbol_filter_t filter)
{
int err = -1, fd;
@@ -1387,14 +1517,14 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *t
return -1;
dso__set_loaded(self, map->type);
- err = dso__load_sym(self, map, thread, self->long_name, fd, filter, 1, 0);
+ err = dso__load_sym(self, map, mg, self->long_name, fd, filter, 1, 0);
close(fd);
return err;
}
static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter)
+ struct map_groups *mg, symbol_filter_t filter)
{
int err;
bool is_kallsyms;
@@ -1404,7 +1534,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
pr_debug("Looking at the vmlinux_path (%d entries long)\n",
vmlinux_path__nr_entries);
for (i = 0; i < vmlinux_path__nr_entries; ++i) {
- err = dso__load_vmlinux(self, map, thread,
+ err = dso__load_vmlinux(self, map, mg,
vmlinux_path[i], filter);
if (err > 0) {
pr_debug("Using %s for symbols\n",
@@ -1420,12 +1550,12 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
if (is_kallsyms)
goto do_kallsyms;
- err = dso__load_vmlinux(self, map, thread, self->long_name, filter);
+ err = dso__load_vmlinux(self, map, mg, self->long_name, filter);
if (err <= 0) {
pr_info("The file %s cannot be used, "
"trying to use /proc/kallsyms...", self->long_name);
do_kallsyms:
- err = dso__load_kallsyms(self, map, thread, filter);
+ err = dso__load_kallsyms(self, map, mg, filter);
if (err > 0 && !is_kallsyms)
dso__set_long_name(self, strdup("[kernel.kallsyms]"));
}
@@ -1508,42 +1638,59 @@ size_t dsos__fprintf_buildid(FILE *fp)
__dsos__fprintf_buildid(&dsos__user, fp));
}
-static int thread__create_kernel_map(struct thread *self, const char *vmlinux)
+static struct dso *dsos__create_kernel(const char *vmlinux)
{
- struct map *kmap;
struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]");
if (kernel == NULL)
- return -1;
-
- kmap = map__new2(0, kernel, MAP__FUNCTION);
- if (kmap == NULL)
- goto out_delete_kernel_dso;
+ return NULL;
- kmap->map_ip = kmap->unmap_ip = identity__map_ip;
kernel->short_name = "[kernel]";
kernel->kernel = 1;
vdso = dso__new("[vdso]");
if (vdso == NULL)
- goto out_delete_kernel_map;
+ goto out_delete_kernel_dso;
dso__set_loaded(vdso, MAP__FUNCTION);
if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id,
sizeof(kernel->build_id)) == 0)
kernel->has_build_id = true;
- __thread__insert_map(self, kmap);
dsos__add(&dsos__kernel, kernel);
dsos__add(&dsos__user, vdso);
- return 0;
+ return kernel;
-out_delete_kernel_map:
- map__delete(kmap);
out_delete_kernel_dso:
dso__delete(kernel);
- return -1;
+ return NULL;
+}
+
+static int map_groups__create_kernel_maps(struct map_groups *self, const char *vmlinux)
+{
+ struct map *functions, *variables;
+ struct dso *kernel = dsos__create_kernel(vmlinux);
+
+ if (kernel == NULL)
+ return -1;
+
+ functions = map__new2(0, kernel, MAP__FUNCTION);
+ if (functions == NULL)
+ return -1;
+
+ variables = map__new2(0, kernel, MAP__VARIABLE);
+ if (variables == NULL) {
+ map__delete(functions);
+ return -1;
+ }
+
+ functions->map_ip = functions->unmap_ip =
+ variables->map_ip = variables->unmap_ip = identity__map_ip;
+ map_groups__insert(self, functions);
+ map_groups__insert(self, variables);
+
+ return 0;
}
static void vmlinux_path__exit(void)
@@ -1607,23 +1754,26 @@ int symbol__init(struct symbol_conf *conf)
elf_version(EV_CURRENT);
symbol__priv_size = pconf->priv_size;
- thread__init(kthread, 0);
+ if (pconf->sort_by_name)
+ symbol__priv_size += (sizeof(struct symbol_name_rb_node) -
+ sizeof(struct symbol));
+ map_groups__init(kmaps);
if (pconf->try_vmlinux_path && vmlinux_path__init() < 0)
return -1;
- if (thread__create_kernel_map(kthread, pconf->vmlinux_name) < 0) {
+ if (map_groups__create_kernel_maps(kmaps, pconf->vmlinux_name) < 0) {
vmlinux_path__exit();
return -1;
}
- kthread->use_modules = pconf->use_modules;
- if (pconf->use_modules && thread__create_module_maps(kthread) < 0)
+ kmaps->use_modules = pconf->use_modules;
+ if (pconf->use_modules && map_groups__create_module_maps(kmaps) < 0)
pr_debug("Failed to load list of modules in use, "
"continuing...\n");
/*
* Now that we have all the maps created, just set the ->end of them:
*/
- thread__fixup_maps_end(kthread);
+ map_groups__fixup_end(kmaps);
return 0;
}
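
One subtlety in the by-name lookup added above: symbols__insert_by_name() steps
back from a struct symbol to its enclosing struct symbol_name_rb_node by
subtracting sizeof(struct rb_node), which only works because symbol__init()
grows symbol__priv_size by the size difference when sort_by_name is set, so
every symbol is allocated with the extra rb_node directly in front of it. A
stand-alone sketch of the recovery step (stand-in types, illustration only):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the real rbtree and symbol types. */
    struct rb_node { struct rb_node *rb_left, *rb_right; };
    struct symbol  { unsigned long long start, end; const char *name; };

    struct symbol_name_rb_node {
        struct rb_node rb_node;
        struct symbol  sym;
    };

    int main(void)
    {
        struct symbol_name_rb_node node = { .sym = { .name = "schedule" } };
        struct symbol *sym = &node.sym;

        /* The patch computes ((void *)sym) - sizeof(struct rb_node); with this
         * layout that is equivalent to stepping back by offsetof(). */
        struct symbol_name_rb_node *symn = (struct symbol_name_rb_node *)
                ((char *)sym - offsetof(struct symbol_name_rb_node, sym));

        printf("recovered %s (%p == %p)\n",
               symn->sym.name, (void *)symn, (void *)&node);
        return 0;
    }
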
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 17003efa0b39..cf99f88adf39 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -52,7 +52,8 @@ struct symbol {
struct symbol_conf {
unsigned short priv_size;
bool try_vmlinux_path,
- use_modules;
+ use_modules,
+ sort_by_name;
const char *vmlinux_name;
};
@@ -74,13 +75,13 @@ struct addr_location {
struct dso {
struct list_head node;
struct rb_root symbols[MAP__NR_TYPES];
- struct symbol *(*find_symbol)(struct dso *self,
- enum map_type type, u64 addr);
+ struct rb_root symbol_names[MAP__NR_TYPES];
u8 adjust_symbols:1;
u8 slen_calculated:1;
u8 has_build_id:1;
u8 kernel:1;
unsigned char origin;
+ u8 sorted_by_name;
u8 loaded;
u8 build_id[BUILD_ID_SIZE];
u16 long_name_len;
@@ -93,6 +94,9 @@ struct dso *dso__new(const char *name);
void dso__delete(struct dso *self);
bool dso__loaded(const struct dso *self, enum map_type type);
+bool dso__sorted_by_name(const struct dso *self, enum map_type type);
+
+void dso__sort_by_name(struct dso *self, enum map_type type);
struct dso *dsos__findnew(const char *name);
int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
@@ -103,6 +107,9 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
char dso__symtab_origin(const struct dso *self);
void dso__set_build_id(struct dso *self, void *build_id);
+struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
+struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+ const char *name);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
@@ -113,8 +120,8 @@ size_t kernel_maps__fprintf(FILE *fp);
int symbol__init(struct symbol_conf *conf);
-struct thread;
-struct thread *kthread;
+struct map_groups;
+struct map_groups *kmaps;
extern struct list_head dsos__user, dsos__kernel;
extern struct dso *vdso;
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 603f5610861b..b68a00ea4121 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -9,11 +9,9 @@
static struct rb_root threads;
static struct thread *last_match;
-void thread__init(struct thread *self, pid_t pid)
+void map_groups__init(struct map_groups *self)
{
int i;
- self->pid = pid;
- self->comm = NULL;
for (i = 0; i < MAP__NR_TYPES; ++i) {
self->maps[i] = RB_ROOT;
INIT_LIST_HEAD(&self->removed_maps[i]);
@@ -25,7 +23,8 @@ static struct thread *thread__new(pid_t pid)
struct thread *self = zalloc(sizeof(*self));
if (self != NULL) {
- thread__init(self, pid);
+ map_groups__init(&self->mg);
+ self->pid = pid;
self->comm = malloc(32);
if (self->comm)
snprintf(self->comm, 32, ":%d", self->pid);
@@ -55,10 +54,11 @@ int thread__comm_len(struct thread *self)
static const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
+ [MAP__VARIABLE] = "Variables",
};
-static size_t __thread__fprintf_maps(struct thread *self,
- enum map_type type, FILE *fp)
+static size_t __map_groups__fprintf_maps(struct map_groups *self,
+ enum map_type type, FILE *fp)
{
size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
struct rb_node *nd;
@@ -76,16 +76,16 @@ static size_t __thread__fprintf_maps(struct thread *self,
return printed;
}
-size_t thread__fprintf_maps(struct thread *self, FILE *fp)
+size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
{
size_t printed = 0, i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __thread__fprintf_maps(self, i, fp);
+ printed += __map_groups__fprintf_maps(self, i, fp);
return printed;
}
-static size_t __thread__fprintf_removed_maps(struct thread *self,
- enum map_type type, FILE *fp)
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+ enum map_type type, FILE *fp)
{
struct map *pos;
size_t printed = 0;
@@ -101,20 +101,25 @@ static size_t __thread__fprintf_removed_maps(struct thread *self,
return printed;
}
-static size_t thread__fprintf_removed_maps(struct thread *self, FILE *fp)
+static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
{
size_t printed = 0, i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __thread__fprintf_removed_maps(self, i, fp);
+ printed += __map_groups__fprintf_removed_maps(self, i, fp);
return printed;
}
-static size_t thread__fprintf(struct thread *self, FILE *fp)
+static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
- size_t printed = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
- printed += thread__fprintf_removed_maps(self, fp);
+ size_t printed = map_groups__fprintf_maps(self, fp);
printed += fprintf(fp, "Removed maps:\n");
- return printed + thread__fprintf_removed_maps(self, fp);
+ return printed + map_groups__fprintf_removed_maps(self, fp);
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+ return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
+ map_groups__fprintf(&self->mg, fp);
}
struct thread *threads__findnew(pid_t pid)
@@ -168,7 +173,8 @@ struct thread *register_idle_thread(void)
return thread;
}
-static void thread__remove_overlappings(struct thread *self, struct map *map)
+static void map_groups__remove_overlappings(struct map_groups *self,
+ struct map *map)
{
struct rb_root *root = &self->maps[map->type];
struct rb_node *next = rb_first(root);
@@ -238,12 +244,15 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
void thread__insert_map(struct thread *self, struct map *map)
{
- thread__remove_overlappings(self, map);
- maps__insert(&self->maps[map->type], map);
+ map_groups__remove_overlappings(&self->mg, map);
+ map_groups__insert(&self->mg, map);
}
-static int thread__clone_maps(struct thread *self, struct thread *parent,
- enum map_type type)
+/*
+ * XXX This should not really _copy_ the maps, but refcount them.
+ */
+static int map_groups__clone(struct map_groups *self,
+ struct map_groups *parent, enum map_type type)
{
struct rb_node *nd;
for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
@@ -251,7 +260,7 @@ static int thread__clone_maps(struct thread *self, struct thread *parent,
struct map *new = map__clone(map);
if (new == NULL)
return -ENOMEM;
- thread__insert_map(self, new);
+ map_groups__insert(self, new);
}
return 0;
}
@@ -267,7 +276,7 @@ int thread__fork(struct thread *self, struct thread *parent)
return -ENOMEM;
for (i = 0; i < MAP__NR_TYPES; ++i)
- if (thread__clone_maps(self, parent, i) < 0)
+ if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
return -ENOMEM;
return 0;
}
@@ -286,11 +295,11 @@ size_t threads__fprintf(FILE *fp)
return ret;
}
-struct symbol *thread__find_symbol(struct thread *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter)
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ enum map_type type, u64 addr,
+ symbol_filter_t filter)
{
- struct map *map = thread__find_map(self, type, addr);
+ struct map *map = map_groups__find(self, type, addr);
if (map != NULL)
return map__find_symbol(map, map->map_ip(map, addr), filter);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 686d6e914d9e..1751802a09ba 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,52 +5,66 @@
#include <unistd.h>
#include "symbol.h"
-struct thread {
- struct rb_node rb_node;
+struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
- pid_t pid;
bool use_modules;
+};
+
+struct thread {
+ struct rb_node rb_node;
+ struct map_groups mg;
+ pid_t pid;
char shortname[3];
char *comm;
int comm_len;
};
-void thread__init(struct thread *self, pid_t pid);
+void map_groups__init(struct map_groups *self);
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
struct thread *threads__findnew(pid_t pid);
struct thread *register_idle_thread(void);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-size_t thread__fprintf_maps(struct thread *self, FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp);
size_t threads__fprintf(FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
-static inline struct map *thread__find_map(struct thread *self,
+static inline void map_groups__insert(struct map_groups *self, struct map *map)
+{
+ maps__insert(&self->maps[map->type], map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *self,
enum map_type type, u64 addr)
{
- return self ? maps__find(&self->maps[type], addr) : NULL;
+ return maps__find(&self->maps[type], addr);
}
-static inline void __thread__insert_map(struct thread *self, struct map *map)
+static inline struct map *thread__find_map(struct thread *self,
+ enum map_type type, u64 addr)
{
- maps__insert(&self->maps[map->type], map);
+ return self ? map_groups__find(&self->mg, type, addr) : NULL;
}
void thread__find_addr_location(struct thread *self, u8 cpumode,
enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter);
-struct symbol *thread__find_symbol(struct thread *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter);
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ enum map_type type, u64 addr,
+ symbol_filter_t filter);
static inline struct symbol *
-thread__find_function(struct thread *self, u64 addr, symbol_filter_t filter)
+map_groups__find_function(struct map_groups *self, u64 addr,
+ symbol_filter_t filter)
{
- return thread__find_symbol(self, MAP__FUNCTION, addr, filter);
+ return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter);
}
+
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name);
#endif /* __PERF_THREAD_H */