-rw-r--r--CREDITS5
-rw-r--r--Documentation/DocBook/Makefile3
-rw-r--r--Documentation/DocBook/genericirq.tmpl474
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl2
-rw-r--r--Documentation/IRQ.txt22
-rw-r--r--Documentation/RCU/torture.txt34
-rw-r--r--Documentation/arm/Samsung-S3C24XX/Overview.txt35
-rw-r--r--Documentation/arm/Samsung-S3C24XX/S3C2412.txt120
-rw-r--r--Documentation/arm/Samsung-S3C24XX/S3C2413.txt21
-rw-r--r--Documentation/atomic_ops.txt28
-rw-r--r--Documentation/console/console.txt144
-rw-r--r--Documentation/driver-model/overview.txt2
-rw-r--r--Documentation/fb/fbcon.txt180
-rw-r--r--Documentation/feature-removal-schedule.txt10
-rw-r--r--Documentation/filesystems/ext3.txt8
-rw-r--r--Documentation/kbuild/makefiles.txt8
-rw-r--r--Documentation/kdump/gdbmacros.txt2
-rw-r--r--Documentation/kernel-parameters.txt32
-rw-r--r--Documentation/keys-request-key.txt54
-rw-r--r--Documentation/keys.txt72
-rw-r--r--Documentation/md.txt67
-rw-r--r--Documentation/pi-futex.txt121
-rw-r--r--Documentation/robust-futexes.txt2
-rw-r--r--Documentation/rt-mutex-design.txt781
-rw-r--r--Documentation/rt-mutex.txt79
-rw-r--r--Documentation/scsi/ppa.txt2
-rw-r--r--Documentation/tty.txt7
-rw-r--r--Documentation/video4linux/README.pvrusb2212
-rw-r--r--Documentation/watchdog/pcwd-watchdog.txt75
-rw-r--r--Documentation/watchdog/src/watchdog-simple.c15
-rw-r--r--Documentation/watchdog/src/watchdog-test.c68
-rw-r--r--Documentation/watchdog/watchdog-api.txt56
-rw-r--r--Documentation/watchdog/watchdog.txt23
-rw-r--r--Documentation/x86_64/boot-options.txt21
-rw-r--r--MAINTAINERS18
-rw-r--r--Makefile213
-rw-r--r--arch/alpha/kernel/irq.c8
-rw-r--r--arch/alpha/kernel/irq_alpha.c2
-rw-r--r--arch/alpha/kernel/irq_i8259.c2
-rw-r--r--arch/alpha/kernel/irq_pyxis.c2
-rw-r--r--arch/alpha/kernel/irq_srm.c2
-rw-r--r--arch/alpha/kernel/pci.c4
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/alpha/kernel/sys_alcor.c2
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c2
-rw-r--r--arch/alpha/kernel/sys_dp264.c2
-rw-r--r--arch/alpha/kernel/sys_eb64p.c2
-rw-r--r--arch/alpha/kernel/sys_eiger.c2
-rw-r--r--arch/alpha/kernel/sys_jensen.c10
-rw-r--r--arch/alpha/kernel/sys_marvel.c6
-rw-r--r--arch/alpha/kernel/sys_mikasa.c2
-rw-r--r--arch/alpha/kernel/sys_noritake.c2
-rw-r--r--arch/alpha/kernel/sys_rawhide.c2
-rw-r--r--arch/alpha/kernel/sys_rx164.c2
-rw-r--r--arch/alpha/kernel/sys_sable.c2
-rw-r--r--arch/alpha/kernel/sys_takara.c2
-rw-r--r--arch/alpha/kernel/sys_titan.c2
-rw-r--r--arch/alpha/kernel/sys_wildfire.c6
-rw-r--r--arch/alpha/oprofile/common.c2
-rw-r--r--arch/arm/Kconfig13
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/head-at91rm9200.S6
-rw-r--r--arch/arm/boot/compressed/ll_char_wr.S6
-rw-r--r--arch/arm/common/locomo.c45
-rw-r--r--arch/arm/configs/onearm_defconfig1053
-rw-r--r--arch/arm/configs/s3c2410_defconfig50
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/armksyms.c13
-rw-r--r--arch/arm/kernel/asm-offsets.c3
-rw-r--r--arch/arm/kernel/bios32.c6
-rw-r--r--arch/arm/kernel/crunch-bits.S305
-rw-r--r--arch/arm/kernel/crunch.c83
-rw-r--r--arch/arm/kernel/entry-armv.S6
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/head-nommu.S2
-rw-r--r--arch/arm/kernel/head.S4
-rw-r--r--arch/arm/kernel/ptrace.c36
-rw-r--r--arch/arm/kernel/setup.c44
-rw-r--r--arch/arm/kernel/signal.c39
-rw-r--r--arch/arm/kernel/vmlinux.lds.S8
-rw-r--r--arch/arm/lib/Makefile13
-rw-r--r--arch/arm/lib/backtrace.S13
-rw-r--r--arch/arm/lib/clear_user.S8
-rw-r--r--arch/arm/lib/copy_from_user.S4
-rw-r--r--arch/arm/lib/copy_page.S2
-rw-r--r--arch/arm/lib/copy_to_user.S4
-rw-r--r--arch/arm/lib/csumipv6.S2
-rw-r--r--arch/arm/lib/delay.S18
-rw-r--r--arch/arm/lib/ecard.S4
-rw-r--r--arch/arm/lib/findbit.S10
-rw-r--r--arch/arm/lib/io-readsb.S6
-rw-r--r--arch/arm/lib/io-readsw-armv3.S6
-rw-r--r--arch/arm/lib/io-writesb.S6
-rw-r--r--arch/arm/lib/io-writesw-armv3.S6
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memset.S4
-rw-r--r--arch/arm/lib/memzero.S4
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strncpy_from_user.S7
-rw-r--r--arch/arm/lib/strnlen_user.S9
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/uaccess.S16
-rw-r--r--arch/arm/mach-at91rm9200/Kconfig6
-rw-r--r--arch/arm/mach-at91rm9200/Makefile1
-rw-r--r--arch/arm/mach-at91rm9200/board-1arm.c109
-rw-r--r--arch/arm/mach-ep93xx/Kconfig11
-rw-r--r--arch/arm/mach-ep93xx/Makefile1
-rw-r--r--arch/arm/mach-ep93xx/edb9315.c62
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c2
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c2
-rw-r--r--arch/arm/mach-ixp23xx/espresso.c2
-rw-r--r--arch/arm/mach-ixp23xx/ixdp2351.c2
-rw-r--r--arch/arm/mach-ixp23xx/roadrunner.c2
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig3
-rw-r--r--arch/arm/mach-ixp4xx/Makefile24
-rw-r--r--arch/arm/mach-pxa/irq.c4
-rw-r--r--arch/arm/mach-pxa/sleep.S2
-rw-r--r--arch/arm/mach-s3c2410/Kconfig4
-rw-r--r--arch/arm/mach-s3c2410/s3c244x.c1
-rw-r--r--arch/arm/mach-s3c2410/sleep.S2
-rw-r--r--arch/arm/mach-sa1100/sleep.S2
-rw-r--r--arch/arm/mm/Kconfig67
-rw-r--r--arch/arm/mm/Makefile10
-rw-r--r--arch/arm/mm/copypage-v3.S2
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/iomap.c55
-rw-r--r--arch/arm/mm/ioremap.c47
-rw-r--r--arch/arm/mm/nommu.c39
-rw-r--r--arch/arm/mm/proc-arm1020.S9
-rw-r--r--arch/arm/mm/proc-arm1020e.S9
-rw-r--r--arch/arm/mm/proc-arm1022.S9
-rw-r--r--arch/arm/mm/proc-arm1026.S9
-rw-r--r--arch/arm/mm/proc-arm6_7.S15
-rw-r--r--arch/arm/mm/proc-arm720.S12
-rw-r--r--arch/arm/mm/proc-arm920.S9
-rw-r--r--arch/arm/mm/proc-arm922.S9
-rw-r--r--arch/arm/mm/proc-arm925.S10
-rw-r--r--arch/arm/mm/proc-arm926.S9
-rw-r--r--arch/arm/mm/proc-sa110.S11
-rw-r--r--arch/arm/mm/proc-sa1100.S11
-rw-r--r--arch/arm/mm/proc-v6.S39
-rw-r--r--arch/arm/nwfpe/entry26.S2
-rw-r--r--arch/arm/tools/mach-types71
-rw-r--r--arch/cris/Kconfig4
-rw-r--r--arch/cris/arch-v10/kernel/irq.c2
-rw-r--r--arch/cris/arch-v32/drivers/pci/bios.c4
-rw-r--r--arch/cris/arch-v32/kernel/irq.c2
-rw-r--r--arch/cris/kernel/irq.c2
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.c4
-rw-r--r--arch/i386/Kconfig58
-rw-r--r--arch/i386/Kconfig.cpu2
-rw-r--r--arch/i386/boot/Makefile9
-rw-r--r--arch/i386/boot/compressed/misc.c32
-rw-r--r--arch/i386/boot/video.S19
-rw-r--r--arch/i386/crypto/aes-i586-asm.S29
-rw-r--r--arch/i386/crypto/aes.c20
-rw-r--r--arch/i386/kernel/Makefile8
-rw-r--r--arch/i386/kernel/alternative.c118
-rw-r--r--arch/i386/kernel/apic.c16
-rw-r--r--arch/i386/kernel/apm.c6
-rw-r--r--arch/i386/kernel/asm-offsets.c7
-rw-r--r--arch/i386/kernel/cpu/amd.c22
-rw-r--r--arch/i386/kernel/cpu/common.c25
-rw-r--r--arch/i386/kernel/cpu/cyrix.c2
-rw-r--r--arch/i386/kernel/cpu/intel.c6
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c123
-rw-r--r--arch/i386/kernel/cpu/proc.c8
-rw-r--r--arch/i386/kernel/cpuid.c2
-rw-r--r--arch/i386/kernel/crash.c9
-rw-r--r--arch/i386/kernel/efi.c6
-rw-r--r--arch/i386/kernel/entry.S285
-rw-r--r--arch/i386/kernel/hpet.c67
-rw-r--r--arch/i386/kernel/i8253.c118
-rw-r--r--arch/i386/kernel/i8259.c8
-rw-r--r--arch/i386/kernel/io_apic.c70
-rw-r--r--arch/i386/kernel/irq.c29
-rw-r--r--arch/i386/kernel/kprobes.c95
-rw-r--r--arch/i386/kernel/machine_kexec.c4
-rw-r--r--arch/i386/kernel/msr.c2
-rw-r--r--arch/i386/kernel/nmi.c72
-rw-r--r--arch/i386/kernel/numaq.c10
-rw-r--r--arch/i386/kernel/process.c8
-rw-r--r--arch/i386/kernel/scx200.c66
-rw-r--r--arch/i386/kernel/setup.c109
-rw-r--r--arch/i386/kernel/signal.c4
-rw-r--r--arch/i386/kernel/smp.c12
-rw-r--r--arch/i386/kernel/smpboot.c38
-rw-r--r--arch/i386/kernel/sysenter.c126
-rw-r--r--arch/i386/kernel/time.c157
-rw-r--r--arch/i386/kernel/timers/Makefile9
-rw-r--r--arch/i386/kernel/timers/common.c172
-rw-r--r--arch/i386/kernel/timers/timer.c75
-rw-r--r--arch/i386/kernel/timers/timer_cyclone.c259
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c217
-rw-r--r--arch/i386/kernel/timers/timer_none.c39
-rw-r--r--arch/i386/kernel/timers/timer_pit.c177
-rw-r--r--arch/i386/kernel/timers/timer_pm.c342
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c617
-rw-r--r--arch/i386/kernel/topology.c28
-rw-r--r--arch/i386/kernel/traps.c70
-rw-r--r--arch/i386/kernel/tsc.c478
-rw-r--r--arch/i386/kernel/vmlinux.lds.S9
-rw-r--r--arch/i386/kernel/vsyscall-sysenter.S4
-rw-r--r--arch/i386/kernel/vsyscall.lds.S4
-rw-r--r--arch/i386/lib/delay.c65
-rw-r--r--arch/i386/mach-visws/setup.c8
-rw-r--r--arch/i386/mach-visws/visws_apic.c12
-rw-r--r--arch/i386/mach-voyager/setup.c5
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c4
-rw-r--r--arch/i386/mm/fault.c38
-rw-r--r--arch/i386/mm/init.c5
-rw-r--r--arch/i386/mm/pageattr.c4
-rw-r--r--arch/i386/oprofile/nmi_int.c4
-rw-r--r--arch/i386/oprofile/op_model_athlon.c1
-rw-r--r--arch/i386/oprofile/op_model_p4.c1
-rw-r--r--arch/i386/oprofile/op_model_ppro.c1
-rw-r--r--arch/i386/pci/i386.c4
-rw-r--r--arch/i386/pci/pcbios.c6
-rw-r--r--arch/ia64/Kconfig11
-rw-r--r--arch/ia64/configs/tiger_defconfig2
-rw-r--r--arch/ia64/hp/sim/hpsim_irq.c6
-rw-r--r--arch/ia64/kernel/iosapic.c24
-rw-r--r--arch/ia64/kernel/irq.c24
-rw-r--r--arch/ia64/kernel/irq_ia64.c4
-rw-r--r--arch/ia64/kernel/irq_lsapic.c10
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/palinfo.c6
-rw-r--r--arch/ia64/kernel/perfmon.c4
-rw-r--r--arch/ia64/kernel/process.c4
-rw-r--r--arch/ia64/kernel/salinfo.c6
-rw-r--r--arch/ia64/kernel/smpboot.c8
-rw-r--r--arch/ia64/kernel/topology.c32
-rw-r--r--arch/ia64/mm/discontig.c57
-rw-r--r--arch/ia64/mm/fault.c36
-rw-r--r--arch/ia64/mm/init.c5
-rw-r--r--arch/ia64/pci/pci.c2
-rw-r--r--arch/ia64/sn/kernel/irq.c6
-rw-r--r--arch/ia64/sn/kernel/setup.c12
-rw-r--r--arch/ia64/sn/pci/tioca_provider.c2
-rw-r--r--arch/m32r/kernel/irq.c2
-rw-r--r--arch/m32r/kernel/setup.c2
-rw-r--r--arch/m32r/kernel/setup_m32104ut.c8
-rw-r--r--arch/m32r/kernel/setup_m32700ut.c28
-rw-r--r--arch/m32r/kernel/setup_mappi.c16
-rw-r--r--arch/m32r/kernel/setup_mappi2.c20
-rw-r--r--arch/m32r/kernel/setup_mappi3.c20
-rw-r--r--arch/m32r/kernel/setup_oaks32r.c12
-rw-r--r--arch/m32r/kernel/setup_opsput.c28
-rw-r--r--arch/m32r/kernel/setup_usrv.c18
-rw-r--r--arch/m68k/mm/memory.c6
-rw-r--r--arch/m68k/sun3/sun3dvma.c6
-rw-r--r--arch/m68knommu/Kconfig297
-rw-r--r--arch/m68knommu/Makefile24
-rw-r--r--arch/m68knommu/defconfig207
-rw-r--r--arch/m68knommu/kernel/comempci.c3
-rw-r--r--arch/m68knommu/kernel/setup.c68
-rw-r--r--arch/m68knommu/kernel/signal.c4
-rw-r--r--arch/m68knommu/kernel/traps.c4
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S224
-rw-r--r--arch/m68knommu/mm/init.c6
-rw-r--r--arch/m68knommu/platform/5307/Makefile1
-rw-r--r--arch/m68knommu/platform/5307/entry.S46
-rw-r--r--arch/m68knommu/platform/5307/head.S82
-rw-r--r--arch/m68knommu/platform/5307/pit.c37
-rw-r--r--arch/m68knommu/platform/5307/timers.c49
-rw-r--r--arch/m68knommu/platform/532x/Makefile20
-rw-r--r--arch/m68knommu/platform/532x/config.c486
-rw-r--r--arch/m68knommu/platform/68328/Makefile1
-rw-r--r--arch/m68knommu/platform/68328/head-pilot.S3
-rw-r--r--arch/m68knommu/platform/68328/head-ram.S6
-rw-r--r--arch/m68knommu/platform/68328/head-rom.S18
-rw-r--r--arch/m68knommu/platform/68328/ints.c20
-rw-r--r--arch/m68knommu/platform/68328/romvec.S37
-rw-r--r--arch/m68knommu/platform/68360/config.c14
-rw-r--r--arch/m68knommu/platform/68360/head-ram.S19
-rw-r--r--arch/m68knommu/platform/68360/head-rom.S17
-rw-r--r--arch/m68knommu/platform/68360/ints.c1
-rw-r--r--arch/m68knommu/platform/68EZ328/config.c14
-rw-r--r--arch/m68knommu/platform/68VZ328/config.c14
-rw-r--r--arch/mips/Kconfig5
-rw-r--r--arch/mips/au1000/common/irq.c20
-rw-r--r--arch/mips/au1000/pb1200/irqmap.c2
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq_5477.c2
-rw-r--r--arch/mips/dec/ioasic-irq.c4
-rw-r--r--arch/mips/dec/kn02-irq.c2
-rw-r--r--arch/mips/gt64120/ev64120/irq.c2
-rw-r--r--arch/mips/ite-boards/generic/irq.c4
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/jmr3927/rbhma3100/irq.c2
-rw-r--r--arch/mips/kernel/i8259.c4
-rw-r--r--arch/mips/kernel/irq-msc01.c4
-rw-r--r--arch/mips/kernel/irq-mv6434x.c2
-rw-r--r--arch/mips/kernel/irq-rm7000.c2
-rw-r--r--arch/mips/kernel/irq-rm9000.c4
-rw-r--r--arch/mips/kernel/irq.c4
-rw-r--r--arch/mips/kernel/irq_cpu.c4
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/smtc.c4
-rw-r--r--arch/mips/lasat/interrupt.c2
-rw-r--r--arch/mips/mips-boards/atlas/atlas_int.c2
-rw-r--r--arch/mips/momentum/ocelot_c/cpci-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/uart-irq.c4
-rw-r--r--arch/mips/momentum/ocelot_g/gt-irq.c4
-rw-r--r--arch/mips/oprofile/common.c2
-rw-r--r--arch/mips/pci/pci.c4
-rw-r--r--arch/mips/philips/pnx8550/common/int.c10
-rw-r--r--arch/mips/pmc-sierra/yosemite/ht.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c12
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c4
-rw-r--r--arch/mips/sibyte/sb1250/irq.c4
-rw-r--r--arch/mips/sni/irq.c2
-rw-r--r--arch/mips/tx4927/common/tx4927_irq.c4
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c14
-rw-r--r--arch/mips/tx4938/common/irq.c4
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/irq.c2
-rw-r--r--arch/mips/vr41xx/common/icu.c4
-rw-r--r--arch/mips/vr41xx/common/irq.c4
-rw-r--r--arch/mips/vr41xx/common/vrc4173.c2
-rw-r--r--arch/mips/vr41xx/nec-cmbvr4133/irq.c2
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/kernel/irq.c29
-rw-r--r--arch/parisc/kernel/pci.c2
-rw-r--r--arch/parisc/kernel/topology.c3
-rw-r--r--arch/powerpc/Kconfig7
-rw-r--r--arch/powerpc/kernel/crash.c8
-rw-r--r--arch/powerpc/kernel/irq.c10
-rw-r--r--arch/powerpc/kernel/machine_kexec_32.c4
-rw-r--r--arch/powerpc/kernel/pci_32.c47
-rw-r--r--arch/powerpc/kernel/pci_64.c4
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/sysfs.c31
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/mm/fault.c36
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/powerpc/mm/mem.c11
-rw-r--r--arch/powerpc/mm/numa.c11
-rw-r--r--arch/powerpc/oprofile/common.c2
-rw-r--r--arch/powerpc/platforms/83xx/pci.c5
-rw-r--r--arch/powerpc/platforms/85xx/pci.c5
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c4
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c4
-rw-r--r--arch/powerpc/platforms/chrp/pci.c4
-rw-r--r--arch/powerpc/platforms/iseries/irq.c6
-rw-r--r--arch/powerpc/platforms/maple/pci.c5
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c14
-rw-r--r--arch/powerpc/platforms/powermac/pci.c5
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c2
-rw-r--r--arch/powerpc/platforms/powermac/pic.c4
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c2
-rw-r--r--arch/powerpc/platforms/pseries/xics.c12
-rw-r--r--arch/powerpc/sysdev/i8259.c2
-rw-r--r--arch/powerpc/sysdev/ipic.c2
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c10
-rw-r--r--arch/ppc/8xx_io/commproc.c2
-rw-r--r--arch/ppc/kernel/machine_kexec.c4
-rw-r--r--arch/ppc/kernel/pci.c52
-rw-r--r--arch/ppc/kernel/setup.c2
-rw-r--r--arch/ppc/platforms/apus_setup.c4
-rw-r--r--arch/ppc/platforms/sbc82xx.c2
-rw-r--r--arch/ppc/syslib/cpc700_pic.c4
-rw-r--r--arch/ppc/syslib/cpm2_pic.c2
-rw-r--r--arch/ppc/syslib/gt64260_pic.c2
-rw-r--r--arch/ppc/syslib/m82xx_pci.c2
-rw-r--r--arch/ppc/syslib/m8xx_setup.c4
-rw-r--r--arch/ppc/syslib/mpc52xx_pic.c4
-rw-r--r--arch/ppc/syslib/mv64360_pic.c2
-rw-r--r--arch/ppc/syslib/open_pic.c8
-rw-r--r--arch/ppc/syslib/open_pic2.c2
-rw-r--r--arch/ppc/syslib/ppc403_pic.c2
-rw-r--r--arch/ppc/syslib/ppc4xx_pic.c2
-rw-r--r--arch/ppc/syslib/xilinx_pic.c2
-rw-r--r--arch/s390/appldata/appldata.h24
-rw-r--r--arch/s390/appldata/appldata_base.c44
-rw-r--r--arch/s390/appldata/appldata_mem.c5
-rw-r--r--arch/s390/appldata/appldata_net_sum.c5
-rw-r--r--arch/s390/appldata/appldata_os.c98
-rw-r--r--arch/s390/crypto/aes_s390.c14
-rw-r--r--arch/s390/crypto/des_s390.c42
-rw-r--r--arch/s390/crypto/sha1_s390.c34
-rw-r--r--arch/s390/crypto/sha256_s390.c14
-rw-r--r--arch/s390/kernel/binfmt_elf32.c5
-rw-r--r--arch/s390/kernel/entry.S80
-rw-r--r--arch/s390/kernel/entry64.S80
-rw-r--r--arch/s390/kernel/head.S22
-rw-r--r--arch/s390/kernel/head31.S77
-rw-r--r--arch/s390/kernel/head64.S79
-rw-r--r--arch/s390/kernel/machine_kexec.c4
-rw-r--r--arch/s390/kernel/s390_ksyms.c2
-rw-r--r--arch/s390/kernel/setup.c55
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/s390/kernel/traps.c8
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/sh/Makefile4
-rw-r--r--arch/sh/boards/adx/irq_maskreg.c2
-rw-r--r--arch/sh/boards/bigsur/irq.c4
-rw-r--r--arch/sh/boards/cqreek/irq.c4
-rw-r--r--arch/sh/boards/dreamcast/setup.c2
-rw-r--r--arch/sh/boards/ec3104/setup.c2
-rw-r--r--arch/sh/boards/harp/irq.c2
-rw-r--r--arch/sh/boards/mpc1211/pci.c4
-rw-r--r--arch/sh/boards/mpc1211/setup.c2
-rw-r--r--arch/sh/boards/overdrive/galileo.c2
-rw-r--r--arch/sh/boards/overdrive/irq.c2
-rw-r--r--arch/sh/boards/renesas/hs7751rvoip/irq.c2
-rw-r--r--arch/sh/boards/renesas/rts7751r2d/irq.c2
-rw-r--r--arch/sh/boards/renesas/systemh/irq.c2
-rw-r--r--arch/sh/boards/se/73180/irq.c2
-rw-r--r--arch/sh/boards/superh/microdev/irq.c2
-rw-r--r--arch/sh/cchips/hd6446x/hd64461/setup.c2
-rw-r--r--arch/sh/cchips/hd6446x/hd64465/setup.c2
-rw-r--r--arch/sh/cchips/voyagergx/irq.c2
-rw-r--r--arch/sh/drivers/pci/pci.c6
-rw-r--r--arch/sh/kernel/cpu/irq/imask.c2
-rw-r--r--arch/sh/kernel/cpu/irq/intc2.c2
-rw-r--r--arch/sh/kernel/cpu/irq/ipr.c2
-rw-r--r--arch/sh/kernel/cpu/irq/pint.c2
-rw-r--r--arch/sh/kernel/irq.c2
-rw-r--r--arch/sh/kernel/machine_kexec.c4
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh/oprofile/op_model_sh7750.c2
-rw-r--r--arch/sh64/kernel/irq.c2
-rw-r--r--arch/sh64/kernel/irq_intc.c4
-rw-r--r--arch/sh64/kernel/pcibios.c4
-rw-r--r--arch/sh64/kernel/setup.c2
-rw-r--r--arch/sh64/mach-cayman/irq.c2
-rw-r--r--arch/sparc/kernel/ioport.c8
-rw-r--r--arch/sparc/kernel/of_device.c2
-rw-r--r--arch/sparc/kernel/pcic.c2
-rw-r--r--arch/sparc/kernel/prom.c106
-rw-r--r--arch/sparc/kernel/setup.c2
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/iomap.c48
-rw-r--r--arch/sparc64/kernel/auxio.c3
-rw-r--r--arch/sparc64/kernel/irq.c73
-rw-r--r--arch/sparc64/kernel/of_device.c3
-rw-r--r--arch/sparc64/kernel/pci.c2
-rw-r--r--arch/sparc64/kernel/prom.c107
-rw-r--r--arch/sparc64/kernel/setup.c2
-rw-r--r--arch/sparc64/mm/fault.c36
-rw-r--r--arch/sparc64/mm/init.c4
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/kernel/irq.c6
-rw-r--r--arch/v850/kernel/irq.c6
-rw-r--r--arch/v850/kernel/rte_mb_a_pci.c2
-rw-r--r--arch/x86_64/Kconfig53
-rw-r--r--arch/x86_64/Kconfig.debug18
-rw-r--r--arch/x86_64/Makefile4
-rw-r--r--arch/x86_64/boot/Makefile9
-rw-r--r--arch/x86_64/boot/compressed/misc.c46
-rw-r--r--arch/x86_64/boot/tools/build.c6
-rw-r--r--arch/x86_64/boot/video.S19
-rw-r--r--arch/x86_64/crypto/aes-x86_64-asm.S22
-rw-r--r--arch/x86_64/crypto/aes.c20
-rw-r--r--arch/x86_64/defconfig159
-rw-r--r--arch/x86_64/ia32/fpu32.c1
-rw-r--r--arch/x86_64/ia32/ia32_signal.c2
-rw-r--r--arch/x86_64/ia32/ia32entry.S11
-rw-r--r--arch/x86_64/ia32/ptrace32.c43
-rw-r--r--arch/x86_64/ia32/sys_ia32.c25
-rw-r--r--arch/x86_64/kernel/Makefile8
-rw-r--r--arch/x86_64/kernel/aperture.c26
-rw-r--r--arch/x86_64/kernel/apic.c32
-rw-r--r--arch/x86_64/kernel/asm-offsets.c3
-rw-r--r--arch/x86_64/kernel/crash.c6
-rw-r--r--arch/x86_64/kernel/e820.c2
-rw-r--r--arch/x86_64/kernel/entry.S115
-rw-r--r--arch/x86_64/kernel/genapic_flat.c30
-rw-r--r--arch/x86_64/kernel/head64.c2
-rw-r--r--arch/x86_64/kernel/i8259.c22
-rw-r--r--arch/x86_64/kernel/io_apic.c64
-rw-r--r--arch/x86_64/kernel/irq.c48
-rw-r--r--arch/x86_64/kernel/k8.c118
-rw-r--r--arch/x86_64/kernel/machine_kexec.c4
-rw-r--r--arch/x86_64/kernel/mce.c6
-rw-r--r--arch/x86_64/kernel/mce_amd.c506
-rw-r--r--arch/x86_64/kernel/module.c38
-rw-r--r--arch/x86_64/kernel/nmi.c91
-rw-r--r--arch/x86_64/kernel/pci-calgary.c1018
-rw-r--r--arch/x86_64/kernel/pci-dma.c55
-rw-r--r--arch/x86_64/kernel/pci-gart.c155
-rw-r--r--arch/x86_64/kernel/pci-nommu.c9
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86_64/kernel/pmtimer.c2
-rw-r--r--arch/x86_64/kernel/process.c16
-rw-r--r--arch/x86_64/kernel/reboot.c1
-rw-r--r--arch/x86_64/kernel/setup.c276
-rw-r--r--arch/x86_64/kernel/setup64.c3
-rw-r--r--arch/x86_64/kernel/signal.c3
-rw-r--r--arch/x86_64/kernel/smp.c16
-rw-r--r--arch/x86_64/kernel/smpboot.c31
-rw-r--r--arch/x86_64/kernel/tce.c202
-rw-r--r--arch/x86_64/kernel/time.c87
-rw-r--r--arch/x86_64/kernel/traps.c83
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S29
-rw-r--r--arch/x86_64/kernel/vsyscall.c4
-rw-r--r--arch/x86_64/kernel/x8664_ksyms.c114
-rw-r--r--arch/x86_64/lib/csum-partial.c1
-rw-r--r--arch/x86_64/lib/csum-wrappers.c1
-rw-r--r--arch/x86_64/lib/delay.c5
-rw-r--r--arch/x86_64/lib/memmove.c4
-rw-r--r--arch/x86_64/lib/usercopy.c13
-rw-r--r--arch/x86_64/mm/fault.c47
-rw-r--r--arch/x86_64/mm/init.c121
-rw-r--r--arch/x86_64/mm/ioremap.c5
-rw-r--r--arch/x86_64/pci/k8-bus.c10
-rw-r--r--arch/xtensa/Makefile2
-rw-r--r--arch/xtensa/kernel/irq.c4
-rw-r--r--arch/xtensa/kernel/pci.c6
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--arch/xtensa/kernel/traps.c2
-rw-r--r--block/as-iosched.c2
-rw-r--r--block/ll_rw_blk.c10
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/aes.c14
-rw-r--r--crypto/anubis.c13
-rw-r--r--crypto/api.c17
-rw-r--r--crypto/arc4.c9
-rw-r--r--crypto/blowfish.c19
-rw-r--r--crypto/cast5.c14
-rw-r--r--crypto/cast6.c15
-rw-r--r--crypto/cipher.c14
-rw-r--r--crypto/compress.c15
-rw-r--r--crypto/crc32c.c19
-rw-r--r--crypto/crypto_null.c17
-rw-r--r--crypto/deflate.c23
-rw-r--r--crypto/des.c27
-rw-r--r--crypto/digest.c51
-rw-r--r--crypto/khazad.c21
-rw-r--r--crypto/md4.c12
-rw-r--r--crypto/md5.c12
-rw-r--r--crypto/michael_mic.c20
-rw-r--r--crypto/serpent.c27
-rw-r--r--crypto/sha1.c18
-rw-r--r--crypto/sha256.c19
-rw-r--r--crypto/sha512.c29
-rw-r--r--crypto/tcrypt.c179
-rw-r--r--crypto/tcrypt.h36
-rw-r--r--crypto/tea.c55
-rw-r--r--crypto/tgr192.c33
-rw-r--r--crypto/twofish.c14
-rw-r--r--crypto/wp512.c24
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_memhotplug.c146
-rw-r--r--drivers/acpi/numa.c15
-rw-r--r--drivers/acpi/processor_idle.c21
-rw-r--r--drivers/amba/bus.c5
-rw-r--r--drivers/atm/ambassador.c3
-rw-r--r--drivers/atm/firestream.c10
-rw-r--r--drivers/base/cpu.c28
-rw-r--r--drivers/base/dmapool.c3
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/node.c61
-rw-r--r--drivers/base/power/resume.c6
-rw-r--r--drivers/base/power/suspend.c13
-rw-r--r--drivers/base/topology.c4
-rw-r--r--drivers/block/loop.c28
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/rd.c2
-rw-r--r--drivers/block/sx8.c5
-rw-r--r--drivers/bluetooth/dtl1_cs.c3
-rw-r--r--drivers/char/Kconfig56
-rw-r--r--drivers/char/Makefile4
-rw-r--r--drivers/char/agp/Kconfig4
-rw-r--r--drivers/char/agp/amd-k7-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c81
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/efficeon-agp.c2
-rw-r--r--drivers/char/agp/sgi-agp.c5
-rw-r--r--drivers/char/applicom.c9
-rw-r--r--drivers/char/drm/drm_memory_debug.h2
-rw-r--r--drivers/char/drm/via_dmablit.c2
-rw-r--r--drivers/char/epca.c2
-rw-r--r--drivers/char/hangcheck-timer.c4
-rw-r--r--drivers/char/hvcs.c11
-rw-r--r--drivers/char/hw_random.c698
-rw-r--r--drivers/char/hw_random/Kconfig90
-rw-r--r--drivers/char/hw_random/Makefile11
-rw-r--r--drivers/char/hw_random/amd-rng.c152
-rw-r--r--drivers/char/hw_random/core.c354
-rw-r--r--drivers/char/hw_random/geode-rng.c128
-rw-r--r--drivers/char/hw_random/intel-rng.c189
-rw-r--r--drivers/char/hw_random/ixp4xx-rng.c73
-rw-r--r--drivers/char/hw_random/omap-rng.c208
-rw-r--r--drivers/char/hw_random/via-rng.c183
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c23
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c69
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c18
-rw-r--r--drivers/char/istallion.c1819
-rw-r--r--drivers/char/keyboard.c26
-rw-r--r--drivers/char/moxa.c2
-rw-r--r--drivers/char/mxser.c1
-rw-r--r--drivers/char/n_tty.c6
-rw-r--r--drivers/char/nsc_gpio.c142
-rw-r--r--drivers/char/pc8736x_gpio.c340
-rw-r--r--drivers/char/pty.c2
-rw-r--r--drivers/char/rio/riointr.c2
-rw-r--r--drivers/char/scx200_gpio.c162
-rw-r--r--drivers/char/specialix.c2
-rw-r--r--drivers/char/stallion.c211
-rw-r--r--drivers/char/sx.c2
-rw-r--r--drivers/char/tlclk.c2
-rw-r--r--drivers/char/tty_io.c75
-rw-r--r--drivers/char/vr41xx_giu.c4
-rw-r--r--drivers/char/vt.c580
-rw-r--r--drivers/char/watchdog/at91_wdt.c82
-rw-r--r--drivers/char/watchdog/i8xx_tco.c28
-rw-r--r--drivers/char/watchdog/pcwd_pci.c30
-rw-r--r--drivers/char/watchdog/pcwd_usb.c23
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/acpi_pm.c177
-rw-r--r--drivers/clocksource/cyclone.c119
-rw-r--r--drivers/clocksource/scx200_hrt.c101
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_stats.c4
-rw-r--r--drivers/crypto/padlock-aes.c39
-rw-r--r--drivers/dma/ioatdma.c5
-rw-r--r--drivers/hwmon/asb100.c2
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm80.c2
-rw-r--r--drivers/hwmon/lm87.c2
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-io.c6
-rw-r--r--drivers/ide/ide-iops.c4
-rw-r--r--drivers/ide/ide-lib.c4
-rw-r--r--drivers/ide/ide-timing.h8
-rw-r--r--drivers/ide/pci/aec62xx.c19
-rw-r--r--drivers/ide/pci/amd74xx.c7
-rw-r--r--drivers/ide/pci/cmd64x.c15
-rw-r--r--drivers/ide/pci/hpt34x.c2
-rw-r--r--drivers/ide/pci/pdc202xx_new.c6
-rw-r--r--drivers/ide/pci/pdc202xx_old.c159
-rw-r--r--drivers/ide/pci/piix.c12
-rw-r--r--drivers/ide/pci/sc1200.c4
-rw-r--r--drivers/ide/pci/serverworks.c53
-rw-r--r--drivers/ide/pci/siimage.c62
-rw-r--r--drivers/ide/pci/sl82c105.c4
-rw-r--r--drivers/ide/pci/slc90e66.c11
-rw-r--r--drivers/ieee1394/eth1394.c3
-rw-r--r--drivers/ieee1394/hosts.c2
-rw-r--r--drivers/ieee1394/ieee1394_core.h2
-rw-r--r--drivers/ieee1394/ohci1394.c17
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/mad.c9
-rw-r--r--drivers/infiniband/core/mad_rmpp.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig11
-rw-r--r--drivers/infiniband/ulp/iser/Makefile4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c790
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h354
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c738
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c401
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c827
-rw-r--r--drivers/input/evdev.c10
-rw-r--r--drivers/input/input.c86
-rw-r--r--drivers/input/joydev.c69
-rw-r--r--drivers/input/joystick/a3d.c2
-rw-r--r--drivers/input/joystick/analog.c23
-rw-r--r--drivers/input/joystick/cobra.c3
-rw-r--r--drivers/input/joystick/db9.c5
-rw-r--r--drivers/input/joystick/gamecon.c3
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/grip.c3
-rw-r--r--drivers/input/joystick/guillemot.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c8
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c6
-rw-r--r--drivers/input/joystick/interact.c2
-rw-r--r--drivers/input/joystick/magellan.c2
-rw-r--r--drivers/input/joystick/sidewinder.c15
-rw-r--r--drivers/input/joystick/spaceball.c2
-rw-r--r--drivers/input/joystick/spaceorb.c2
-rw-r--r--drivers/input/joystick/stinger.c2
-rw-r--r--drivers/input/joystick/twidjoy.c2
-rw-r--r--drivers/input/joystick/warrior.c2
-rw-r--r--drivers/input/keyboard/atkbd.c219
-rw-r--r--drivers/input/keyboard/lkkbd.c9
-rw-r--r--drivers/input/keyboard/newtonkbd.c2
-rw-r--r--drivers/input/keyboard/sunkbd.c2
-rw-r--r--drivers/input/keyboard/xtkbd.c2
-rw-r--r--drivers/input/misc/wistron_btns.c19
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c39
-rw-r--r--drivers/input/mouse/sermouse.c2
-rw-r--r--drivers/input/mouse/vsxxxaa.c22
-rw-r--r--drivers/input/mousedev.c42
-rw-r--r--drivers/input/serio/ct82c710.c6
-rw-r--r--drivers/input/touchscreen/gunze.c2
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c2
-rw-r--r--drivers/input/touchscreen/mtouch.c2
-rw-r--r--drivers/input/tsdev.c22
-rw-r--r--drivers/isdn/capi/capi.c54
-rw-r--r--drivers/isdn/divert/isdn_divert.c2
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c298
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/gigaset/ev-layer.c13
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/q931.c4
-rw-r--r--drivers/isdn/hisax/telespci.c5
-rw-r--r--drivers/isdn/i4l/isdn_common.c2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c2
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c4
-rw-r--r--drivers/leds/led-core.c2
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/macintosh/Makefile2
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/macio_asic.c4
-rw-r--r--drivers/macintosh/via-pmu-event.c80
-rw-r--r--drivers/macintosh/via-pmu-event.h8
-rw-r--r--drivers/macintosh/via-pmu.c8
-rw-r--r--drivers/md/Kconfig46
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/bitmap.c483
-rw-r--r--drivers/md/dm-crypt.c56
-rw-r--r--drivers/md/dm-emc.c40
-rw-r--r--drivers/md/dm-exception-store.c67
-rw-r--r--drivers/md/dm-ioctl.c109
-rw-r--r--drivers/md/dm-linear.c8
-rw-r--r--drivers/md/dm-log.c157
-rw-r--r--drivers/md/dm-mpath.c43
-rw-r--r--drivers/md/dm-raid1.c97
-rw-r--r--drivers/md/dm-round-robin.c6
-rw-r--r--drivers/md/dm-snap.c16
-rw-r--r--drivers/md/dm-stripe.c25
-rw-r--r--drivers/md/dm-table.c57
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--drivers/md/dm-zero.c8
-rw-r--r--drivers/md/dm.c184
-rw-r--r--drivers/md/dm.h81
-rw-r--r--drivers/md/kcopyd.c4
-rw-r--r--drivers/md/linear.c74
-rw-r--r--drivers/md/md.c634
-rw-r--r--drivers/md/raid1.c43
-rw-r--r--drivers/md/raid10.c77
-rw-r--r--drivers/md/raid5.c1307
-rw-r--r--drivers/md/raid6main.c2427
-rw-r--r--drivers/media/Kconfig3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c27
-rw-r--r--drivers/media/dvb/ttpci/av7110.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c25
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c50
-rw-r--r--drivers/media/video/Kconfig14
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c10
-rw-r--r--drivers/media/video/cx2341x.c57
-rw-r--r--drivers/media/video/cx88/Kconfig15
-rw-r--r--drivers/media/video/cx88/Makefile5
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c8
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c30
-rw-r--r--drivers/media/video/cx88/cx88-cards.c5
-rw-r--r--drivers/media/video/cx88/cx88-core.c6
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c1
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c4
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c2
-rw-r--r--drivers/media/video/cx88/cx88-video.c10
-rw-r--r--drivers/media/video/cx88/cx88.h7
-rw-r--r--drivers/media/video/pvrusb2/Kconfig62
-rw-r--r--drivers/media/video/pvrusb2/Makefile18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.c204
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.c230
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.h92
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.c593
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.h123
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c279
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debug.h67
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.c478
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-demod.c126
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-demod.h38
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-eeprom.c164
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-eeprom.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c418
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.h42
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h384
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c3120
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h335
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c115
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c232
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h47
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c937
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.h96
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-io.c695
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-io.h102
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.c513
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.h50
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-main.c172
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-std.c408
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-std.h60
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c865
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.h47
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-tuner.c122
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-tuner.h38
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-util.h63
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c1126
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.c253
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.h52
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-wm8775.c170
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-wm8775.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2.h43
-rw-r--r--drivers/media/video/saa7134/saa6752hs.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c8
-rw-r--r--drivers/media/video/stradis.c4
-rw-r--r--drivers/media/video/tda9887.c31
-rw-r--r--drivers/media/video/tuner-core.c18
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c2
-rw-r--r--drivers/media/video/v4l2-common.c14
-rw-r--r--drivers/message/fusion/mptfc.c6
-rw-r--r--drivers/message/fusion/mptsas.c3
-rw-r--r--drivers/message/i2o/iop.c15
-rw-r--r--drivers/mfd/ucb1x00-core.c4
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/mmc/mmci.c4
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c1
-rw-r--r--drivers/mtd/chips/jedec.c1
-rw-r--r--drivers/mtd/chips/map_absent.c3
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c1
-rw-r--r--drivers/mtd/devices/ms02-nv.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/phram.c1
-rw-r--r--drivers/mtd/devices/pmc551.c11
-rw-r--r--drivers/mtd/devices/slram.c1
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/amd76xrom.c5
-rw-r--r--drivers/mtd/maps/ichxrom.c5
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/physmap.c2
-rw-r--r--drivers/mtd/maps/scx200_docflash.c5
-rw-r--r--drivers/mtd/maps/sun_uflash.c198
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/nand/nand_base.c16
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c164
-rw-r--r--drivers/mtd/nand/ts7250.c2
-rw-r--r--drivers/net/3c501.c4
-rw-r--r--drivers/net/3c59x.c85
-rw-r--r--drivers/net/8139cp.c11
-rw-r--r--drivers/net/8139too.c6
-rw-r--r--drivers/net/dl2k.h12
-rw-r--r--drivers/net/dm9000.c34
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/eepro100.c5
-rw-r--r--drivers/net/epic100.c20
-rw-r--r--drivers/net/fealnx.c10
-rw-r--r--drivers/net/fec.c310
-rw-r--r--drivers/net/fec.h2
-rw-r--r--drivers/net/fs_enet/fs_enet-mii.c3
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/irda/irport.c2
-rw-r--r--drivers/net/irda/nsc-ircc.c8
-rw-r--r--drivers/net/natsemi.c100
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/ppp_generic.c3
-rw-r--r--drivers/net/skge.c4
-rw-r--r--drivers/net/sky2.c6
-rw-r--r--drivers/net/tulip/de2104x.c9
-rw-r--r--drivers/net/tulip/tulip_core.c6
-rw-r--r--drivers/net/tulip/winbond-840.c26
-rw-r--r--drivers/net/typhoon.c5
-rw-r--r--drivers/net/wan/c101.c2
-rw-r--r--drivers/net/wan/dscc4.c12
-rw-r--r--drivers/net/wan/n2.c2
-rw-r--r--drivers/net/wan/pc300_drv.c4
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wireless/bcm43xx/Kconfig1
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx.h6
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c37
-rw-r--r--drivers/net/wireless/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2200.c22
-rw-r--r--drivers/net/yellowfin.c24
-rw-r--r--drivers/parisc/dino.c4
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/gsc.c8
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/parport_gsc.c2
-rw-r--r--drivers/parport/parport_gsc.h2
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/parport/procfs.c2
-rw-r--r--drivers/pci/bus.c10
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c9
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c10
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c5
-rw-r--r--drivers/pci/hotplug/shpchp_sysfs.c18
-rw-r--r--drivers/pci/msi-apic.c1
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/pci-sysfs.c4
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/pci.h6
-rw-r--r--drivers/pci/proc.c20
-rw-r--r--drivers/pci/rom.c10
-rw-r--r--drivers/pci/setup-bus.c6
-rw-r--r--drivers/pci/setup-res.c34
-rw-r--r--drivers/pcmcia/hd64465_ss.c6
-rw-r--r--drivers/pcmcia/i82365.c5
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c4
-rw-r--r--drivers/pcmcia/pd6729.c3
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c26
-rw-r--r--drivers/pcmcia/tcic.c5
-rw-r--r--drivers/pnp/interface.c8
-rw-r--r--drivers/pnp/manager.c15
-rw-r--r--drivers/pnp/resource.c8
-rw-r--r--drivers/rapidio/rio-access.c4
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/rtc-ds1553.c2
-rw-r--r--drivers/rtc/rtc-rs5c348.c246
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/s390/block/dasd.c105
-rw-r--r--drivers/s390/block/dasd_3370_erp.c27
-rw-r--r--drivers/s390/block/dasd_3990_erp.c189
-rw-r--r--drivers/s390/block/dasd_9336_erp.c27
-rw-r--r--drivers/s390/block/dasd_9343_erp.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c102
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/block/dasd_diag.h2
-rw-r--r--drivers/s390/block/dasd_eckd.c337
-rw-r--r--drivers/s390/block/dasd_eckd.h24
-rw-r--r--drivers/s390/block/dasd_eer.c6
-rw-r--r--drivers/s390/block/dasd_erp.c8
-rw-r--r--drivers/s390/block/dasd_fba.c49
-rw-r--r--drivers/s390/block/dasd_fba.h2
-rw-r--r--drivers/s390/block/dasd_int.h39
-rw-r--r--drivers/s390/block/dasd_ioctl.c12
-rw-r--r--drivers/s390/char/raw3270.c67
-rw-r--r--drivers/s390/cio/blacklist.c35
-rw-r--r--drivers/s390/cio/ccwgroup.c17
-rw-r--r--drivers/s390/cio/chsc.c6
-rw-r--r--drivers/s390/cio/cmf.c623
-rw-r--r--drivers/s390/cio/css.c63
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device.h10
-rw-r--r--drivers/s390/cio/device_fsm.c20
-rw-r--r--drivers/s390/cio/device_ops.c10
-rw-r--r--drivers/s390/net/lcs.c7
-rw-r--r--drivers/s390/s390mach.c5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c14
-rw-r--r--drivers/sbus/char/cpwatchdog.c2
-rw-r--r--drivers/sbus/char/openprom.c593
-rw-r--r--drivers/sbus/char/riowatchdog.c4
-rw-r--r--drivers/scsi/Kconfig12
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/aacraid/comminit.c5
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/ahci.c2
-rw-r--r--drivers/scsi/ata_piix.c2
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/ibmmca.c8
-rw-r--r--drivers/scsi/imm.c3
-rw-r--r--drivers/scsi/imm.h2
-rw-r--r--drivers/scsi/ips.c4
-rw-r--r--drivers/scsi/libata-core.c153
-rw-r--r--drivers/scsi/libata-eh.c63
-rw-r--r--drivers/scsi/libata-scsi.c4
-rw-r--r--drivers/scsi/libata.h4
-rw-r--r--drivers/scsi/ncr53c8xx.c3
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/ppa.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_sil.c31
-rw-r--r--drivers/scsi/sata_sil24.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c10
-rw-r--r--drivers/scsi/sata_vsc.c12
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/serial/68328serial.c102
-rw-r--r--drivers/serial/8250_pci.c4
-rw-r--r--drivers/serial/8250_pnp.c4
-rw-r--r--drivers/serial/crisv10.c6
-rw-r--r--drivers/serial/jsm/jsm_tty.c7
-rw-r--r--drivers/serial/mcfserial.c28
-rw-r--r--drivers/sn/ioc3.c2
-rw-r--r--drivers/sn/ioc4.c2
-rw-r--r--drivers/spi/spi.c1
-rw-r--r--drivers/telephony/ixj.c2
-rw-r--r--drivers/usb/host/hc_crisv10.c3
-rw-r--r--drivers/usb/host/sl811-hcd.c10
-rw-r--r--drivers/usb/input/fixp-arith.h15
-rw-r--r--drivers/usb/input/hid-debug.h2
-rw-r--r--drivers/usb/serial/ir-usb.c3
-rw-r--r--drivers/usb/serial/whiteheat.c19
-rw-r--r--drivers/usb/storage/usb.h4
-rw-r--r--drivers/video/Kconfig49
-rw-r--r--drivers/video/Makefile9
-rw-r--r--drivers/video/aty/aty128fb.c36
-rw-r--r--drivers/video/aty/atyfb_base.c43
-rw-r--r--drivers/video/aty/mach64_accel.c10
-rw-r--r--drivers/video/aty/mach64_cursor.c33
-rw-r--r--drivers/video/aty/radeon_backlight.c6
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/au1100fb.c44
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/locomolcd.c123
-rw-r--r--drivers/video/cfbimgblt.c1
-rw-r--r--drivers/video/cirrusfb.c2
-rw-r--r--drivers/video/console/fbcon.c361
-rw-r--r--drivers/video/console/fbcon.h1
-rw-r--r--drivers/video/console/mdacon.c2
-rw-r--r--drivers/video/console/newport_con.c38
-rw-r--r--drivers/video/console/promcon.c4
-rw-r--r--drivers/video/console/sticon.c2
-rw-r--r--drivers/video/console/vgacon.c40
-rw-r--r--drivers/video/epson1355fb.c29
-rw-r--r--drivers/video/fbcvt.c1
-rw-r--r--drivers/video/fbmem.c58
-rw-r--r--drivers/video/fbmon.c33
-rw-r--r--drivers/video/fbsysfs.c52
-rw-r--r--drivers/video/geode/gx1fb_core.c3
-rw-r--r--drivers/video/geode/gxfb_core.c3
-rw-r--r--drivers/video/i810/i810_main.c3
-rw-r--r--drivers/video/imacfb.c345
-rw-r--r--drivers/video/macmodes.c6
-rw-r--r--drivers/video/macmodes.h7
-rw-r--r--drivers/video/modedb.c11
-rw-r--r--drivers/video/neofb.c32
-rw-r--r--drivers/video/nvidia/nv_hw.c10
-rw-r--r--drivers/video/nvidia/nvidia.c370
-rw-r--r--drivers/video/riva/fbdev.c2
-rw-r--r--drivers/video/s3c2410fb.c17
-rw-r--r--drivers/video/savage/savagefb.h45
-rw-r--r--drivers/video/savage/savagefb_driver.c1475
-rw-r--r--drivers/video/sgivwfb.c6
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/skeletonfb.c5
-rw-r--r--drivers/video/tgafb.c1
-rw-r--r--drivers/video/vesafb.c59
-rw-r--r--drivers/video/vfb.c29
-rw-r--r--drivers/video/vga16fb.c40
-rw-r--r--fs/9p/mux.c4
-rw-r--r--fs/9p/v9fs_vfs.h2
-rw-r--r--fs/9p/vfs_addr.c2
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/Kconfig54
-rw-r--r--fs/adfs/inode.c2
-rw-r--r--fs/affs/affs.h6
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/affs/symlink.c2
-rw-r--r--fs/afs/cell.c3
-rw-r--r--fs/afs/file.c2
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/kafsasyncd.c9
-rw-r--r--fs/afs/server.c6
-rw-r--r--fs/afs/vlocation.c6
-rw-r--r--fs/afs/vnode.c3
-rw-r--r--fs/aio.c2
-rw-r--r--fs/autofs4/expire.c3
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/bfs/bfs.h2
-rw-r--r--fs/bfs/file.c2
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/buffer.c5
-rw-r--r--fs/cifs/CHANGES17
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/README39
-rw-r--r--fs/cifs/asn1.c10
-rw-r--r--fs/cifs/cifs_debug.c134
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifs_unicode.c1
-rw-r--r--fs/cifs/cifsencrypt.c140
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h5
-rw-r--r--fs/cifs/cifsglob.h71
-rw-r--r--fs/cifs/cifspdu.h98
-rw-r--r--fs/cifs/cifsproto.h14
-rw-r--r--fs/cifs/cifssmb.c287
-rw-r--r--fs/cifs/connect.c498
-rw-r--r--fs/cifs/dir.c15
-rw-r--r--fs/cifs/fcntl.c4
-rw-r--r--fs/cifs/file.c54
-rw-r--r--fs/cifs/inode.c39
-rw-r--r--fs/cifs/link.c7
-rw-r--r--fs/cifs/misc.c10
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/ntlmssp.c143
-rw-r--r--fs/cifs/readdir.c184
-rw-r--r--fs/cifs/sess.c538
-rw-r--r--fs/cifs/smbencrypt.c1
-rw-r--r--fs/cifs/transport.c3
-rw-r--r--fs/coda/psdev.c2
-rw-r--r--fs/coda/symlink.c2
-rw-r--r--fs/coda/upcall.c2
-rw-r--r--fs/compat.c16
-rw-r--r--fs/compat_ioctl.c1
-rw-r--r--fs/configfs/dir.c6
-rw-r--r--fs/configfs/inode.c2
-rw-r--r--fs/cramfs/inode.c4
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/dquot.c4
-rw-r--r--fs/efs/inode.c2
-rw-r--r--fs/efs/symlink.c2
-rw-r--r--fs/exec.c147
-rw-r--r--fs/ext2/ext2.h6
-rw-r--r--fs/ext2/inode.c6
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/fat/inode.c2
-rw-r--r--fs/freevxfs/vxfs_immed.c2
-rw-r--r--fs/freevxfs/vxfs_inode.c6
-rw-r--r--fs/freevxfs/vxfs_subr.c2
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/hfs/hfs_fs.h4
-rw-r--r--fs/hfs/inode.c4
-rw-r--r--fs/hfsplus/hfsplus_fs.h4
-rw-r--r--fs/hfsplus/inode.c4
-rw-r--r--fs/hostfs/hostfs_kern.c6
-rw-r--r--fs/hpfs/file.c2
-rw-r--r--fs/hpfs/hpfs_fn.h4
-rw-r--r--fs/hpfs/namei.c2
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/inode.c2
-rw-r--r--fs/isofs/compress.c2
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/isofs/isofs.h2
-rw-r--r--fs/isofs/rock.c2
-rw-r--r--fs/isofs/zisofs.h2
-rw-r--r--fs/jbd/journal.c3
-rw-r--r--fs/jffs/inode-v23.c4
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/erase.c34
-rw-r--r--fs/jffs2/file.c2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/gc.c6
-rw-r--r--fs/jffs2/jffs2_fs_sb.h3
-rw-r--r--fs/jffs2/malloc.c2
-rw-r--r--fs/jffs2/nodelist.c3
-rw-r--r--fs/jffs2/nodemgmt.c24
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/readinode.c1
-rw-r--r--fs/jffs2/scan.c55
-rw-r--r--fs/jffs2/summary.c43
-rw-r--r--fs/jffs2/wbuf.c3
-rw-r--r--fs/jffs2/xattr.c632
-rw-r--r--fs/jffs2/xattr.h21
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_extent.c8
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/jfs_metapage.c2
-rw-r--r--fs/jfs/jfs_metapage.h2
-rw-r--r--fs/libfs.c10
-rw-r--r--fs/minix/inode.c2
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/ncpfs/inode.c2
-rw-r--r--fs/ncpfs/symlink.c2
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/nfs4state.c5
-rw-r--r--fs/nfsd/nfscache.c3
-rw-r--r--fs/ntfs/aops.c4
-rw-r--r--fs/ntfs/ntfs.h4
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dlm/dlmast.c15
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h63
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c33
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c6
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h30
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c103
-rw-r--r--fs/ocfs2/dlm/dlmfs.c6
-rw-r--r--fs/ocfs2/dlm/dlmlock.c73
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c448
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c593
-rw-r--r--fs/ocfs2/dlm/dlmthread.c74
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c13
-rw-r--r--fs/ocfs2/dlm/userdlm.c2
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/ocfs2/journal.c5
-rw-r--r--fs/ocfs2/vote.c8
-rw-r--r--fs/openpromfs/inode.c1158
-rw-r--r--fs/pnode.c9
-rw-r--r--fs/proc/base.c1086
-rw-r--r--fs/proc/inode.c11
-rw-r--r--fs/proc/internal.h22
-rw-r--r--fs/proc/task_mmu.c140
-rw-r--r--fs/proc/task_nommu.c21
-rw-r--r--fs/qnx4/inode.c2
-rw-r--r--fs/ramfs/file-mmu.c2
-rw-r--r--fs/ramfs/file-nommu.c2
-rw-r--r--fs/ramfs/internal.h2
-rw-r--r--fs/reiserfs/file.c8
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/romfs/inode.c2
-rw-r--r--fs/smbfs/file.c2
-rw-r--r--fs/smbfs/proto.h2
-rw-r--r--fs/smbfs/request.c6
-rw-r--r--fs/smbfs/smbiod.c3
-rw-r--r--fs/sysfs/dir.c10
-rw-r--r--fs/sysfs/inode.c2
-rw-r--r--fs/sysv/itree.c2
-rw-r--r--fs/sysv/sysv.h2
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/udf/symlink.c2
-rw-r--r--fs/udf/udfdecl.h6
-rw-r--r--fs/ufs/inode.c119
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h2
-rw-r--r--fs/xfs/xfs_behavior.h3
-rw-r--r--fs/xfs/xfs_inode.c4
-rw-r--r--fs/xfs/xfs_log.c4
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c21
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--fs/xfs/xfs_trans.h4
-rw-r--r--fs/xfs/xfs_vnodeops.c11
-rw-r--r--include/asm-alpha/core_t2.h2
-rw-r--r--include/asm-alpha/hw_irq.h2
-rw-r--r--include/asm-arm/arch-at91rm9200/memory.h2
-rw-r--r--include/asm-arm/arch-h720x/memory.h2
-rw-r--r--include/asm-arm/arch-imx/memory.h6
-rw-r--r--include/asm-arm/arch-ixp23xx/ixp23xx.h11
-rw-r--r--include/asm-arm/arch-ixp23xx/platform.h10
-rw-r--r--include/asm-arm/arch-ixp23xx/uncompress.h2
-rw-r--r--include/asm-arm/arch-s3c2410/regs-dsc.h16
-rw-r--r--include/asm-arm/arch-s3c2410/regs-nand.h48
-rw-r--r--include/asm-arm/assembler.h36
-rw-r--r--include/asm-arm/bugs.h4
-rw-r--r--include/asm-arm/domain.h7
-rw-r--r--include/asm-arm/fpstate.h8
-rw-r--r--include/asm-arm/hardware/locomo.h5
-rw-r--r--include/asm-arm/mach/map.h9
-rw-r--r--include/asm-arm/mach/pci.h2
-rw-r--r--include/asm-arm/memory.h75
-rw-r--r--include/asm-arm/mmu.h16
-rw-r--r--include/asm-arm/mmu_context.h2
-rw-r--r--include/asm-arm/page-nommu.h51
-rw-r--r--include/asm-arm/page.h8
-rw-r--r--include/asm-arm/pgalloc.h8
-rw-r--r--include/asm-arm/pgtable-nommu.h123
-rw-r--r--include/asm-arm/pgtable.h10
-rw-r--r--include/asm-arm/proc-fns.h4
-rw-r--r--include/asm-arm/ptrace.h5
-rw-r--r--include/asm-arm/thread_info.h6
-rw-r--r--include/asm-arm/uaccess.h184
-rw-r--r--include/asm-arm/ucontext.h14
-rw-r--r--include/asm-cris/hw_irq.h2
-rw-r--r--include/asm-cris/irq.h5
-rw-r--r--include/asm-generic/bug.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h28
-rw-r--r--include/asm-i386/alternative.h2
-rw-r--r--include/asm-i386/apic.h12
-rw-r--r--include/asm-i386/cpu.h2
-rw-r--r--include/asm-i386/cpufeature.h1
-rw-r--r--include/asm-i386/delay.h2
-rw-r--r--include/asm-i386/dwarf2.h54
-rw-r--r--include/asm-i386/elf.h53
-rw-r--r--include/asm-i386/fixmap.h10
-rw-r--r--include/asm-i386/hw_irq.h12
-rw-r--r--include/asm-i386/intel_arch_perfmon.h19
-rw-r--r--include/asm-i386/k8.h1
-rw-r--r--include/asm-i386/kdebug.h2
-rw-r--r--include/asm-i386/kprobes.h1
-rw-r--r--include/asm-i386/local.h26
-rw-r--r--include/asm-i386/mach-default/mach_ipi.h7
-rw-r--r--include/asm-i386/mach-default/mach_timer.h4
-rw-r--r--include/asm-i386/mach-summit/mach_mpparse.h3
-rw-r--r--include/asm-i386/mach-visws/setup_arch.h3
-rw-r--r--include/asm-i386/mmu.h1
-rw-r--r--include/asm-i386/nmi.h28
-rw-r--r--include/asm-i386/node.h29
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/processor.h11
-rw-r--r--include/asm-i386/system.h2
-rw-r--r--include/asm-i386/thread_info.h18
-rw-r--r--include/asm-i386/timer.h57
-rw-r--r--include/asm-i386/timex.h34
-rw-r--r--include/asm-i386/topology.h11
-rw-r--r--include/asm-i386/tsc.h49
-rw-r--r--include/asm-i386/unwind.h98
-rw-r--r--include/asm-ia64/hw_irq.h3
-rw-r--r--include/asm-ia64/irq.h5
-rw-r--r--include/asm-ia64/kdebug.h2
-rw-r--r--include/asm-ia64/kprobes.h1
-rw-r--r--include/asm-ia64/nodedata.h12
-rw-r--r--include/asm-ia64/sn/sn_sal.h10
-rw-r--r--include/asm-ia64/thread_info.h5
-rw-r--r--include/asm-ia64/topology.h1
-rw-r--r--include/asm-m32r/hw_irq.h5
-rw-r--r--include/asm-m32r/system.h2
-rw-r--r--include/asm-m68knommu/bootstd.h12
-rw-r--r--include/asm-m68knommu/cacheflush.h13
-rw-r--r--include/asm-m68knommu/coldfire.h76
-rw-r--r--include/asm-m68knommu/irq.h2
-rw-r--r--include/asm-m68knommu/m5249sim.h2
-rw-r--r--include/asm-m68knommu/m532xsim.h2238
-rw-r--r--include/asm-m68knommu/mcfcache.h15
-rw-r--r--include/asm-m68knommu/mcfpit.h8
-rw-r--r--include/asm-m68knommu/mcfsim.h3
-rw-r--r--include/asm-m68knommu/mcftimer.h30
-rw-r--r--include/asm-m68knommu/mcfuart.h4
-rw-r--r--include/asm-m68knommu/page_offset.h43
-rw-r--r--include/asm-m68knommu/processor.h28
-rw-r--r--include/asm-m68knommu/ptrace.h2
-rw-r--r--include/asm-mips/hw_irq.h8
-rw-r--r--include/asm-mips/mach-mips/irq.h6
-rw-r--r--include/asm-parisc/hw_irq.h9
-rw-r--r--include/asm-parisc/irq.h5
-rw-r--r--include/asm-powerpc/hw_irq.h18
-rw-r--r--include/asm-powerpc/irq.h5
-rw-r--r--include/asm-powerpc/kdebug.h2
-rw-r--r--include/asm-powerpc/kprobes.h2
-rw-r--r--include/asm-powerpc/pci.h2
-rw-r--r--include/asm-powerpc/topology.h5
-rw-r--r--include/asm-ppc/pci.h2
-rw-r--r--include/asm-s390/bitops.h42
-rw-r--r--include/asm-s390/cio.h2
-rw-r--r--include/asm-s390/cmb.h4
-rw-r--r--include/asm-s390/dasd.h8
-rw-r--r--include/asm-s390/thread_info.h1
-rw-r--r--include/asm-s390/unistd.h4
-rw-r--r--include/asm-sh/hw_irq.h5
-rw-r--r--include/asm-sh64/hw_irq.h1
-rw-r--r--include/asm-sparc/io.h16
-rw-r--r--include/asm-sparc/prom.h10
-rw-r--r--include/asm-sparc64/dma-mapping.h43
-rw-r--r--include/asm-sparc64/floppy.h50
-rw-r--r--include/asm-sparc64/kdebug.h2
-rw-r--r--include/asm-sparc64/kprobes.h1
-rw-r--r--include/asm-sparc64/prom.h10
-rw-r--r--include/asm-sparc64/topology.h3
-rw-r--r--include/asm-um/hw_irq.h3
-rw-r--r--include/asm-v850/hw_irq.h4
-rw-r--r--include/asm-x86_64/alternative.h146
-rw-r--r--include/asm-x86_64/apic.h26
-rw-r--r--include/asm-x86_64/atomic.h42
-rw-r--r--include/asm-x86_64/bitops.h7
-rw-r--r--include/asm-x86_64/calgary.h66
-rw-r--r--include/asm-x86_64/cpufeature.h3
-rw-r--r--include/asm-x86_64/dma-mapping.h17
-rw-r--r--include/asm-x86_64/dma.h2
-rw-r--r--include/asm-x86_64/gart-mapping.h16
-rw-r--r--include/asm-x86_64/hpet.h2
-rw-r--r--include/asm-x86_64/hw_irq.h13
-rw-r--r--include/asm-x86_64/ia32_unistd.h308
-rw-r--r--include/asm-x86_64/intel_arch_perfmon.h19
-rw-r--r--include/asm-x86_64/k8.h14
-rw-r--r--include/asm-x86_64/kdebug.h2
-rw-r--r--include/asm-x86_64/kprobes.h1
-rw-r--r--include/asm-x86_64/local.h26
-rw-r--r--include/asm-x86_64/mce.h13
-rw-r--r--include/asm-x86_64/mutex.h4
-rw-r--r--include/asm-x86_64/nmi.h30
-rw-r--r--include/asm-x86_64/pci.h4
-rw-r--r--include/asm-x86_64/pgtable.h6
-rw-r--r--include/asm-x86_64/processor.h5
-rw-r--r--include/asm-x86_64/proto.h15
-rw-r--r--include/asm-x86_64/rwlock.h8
-rw-r--r--include/asm-x86_64/semaphore.h8
-rw-r--r--include/asm-x86_64/smp.h2
-rw-r--r--include/asm-x86_64/spinlock.h10
-rw-r--r--include/asm-x86_64/string.h3
-rw-r--r--include/asm-x86_64/system.h86
-rw-r--r--include/asm-x86_64/tce.h47
-rw-r--r--include/asm-x86_64/thread_info.h19
-rw-r--r--include/asm-x86_64/topology.h10
-rw-r--r--include/asm-x86_64/unwind.h106
-rw-r--r--include/asm-xtensa/hw_irq.h4
-rw-r--r--include/keys/user-type.h1
-rw-r--r--include/linux/ac97_codec.h2
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/bitmap.h5
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/clocksource.h185
-rw-r--r--include/linux/coda_linux.h4
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compat_ioctl.h5
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/cpu.h14
-rw-r--r--include/linux/crypto.h34
-rw-r--r--include/linux/device-mapper.h111
-rw-r--r--include/linux/dm-ioctl.h6
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/efs_fs.h2
-rw-r--r--include/linux/fb.h19
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/futex.h12
-rw-r--r--include/linux/hw_random.h50
-rw-r--r--include/linux/ide.h1
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/input.h10
-rw-r--r--include/linux/interrupt.h14
-rw-r--r--include/linux/ioport.h30
-rw-r--r--include/linux/ipmi.h4
-rw-r--r--include/linux/irq.h346
-rw-r--r--include/linux/isdn/tpam.h55
-rw-r--r--include/linux/jffs2.h1
-rw-r--r--include/linux/kbd_kern.h4
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/key.h21
-rw-r--r--include/linux/libata.h15
-rw-r--r--include/linux/license.h14
-rw-r--r--include/linux/list.h9
-rw-r--r--include/linux/loop.h2
-rw-r--r--include/linux/memory_hotplug.h73
-rw-r--r--include/linux/mm.h13
-rw-r--r--include/linux/module.h23
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/netpoll.h1
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/node.h17
-rw-r--r--include/linux/nsc_gpio.h42
-rw-r--r--include/linux/pci.h13
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/plist.h248
-rw-r--r--include/linux/pnp.h7
-rw-r--r--include/linux/poison.h58
-rw-r--r--include/linux/proc_fs.h16
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/raid/bitmap.h11
-rw-r--r--include/linux/raid/linear.h2
-rw-r--r--include/linux/raid/md.h4
-rw-r--r--include/linux/raid/md_k.h10
-rw-r--r--include/linux/raid/md_p.h5
-rw-r--r--include/linux/raid/raid10.h7
-rw-r--r--include/linux/raid/raid5.h1
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/rtmutex.h117
-rw-r--r--include/linux/sched.h62
-rw-r--r--include/linux/scx200.h7
-rw-r--r--include/linux/scx200_gpio.h21
-rw-r--r--include/linux/security.h11
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/sunrpc/gss_api.h2
-rw-r--r--include/linux/suspend.h1
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/syscalls.h4
-rw-r--r--include/linux/sysctl.h4
-rw-r--r--include/linux/time.h28
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/topology.h3
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_flip.h2
-rw-r--r--include/linux/types.h7
-rw-r--r--include/linux/ufs_fs.h2
-rw-r--r--include/linux/unwind.h127
-rw-r--r--include/linux/videodev2.h6
-rw-r--r--include/linux/watchdog.h10
-rw-r--r--include/media/cx2341x.h10
-rw-r--r--include/net/tipc/tipc_bearer.h12
-rw-r--r--init/Kconfig23
-rw-r--r--init/initramfs.c36
-rw-r--r--init/main.c5
-rw-r--r--kernel/Makefile5
-rw-r--r--kernel/acct.c3
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/auditsc.c10
-rw-r--r--kernel/cpu.c18
-rw-r--r--kernel/cpuset.c26
-rw-r--r--kernel/exit.c16
-rw-r--r--kernel/fork.c34
-rw-r--r--kernel/futex.c1067
-rw-r--r--kernel/futex_compat.c14
-rw-r--r--kernel/hrtimer.c4
-rw-r--r--kernel/irq/Makefile2
-rw-r--r--kernel/irq/autoprobe.c56
-rw-r--r--kernel/irq/chip.c525
-rw-r--r--kernel/irq/handle.c118
-rw-r--r--kernel/irq/internals.h46
-rw-r--r--kernel/irq/manage.c156
-rw-r--r--kernel/irq/migration.c20
-rw-r--r--kernel/irq/proc.c30
-rw-r--r--kernel/irq/resend.c78
-rw-r--r--kernel/irq/spurious.c33
-rw-r--r--kernel/kprobes.c58
-rw-r--r--kernel/module.c126
-rw-r--r--kernel/mutex-debug.c17
-rw-r--r--kernel/mutex-debug.h25
-rw-r--r--kernel/mutex.c21
-rw-r--r--kernel/mutex.h6
-rw-r--r--kernel/power/Kconfig13
-rw-r--r--kernel/power/power.h4
-rw-r--r--kernel/power/snapshot.c112
-rw-r--r--kernel/power/swsusp.c18
-rw-r--r--kernel/profile.c2
-rw-r--r--kernel/ptrace.c23
-rw-r--r--kernel/rcupdate.c14
-rw-r--r--kernel/rcutorture.c201
-rw-r--r--kernel/resource.c90
-rw-r--r--kernel/rtmutex-debug.c513
-rw-r--r--kernel/rtmutex-debug.h37
-rw-r--r--kernel/rtmutex-tester.c440
-rw-r--r--kernel/rtmutex.c990
-rw-r--r--kernel/rtmutex.h29
-rw-r--r--kernel/rtmutex_common.h123
-rw-r--r--kernel/sched.c1210
-rw-r--r--kernel/signal.c35
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/softlockup.c4
-rw-r--r--kernel/sysctl.c38
-rw-r--r--kernel/time.c2
-rw-r--r--kernel/time/Makefile1
-rw-r--r--kernel/time/clocksource.c349
-rw-r--r--kernel/time/jiffies.c73
-rw-r--r--kernel/timer.c400
-rw-r--r--kernel/unwind.c918
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug46
-rw-r--r--lib/Makefile1
-rw-r--r--lib/idr.c43
-rw-r--r--lib/kernel_lock.c4
-rw-r--r--lib/plist.c118
-rw-r--r--lib/vsprintf.c2
-rw-r--r--lib/zlib_inflate/inffast.c6
-rw-r--r--lib/zlib_inflate/inftrees.c18
-rw-r--r--mm/Kconfig9
-rw-r--r--mm/filemap.c31
-rw-r--r--mm/filemap.h4
-rw-r--r--mm/filemap_xip.c2
-rw-r--r--mm/memory_hotplug.c126
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/readahead.c4
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c21
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/swap.c3
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/vmscan.c39
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/bluetooth/rfcomm/tty.c8
-rw-r--r--net/core/dev.c5
-rw-r--r--net/core/netpoll.c36
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/ipv4/tcp.c22
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/netrom/nr_route.c12
-rw-r--r--net/rxrpc/call.c3
-rw-r--r--net/rxrpc/connection.c3
-rw-r--r--net/rxrpc/krxsecd.c3
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c2
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c2
-rw-r--r--net/tipc/bcast.c83
-rw-r--r--net/tipc/bcast.h2
-rw-r--r--net/tipc/bearer.c72
-rw-r--r--net/tipc/cluster.c22
-rw-r--r--net/tipc/config.c87
-rw-r--r--net/tipc/core.c7
-rw-r--r--net/tipc/core.h21
-rw-r--r--net/tipc/dbg.c2
-rw-r--r--net/tipc/discover.c13
-rw-r--r--net/tipc/eth_media.c29
-rw-r--r--net/tipc/handler.c2
-rw-r--r--net/tipc/link.c217
-rw-r--r--net/tipc/name_distr.c30
-rw-r--r--net/tipc/name_table.c207
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/node.c78
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/node_subscr.c15
-rw-r--r--net/tipc/port.c45
-rw-r--r--net/tipc/ref.c35
-rw-r--r--net/tipc/socket.c100
-rw-r--r--net/tipc/subscr.c20
-rw-r--r--net/tipc/user_reg.c2
-rw-r--r--net/tipc/zone.c19
-rw-r--r--scripts/Makefile.build14
-rw-r--r--scripts/Makefile.host8
-rw-r--r--scripts/Makefile.modinst2
-rw-r--r--scripts/Makefile.modpost2
-rw-r--r--scripts/basic/Makefile6
-rw-r--r--scripts/basic/split-include.c226
-rw-r--r--scripts/export_report.pl169
-rw-r--r--scripts/genksyms/genksyms.c77
-rw-r--r--scripts/genksyms/genksyms.h1
-rw-r--r--scripts/genksyms/lex.c_shipped2
-rw-r--r--scripts/genksyms/lex.l2
-rw-r--r--scripts/kconfig/conf.c23
-rw-r--r--scripts/kconfig/confdata.c491
-rw-r--r--scripts/kconfig/expr.c53
-rw-r--r--scripts/kconfig/expr.h20
-rw-r--r--scripts/kconfig/gconf.c12
-rw-r--r--scripts/kconfig/lex.zconf.c_shipped91
-rw-r--r--scripts/kconfig/lkc.h10
-rw-r--r--scripts/kconfig/lkc_proto.h5
-rw-r--r--scripts/kconfig/menu.c34
-rw-r--r--scripts/kconfig/qconf.cc1048
-rw-r--r--scripts/kconfig/qconf.h158
-rw-r--r--scripts/kconfig/symbol.c50
-rw-r--r--scripts/kconfig/util.c4
-rw-r--r--scripts/kconfig/zconf.gperf3
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped181
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped930
-rw-r--r--scripts/kconfig/zconf.y33
-rw-r--r--scripts/mod/mk_elfconfig.c6
-rw-r--r--scripts/mod/modpost.c177
-rw-r--r--scripts/mod/modpost.h4
-rwxr-xr-xscripts/package/mkspec5
-rw-r--r--scripts/rt-tester/check-all.sh22
-rw-r--r--scripts/rt-tester/rt-tester.py222
-rw-r--r--scripts/rt-tester/t2-l1-2rt-sameprio.tst99
-rw-r--r--scripts/rt-tester/t2-l1-pi.tst82
-rw-r--r--scripts/rt-tester/t2-l1-signal.tst77
-rw-r--r--scripts/rt-tester/t2-l2-2rt-deadlock.tst89
-rw-r--r--scripts/rt-tester/t3-l1-pi-1rt.tst92
-rw-r--r--scripts/rt-tester/t3-l1-pi-2rt.tst93
-rw-r--r--scripts/rt-tester/t3-l1-pi-3rt.tst92
-rw-r--r--scripts/rt-tester/t3-l1-pi-signal.tst98
-rw-r--r--scripts/rt-tester/t3-l1-pi-steal.tst96
-rw-r--r--scripts/rt-tester/t3-l2-pi.tst92
-rw-r--r--scripts/rt-tester/t4-l2-pi-deboost.tst123
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst183
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost.tst143
-rw-r--r--scripts/setlocalversion4
-rw-r--r--security/Kconfig20
-rw-r--r--security/dummy.c3
-rw-r--r--security/keys/internal.h4
-rw-r--r--security/keys/key.c57
-rw-r--r--security/keys/keyctl.c63
-rw-r--r--security/keys/keyring.c25
-rw-r--r--security/keys/proc.c7
-rw-r--r--security/keys/process_keys.c24
-rw-r--r--security/keys/request_key.c66
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/user_defined.c25
-rw-r--r--security/selinux/hooks.c56
-rw-r--r--security/selinux/include/av_perm_to_string.h3
-rw-r--r--security/selinux/include/av_permissions.h4
-rw-r--r--security/selinux/include/objsec.h2
-rw-r--r--sound/arm/aaci.c5
-rw-r--r--sound/core/seq/seq_memory.h2
-rw-r--r--sound/drivers/mpu401/mpu401.c5
-rw-r--r--sound/isa/es18xx.c3
-rw-r--r--sound/isa/gus/interwave.c8
-rw-r--r--sound/isa/sb/sb16.c3
-rw-r--r--sound/oss/Kconfig15
-rw-r--r--sound/oss/cs4232.c2
-rw-r--r--sound/oss/forte.c5
-rw-r--r--sound/oss/sb_ess.c28
-rw-r--r--sound/oss/via82cxxx_audio.c7
-rw-r--r--sound/pci/bt87x.c5
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/sonicvibes.c4
-rw-r--r--sound/ppc/pmac.c14
-rw-r--r--sound/ppc/toonie.c0
-rw-r--r--sound/sparc/cs4231.c4
-rw-r--r--sound/sparc/dbri.c4
-rw-r--r--usr/Makefile3
1692 files changed, 64704 insertions, 24370 deletions
diff --git a/CREDITS b/CREDITS
index 70922bd656c2..66b9e7a9abff 100644
--- a/CREDITS
+++ b/CREDITS
@@ -24,6 +24,11 @@ S: C. Negri 6, bl. D3
S: Iasi 6600
S: Romania
+N: Mark Adler
+E: madler@alumni.caltech.edu
+W: http://alumnus.caltech.edu/~madler/
+D: zlib decompression
+
N: Monalisa Agrawal
E: magrawal@nortelnetworks.com
D: Basic Interphase 5575 driver with UBR and ABR support.
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 5a2882d275ba..66e1cf733571 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -10,7 +10,8 @@ DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
procfs-guide.xml writing_usb_driver.xml \
kernel-api.xml journal-api.xml lsm.xml usb.xml \
- gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml
+ gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
+ genericirq.xml
###
# The build process is as follows (targets):
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
new file mode 100644
index 000000000000..0f4a4b6321e4
--- /dev/null
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -0,0 +1,474 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="Generic-IRQ-Guide">
+ <bookinfo>
+ <title>Linux generic IRQ handling</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Thomas</firstname>
+ <surname>Gleixner</surname>
+ <affiliation>
+ <address>
+ <email>tglx@linutronix.de</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Ingo</firstname>
+ <surname>Molnar</surname>
+ <affiliation>
+ <address>
+ <email>mingo@elte.hu</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2005-2006</year>
+ <holder>Thomas Gleixner</holder>
+ </copyright>
+ <copyright>
+ <year>2005-2006</year>
+ <holder>Ingo Molnar</holder>
+ </copyright>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ The generic interrupt handling layer is designed to provide a
+ complete abstraction of interrupt handling for device drivers.
+ It is able to handle all the different types of interrupt controller
+ hardware. Device drivers use generic API functions to request, enable,
+ disable and free interrupts. The drivers do not have to know anything
+ about interrupt hardware details, so they can be used on different
+ platforms without code changes.
+ </para>
+ <para>
+ This documentation is provided to developers who want to implement
+ an interrupt subsystem for their architecture, based on the generic
+ IRQ handling layer.
+ </para>
+ </chapter>
+
+ <chapter id="rationale">
+ <title>Rationale</title>
+ <para>
+ The original implementation of interrupt handling in Linux uses
+ the __do_IRQ() super-handler, which is able to deal with every
+ type of interrupt logic.
+ </para>
+ <para>
+ Originally, Russell King identified different types of handlers to
+ build a quite universal set for the ARM interrupt handler
+ implementation in Linux 2.5/2.6. He distinguished between:
+ <itemizedlist>
+ <listitem><para>Level type</para></listitem>
+ <listitem><para>Edge type</para></listitem>
+ <listitem><para>Simple type</para></listitem>
+ </itemizedlist>
+ In the SMP world of the __do_IRQ() super-handler another type
+ was identified:
+ <itemizedlist>
+ <listitem><para>Per CPU type</para></listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ This split implementation of highlevel IRQ handlers allows us to
+ optimize the flow of the interrupt handling for each specific
+ interrupt type. This reduces complexity in that particular codepath
+ and allows the optimized handling of a given type.
+ </para>
+ <para>
+ The original general IRQ implementation used hw_interrupt_type
+ structures and their ->ack(), ->end() [etc.] callbacks to
+ differentiate the flow control in the super-handler. This leads to
+ a mix of flow logic and lowlevel hardware logic, and it also leads
+ to unnecessary code duplication: for example on i386, there is an
+ ioapic_level_irq and an ioapic_edge_irq irq-type which share many
+ of the lowlevel details but have different flow handling.
+ </para>
+ <para>
+ A more natural abstraction is the clean separation of the
+ 'irq flow' and the 'chip details'.
+ </para>
+ <para>
+ Analysing a couple of architectures' IRQ subsystem implementations
+ reveals that most of them can use a generic set of 'irq flow'
+ methods and only need to add the chip level specific code.
+ The separation is also valuable for (sub)architectures
+ which need specific quirks in the irq flow itself but not in the
+ chip-details - and thus provides a more transparent IRQ subsystem
+ design.
+ </para>
+ <para>
+ Each interrupt descriptor is assigned its own highlevel flow
+ handler, which is normally one of the generic
+ implementations. (This highlevel flow handler implementation also
+ makes it simple to provide demultiplexing handlers which can be
+ found in embedded platforms on various architectures.)
+ </para>
+ <para>
+ The separation makes the generic interrupt handling layer more
+ flexible and extensible. For example, a (sub)architecture can
+ use a generic irq-flow implementation for 'level type' interrupts
+ and add a (sub)architecture specific 'edge type' implementation.
+ </para>
+ <para>
+ To make the transition to the new model easier and prevent the
+ breakage of existing implementations, the __do_IRQ() super-handler
+ is still available. This leads to a kind of duality for the time
+ being. Over time the new model should be used in more and more
+ architectures, as it enables smaller and cleaner IRQ subsystems.
+ </para>
+ </chapter>
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ None (knock on wood).
+ </para>
+ </chapter>
+
+ <chapter id="Abstraction">
+ <title>Abstraction layers</title>
+ <para>
+ There are three main levels of abstraction in the interrupt code:
+ <orderedlist>
+ <listitem><para>Highlevel driver API</para></listitem>
+ <listitem><para>Highlevel IRQ flow handlers</para></listitem>
+ <listitem><para>Chiplevel hardware encapsulation</para></listitem>
+ </orderedlist>
+ </para>
+ <sect1>
+ <title>Interrupt control flow</title>
+ <para>
+ Each interrupt is described by an interrupt descriptor structure
+ irq_desc. The interrupt is referenced by an 'unsigned int' numeric
+ value which selects the corresponding interrupt description structure
+ in the descriptor structures array.
+ The descriptor structure contains status information and pointers
+ to the interrupt flow method and the interrupt chip structure
+ which are assigned to this interrupt.
+ </para>
+ <para>
+ Whenever an interrupt triggers, the lowlevel arch code calls into
+ the generic interrupt code by calling desc->handle_irq().
+ This highlevel IRQ handling function only uses desc->chip primitives
+ referenced by the assigned chip descriptor structure.
+ </para>
+ </sect1>
+ <sect1>
+ <title>Highlevel Driver API</title>
+ <para>
+ The highlevel Driver API consists of the following functions:
+ <itemizedlist>
+ <listitem><para>request_irq()</para></listitem>
+ <listitem><para>free_irq()</para></listitem>
+ <listitem><para>disable_irq()</para></listitem>
+ <listitem><para>enable_irq()</para></listitem>
+ <listitem><para>disable_irq_nosync() (SMP only)</para></listitem>
+ <listitem><para>synchronize_irq() (SMP only)</para></listitem>
+ <listitem><para>set_irq_type()</para></listitem>
+ <listitem><para>set_irq_wake()</para></listitem>
+ <listitem><para>set_irq_data()</para></listitem>
+ <listitem><para>set_irq_chip()</para></listitem>
+ <listitem><para>set_irq_chip_data()</para></listitem>
+ </itemizedlist>
+ See the autogenerated function documentation for details.
+ </para>
+ </sect1>
+ <sect1>
+ <title>Highlevel IRQ flow handlers</title>
+ <para>
+ The generic layer provides a set of pre-defined irq-flow methods:
+ <itemizedlist>
+ <listitem><para>handle_level_irq</para></listitem>
+ <listitem><para>handle_edge_irq</para></listitem>
+ <listitem><para>handle_simple_irq</para></listitem>
+ <listitem><para>handle_percpu_irq</para></listitem>
+ </itemizedlist>
+ The interrupt flow handlers (either predefined or architecture
+ specific) are assigned to specific interrupts by the architecture
+ either during bootup or during device initialization.
+ </para>
+ <sect2>
+ <title>Default flow implementations</title>
+ <sect3>
+ <title>Helper functions</title>
+ <para>
+ The helper functions call the chip primitives and
+ are used by the default flow implementations.
+ The following helper functions are implemented (simplified excerpt):
+ <programlisting>
+default_enable(irq)
+{
+ desc->chip->unmask(irq);
+}
+
+default_disable(irq)
+{
+ if (!delay_disable(irq))
+ desc->chip->mask(irq);
+}
+
+default_ack(irq)
+{
+ chip->ack(irq);
+}
+
+default_mask_ack(irq)
+{
+ if (chip->mask_ack) {
+ chip->mask_ack(irq);
+ } else {
+ chip->mask(irq);
+ chip->ack(irq);
+ }
+}
+
+noop(irq)
+{
+}
+
+ </programlisting>
+ </para>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Default flow handler implementations</title>
+ <sect3>
+ <title>Default Level IRQ flow handler</title>
+ <para>
+ handle_level_irq provides a generic implementation
+ for level-triggered interrupts.
+ </para>
+ <para>
+ The following control flow is implemented (simplified excerpt):
+ <programlisting>
+desc->chip->start();
+handle_IRQ_event(desc->action);
+desc->chip->end();
+ </programlisting>
+ </para>
+ </sect3>
+ <sect3>
+ <title>Default Edge IRQ flow handler</title>
+ <para>
+ handle_edge_irq provides a generic implementation
+ for edge-triggered interrupts.
+ </para>
+ <para>
+ The following control flow is implemented (simplified excerpt):
+ <programlisting>
+if (desc->status &amp; running) {
+ desc->chip->hold();
+ desc->status |= pending | masked;
+ return;
+}
+desc->chip->start();
+desc->status |= running;
+do {
+ if (desc->status &amp; masked)
+ desc->chip->enable();
+ desc->status &amp;= ~pending;
+ handle_IRQ_event(desc->action);
+} while (desc->status &amp; pending);
+desc->status &amp;= ~running;
+desc->chip->end();
+ </programlisting>
+ </para>
+ </sect3>
+ <sect3>
+ <title>Default simple IRQ flow handler</title>
+ <para>
+ handle_simple_irq provides a generic implementation
+ for simple interrupts.
+ </para>
+ <para>
+ Note: The simple flow handler does not call any
+ handler/chip primitives.
+ </para>
+ <para>
+ The following control flow is implemented (simplified excerpt):
+ <programlisting>
+handle_IRQ_event(desc->action);
+ </programlisting>
+ </para>
+ </sect3>
+ <sect3>
+ <title>Default per CPU flow handler</title>
+ <para>
+ handle_percpu_irq provides a generic implementation
+ for per CPU interrupts.
+ </para>
+ <para>
+ Per CPU interrupts are only available on SMP and
+ the handler provides a simplified version without
+ locking.
+ </para>
+ <para>
+ The following control flow is implemented (simplified excerpt):
+ <programlisting>
+desc->chip->start();
+handle_IRQ_event(desc->action);
+desc->chip->end();
+ </programlisting>
+ </para>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Quirks and optimizations</title>
+ <para>
+ The generic functions are intended for 'clean' architectures and chips,
+ which have no platform-specific IRQ handling quirks. If an architecture
+ needs to implement quirks on the 'flow' level then it can do so by
+ overriding the highlevel irq-flow handler.
+ </para>
+ </sect2>
+ <sect2>
+ <title>Delayed interrupt disable</title>
+ <para>
+ This per interrupt selectable feature, which was introduced by Russell
+ King in the ARM interrupt implementation, does not mask an interrupt
+ at the hardware level when disable_irq() is called. The interrupt is
+ kept enabled and is masked in the flow handler when an interrupt event
+ happens. This prevents losing edge interrupts on hardware which does
+ not store an edge interrupt event while the interrupt is disabled at
+ the hardware level. When an interrupt arrives while the IRQ_DISABLED
+ flag is set, then the interrupt is masked at the hardware level and
+ the IRQ_PENDING bit is set. When the interrupt is re-enabled by
+ enable_irq() the pending bit is checked and if it is set, the
+ interrupt is resent either via hardware or by a software resend
+ mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when
+ you want to use the delayed interrupt disable feature and your
+ hardware is not capable of retriggering an interrupt.)
+ The delayed interrupt disable can be runtime enabled, per interrupt,
+ by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field.
+ </para>
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Chiplevel hardware encapsulation</title>
+ <para>
+ The chip level hardware descriptor structure irq_chip
+ contains all the direct chip relevant functions, which
+ can be utilized by the irq flow implementations.
+ <itemizedlist>
+ <listitem><para>ack()</para></listitem>
+ <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem>
+ <listitem><para>mask()</para></listitem>
+ <listitem><para>unmask()</para></listitem>
+ <listitem><para>retrigger() - Optional</para></listitem>
+ <listitem><para>set_type() - Optional</para></listitem>
+ <listitem><para>set_wake() - Optional</para></listitem>
+ </itemizedlist>
+ These primitives are strictly intended to mean what they say: ack means
+ ACK, masking means masking of an IRQ line, etc. It is up to the flow
+ handler(s) to use these basic units of lowlevel functionality.
+ </para>
+ </sect1>
+ </chapter>
+
+ <chapter id="doirq">
+ <title>__do_IRQ entry point</title>
+ <para>
+ The original implementation __do_IRQ() is an alternative entry
+ point for all types of interrupts.
+ </para>
+ <para>
+ This handler turned out not to be suitable for all
+ interrupt hardware and was therefore reimplemented with split
+ functionality for edge/level/simple/percpu interrupts. This is not
+ only a functional optimization. It also shortens code paths for
+ interrupts.
+ </para>
+ <para>
+ To make use of the split implementation, replace the call to
+ __do_IRQ() by a call to desc->handle_irq() and associate
+ the appropriate flow handler function with desc->handle_irq().
+ In most cases the generic handler implementations should
+ be sufficient.
+ </para>
+ </chapter>
+
+ <chapter id="locking">
+ <title>Locking on SMP</title>
+ <para>
+ The locking of chip registers is up to the architecture that
+ defines the chip primitives. There is a chip->lock field that can be used
+ for serialization, but the generic layer does not touch it. The per-irq
+ structure is protected via desc->lock, by the generic layer.
+ </para>
+ </chapter>
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the generic IRQ layer.
+ </para>
+!Iinclude/linux/irq.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the kernel API functions
+ which are exported.
+ </para>
+!Ekernel/irq/manage.c
+!Ekernel/irq/chip.c
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Ikernel/irq/handle.c
+!Ikernel/irq/chip.c
+ </chapter>
+
+ <chapter id="credits">
+ <title>Credits</title>
+ <para>
+ The following people have contributed to this document:
+ <orderedlist>
+ <listitem><para>Thomas Gleixner<email>tglx@linutronix.de</email></para></listitem>
+ <listitem><para>Ingo Molnar<email>mingo@elte.hu</email></para></listitem>
+ </orderedlist>
+ </para>
+ </chapter>
+</book>
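
To complement the highlevel driver API listed in the genericirq document
above, here is a minimal sketch of a driver requesting and releasing an
interrupt line. It is only an illustration, not part of the patch: the
foo_dev structure, the "foo" name and the IRQ number are invented, and the
handler prototype follows the 2.6.18-era request_irq() convention (it still
receives a struct pt_regs pointer).

    #include <linux/interrupt.h>

    struct foo_dev {                        /* hypothetical per-device state */
            void __iomem *regs;
            int irq;
    };

    /* Acknowledge the device and tell the core the interrupt was handled. */
    static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
            struct foo_dev *foo = dev_id;

            /* device-specific status read/clear would go here */
            (void)foo;
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(struct foo_dev *foo)
    {
            /* The flow details (level/edge) stay hidden behind the generic
             * layer and the irq_chip assigned to this interrupt. */
            return request_irq(foo->irq, foo_interrupt, 0, "foo", foo);
    }

    static void foo_teardown_irq(struct foo_dev *foo)
    {
            free_irq(foo->irq, foo);
    }

On the architecture side, platform code is expected to have assigned an
irq_chip and one of the flow handlers named above (handle_level_irq, for
instance) to the interrupt before a driver calls request_irq().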
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 158ffe9bfade..644c3884fab9 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1590,7 +1590,7 @@ the amount of locking which needs to be done.
<para>
Our final dilemma is this: when can we actually destroy the
removed element? Remember, a reader might be stepping through
- this element in the list right now: it we free this element and
+ this element in the list right now: if we free this element and
the <symbol>next</symbol> pointer changes, the reader will jump
off into garbage and crash. We need to wait until we know that
all the readers who were traversing the list when we deleted the
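
The hunk above fixes a sentence in the RCU list-removal discussion. The
pattern that discussion describes, written out as a minimal sketch (the item
structure, list and lock names are invented for the example), is: the writer
unlinks the element under its lock, waits for a grace period, and only then
frees the memory.

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head list;
            int key;
    };

    static LIST_HEAD(item_list);
    static DEFINE_SPINLOCK(item_lock);

    /* Readers may still be walking the list when we unlink the element,
     * so wait for a grace period before the memory is actually freed. */
    static void item_remove(struct item *it)
    {
            spin_lock(&item_lock);
            list_del_rcu(&it->list);
            spin_unlock(&item_lock);

            synchronize_rcu();      /* all pre-existing readers are done */
            kfree(it);
    }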
diff --git a/Documentation/IRQ.txt b/Documentation/IRQ.txt
new file mode 100644
index 000000000000..1011e7175021
--- /dev/null
+++ b/Documentation/IRQ.txt
@@ -0,0 +1,22 @@
+What is an IRQ?
+
+An IRQ is an interrupt request from a device.
+Currently they can come in over a pin, or over a packet.
+Several devices may be connected to the same pin thus
+sharing an IRQ.
+
+An IRQ number is a kernel identifier used to talk about a hardware
+interrupt source. Typically this is an index into the global irq_desc
+array, but except for what linux/interrupt.h implements, the details
+are architecture specific.
+
+An IRQ number is an enumeration of the possible interrupt sources on a
+machine. Typically what is enumerated is the number of input pins on
+all of the interrupt controllers in the system. In the case of ISA
+what is enumerated are the 16 input pins on the two i8259 interrupt
+controllers.
+
+Architectures can assign additional meaning to the IRQ numbers, and
+are encouraged to in the case where there is any manual configuration
+of the hardware involved. The ISA IRQs are a classic example of
+assigning this kind of additional meaning.
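
To illustrate the "index into the global irq_desc array" point above, a
sketch of the typical lookup follows; the helper name is invented, and the
bounds check against NR_IRQS reflects the common case rather than a guarantee
for every architecture.

    #include <linux/irq.h>

    /* Typical mapping from an IRQ number to its descriptor. */
    static struct irq_desc *foo_irq_to_desc(unsigned int irq)
    {
            if (irq >= NR_IRQS)
                    return NULL;
            return irq_desc + irq;
    }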
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index e4c38152f7f7..a4948591607d 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -7,7 +7,7 @@ The CONFIG_RCU_TORTURE_TEST config option is available for all RCU
implementations. It creates an rcutorture kernel module that can
be loaded to run a torture test. The test periodically outputs
status messages via printk(), which can be examined via the dmesg
-command (perhaps grepping for "rcutorture"). The test is started
+command (perhaps grepping for "torture"). The test is started
when the module is loaded, and stops when the module is unloaded.
However, actually setting this config option to "y" results in the system
@@ -35,6 +35,19 @@ stat_interval The number of seconds between output of torture
be printed -only- when the module is unloaded, and this
is the default.
+shuffle_interval
+ The number of seconds to keep the test threads affinitied
+ to a particular subset of the CPUs. Used in conjunction
+ with test_no_idle_hz.
+
+test_no_idle_hz Whether or not to test the ability of RCU to operate in
+ a kernel that disables the scheduling-clock interrupt to
+ idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
+
+torture_type The type of RCU to test: "rcu" for the rcu_read_lock()
+ API, "rcu_bh" for the rcu_read_lock_bh() API, and "srcu"
+ for the "srcu_read_lock()" API.
+
verbose Enable debug printk()s. Default is disabled.
@@ -42,14 +55,14 @@ OUTPUT
The statistics output is as follows:
- rcutorture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
- rcutorture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
- rcutorture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
- rcutorture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
- rcutorture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
- rcutorture: --- End of test
+ rcu-torture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
+ rcu-torture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
+ rcu-torture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
+ rcu-torture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
+ rcu-torture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
+ rcu-torture: --- End of test
-The command "dmesg | grep rcutorture:" will extract this information on
+The command "dmesg | grep torture:" will extract this information on
most systems. On more esoteric configurations, it may be necessary to
use other commands to access the output of the printk()s used by
the RCU torture test. The printk()s use KERN_ALERT, so they should
@@ -115,8 +128,9 @@ The following script may be used to torture RCU:
modprobe rcutorture
sleep 100
rmmod rcutorture
- dmesg | grep rcutorture:
+ dmesg | grep torture:
The output can be manually inspected for the error flag of "!!!".
One could of course create a more elaborate script that automatically
-checked for such errors.
+checked for such errors. The "rmmod" command forces a "SUCCESS" or
+"FAILURE" indication to be printk()ed.
diff --git a/Documentation/arm/Samsung-S3C24XX/Overview.txt b/Documentation/arm/Samsung-S3C24XX/Overview.txt
index 8c6ee684174c..3e46d2a31158 100644
--- a/Documentation/arm/Samsung-S3C24XX/Overview.txt
+++ b/Documentation/arm/Samsung-S3C24XX/Overview.txt
@@ -7,11 +7,13 @@ Introduction
------------
The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported
- by the 's3c2410' architecture of ARM Linux. Currently the S3C2410 and
- the S3C2440 are supported CPUs.
+ by the 's3c2410' architecture of ARM Linux. Currently the S3C2410,
+ S3C2440 and S3C2442 devices are supported.
Support for the S3C2400 series is in progress.
+ Support for the S3C2412 and S3C2413 CPUs is being merged.
+
Configuration
-------------
@@ -43,9 +45,18 @@ Machines
Samsung's own development board, geared for PDA work.
+ Samsung/Aiji SMDK2412
+
+ The S3C2412 version of the SMDK2440.
+
+ Samsung/Aiji SMDK2413
+
+ The S3C2413 version of the SMDK2440.
+
Samsung/Meritech SMDK2440
- The S3C2440 compatible version of the SMDK2440
+ The S3C2440 compatible version of the SMDK2440, which has the
+ option of an S3C2440 or S3C2442 CPU module.
Thorcom VR1000
@@ -211,24 +222,6 @@ Port Contributors
Lucas Correia Villa Real (S3C2400 port)
-Document Changes
-----------------
-
- 05 Sep 2004 - BJD - Added Document Changes section
- 05 Sep 2004 - BJD - Added Klaus Fetscher to list of contributors
- 25 Oct 2004 - BJD - Added Dimitry Andric to list of contributors
- 25 Oct 2004 - BJD - Updated the MTD from the 2.6.9 merge
- 21 Jan 2005 - BJD - Added rx3715, added Shannon to contributors
- 10 Feb 2005 - BJD - Added Guillaume Gourat to contributors
- 02 Mar 2005 - BJD - Added SMDK2440 to list of machines
- 06 Mar 2005 - BJD - Added Christer Weinigel
- 08 Mar 2005 - BJD - Added LCVR to list of people, updated introduction
- 08 Mar 2005 - BJD - Added section on adding machines
- 09 Sep 2005 - BJD - Added section on platform data
- 11 Feb 2006 - BJD - Added I2C, RTC and Watchdog sections
- 11 Feb 2006 - BJD - Added Osiris machine, and S3C2400 information
-
-
Document Author
---------------
diff --git a/Documentation/arm/Samsung-S3C24XX/S3C2412.txt b/Documentation/arm/Samsung-S3C24XX/S3C2412.txt
new file mode 100644
index 000000000000..cb82a7fc7901
--- /dev/null
+++ b/Documentation/arm/Samsung-S3C24XX/S3C2412.txt
@@ -0,0 +1,120 @@
+ S3C2412 ARM Linux Overview
+ ==========================
+
+Introduction
+------------
+
+ The S3C2412 is part of the S3C24XX range of ARM9 System-on-Chip CPUs
+ from Samsung. This part has an ARM926-EJS core, capable of running up
+ to 266MHz (see data-sheet for more information)
+
+
+Clock
+-----
+
+ The core clock code provides a set of clocks to the drivers, and allows
+ for source selection and a number of other features.
+
+
+Power
+-----
+
+ No support for suspend/resume to RAM in the current system.
+
+
+DMA
+---
+
+ No current support for DMA.
+
+
+GPIO
+----
+
+ There is support for setting the GPIO pins to input/output/special
+ function and for reading or writing to them.
+
+
+UART
+----
+
+ The UART hardware is similar to the S3C2440, and is supported by the
+ s3c2410 driver in the drivers/serial directory.
+
+
+NAND
+----
+
+ The NAND hardware is similar to the S3C2440, and is supported by the
+ s3c2410 driver in the drivers/mtd/nand directory.
+
+
+USB Host
+--------
+
+ The USB hardware is similar to the S3C2410, with extended clock source
+ control. The OHCI portion is supported by the ohci-s3c2410 driver, and
+ the clock control selection is supported by the core clock code.
+
+
+USB Device
+----------
+
+ No current support in the kernel
+
+
+IRQs
+----
+
+ All the standard and external interrupt sources are supported. The
+ extra sub-sources are not yet supported.
+
+
+RTC
+---
+
+ The RTC hardware is similar to the S3C2410, and is supported by the
+ s3c2410-rtc driver.
+
+
+Watchdog
+--------
+
+ The watchdog hardware is the same as the S3C2410, and is supported by
+ the s3c2410_wdt driver.
+
+
+MMC/SD/SDIO
+-----------
+
+ No current support for the MMC/SD/SDIO block.
+
+IIC
+---
+
+ The IIC hardware is the same as the S3C2410, and is supported by the
+ i2c-s3c24xx driver.
+
+
+IIS
+---
+
+ No current support for the IIS interface.
+
+
+SPI
+---
+
+ No current support for the SPI interfaces.
+
+
+ATA
+---
+
+ No current support for the on-board ATA block.
+
+
+Document Author
+---------------
+
+Ben Dooks, (c) 2006 Simtec Electronics
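
As a small illustration of the GPIO support described above, the sketch
below drives a hypothetical LED using the existing s3c24xx GPIO helpers.
The chosen pin (S3C2410_GPB5), its function constant and the active-low
polarity are assumptions made for the example; none of them are defined by
this patch.

    #include <asm/hardware.h>
    #include <asm/arch/regs-gpio.h>

    /* Configure GPB5 as an output and drive a (hypothetical) LED. */
    static void board_led_init(void)
    {
            s3c2410_gpio_cfgpin(S3C2410_GPB5, S3C2410_GPB5_OUTP);
            s3c2410_gpio_setpin(S3C2410_GPB5, 1);   /* off, assuming active low */
    }

    static void board_led_set(int on)
    {
            s3c2410_gpio_setpin(S3C2410_GPB5, on ? 0 : 1);
    }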
diff --git a/Documentation/arm/Samsung-S3C24XX/S3C2413.txt b/Documentation/arm/Samsung-S3C24XX/S3C2413.txt
new file mode 100644
index 000000000000..ab2a88858f12
--- /dev/null
+++ b/Documentation/arm/Samsung-S3C24XX/S3C2413.txt
@@ -0,0 +1,21 @@
+ S3C2413 ARM Linux Overview
+ ==========================
+
+Introduction
+------------
+
+ The S3C2413 is an extended version of the S3C2412, with a camera
+ interface and mobile DDR memory support. See the S3C2412 support
+ documentation for more information.
+
+
+Camera Interface
+----------------
+
+ This block is currently not supported.
+
+
+Document Author
+---------------
+
+Ben Dooks, (c) 2006 Simtec Electronics
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 23a1c2402bcc..2a63d5662a93 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -157,13 +157,13 @@ For example, smp_mb__before_atomic_dec() can be used like so:
smp_mb__before_atomic_dec();
atomic_dec(&obj->ref_count);
-It makes sure that all memory operations preceeding the atomic_dec()
+It makes sure that all memory operations preceding the atomic_dec()
call are strongly ordered with respect to the atomic counter
-operation. In the above example, it guarentees that the assignment of
+operation. In the above example, it guarantees that the assignment of
"1" to obj->dead will be globally visible to other cpus before the
atomic counter decrement.
-Without the explicitl smp_mb__before_atomic_dec() call, the
+Without the explicit smp_mb__before_atomic_dec() call, the
implementation could legally allow the atomic counter update visible
to other cpus before the "obj->dead = 1;" assignment.
@@ -173,11 +173,11 @@ ordering with respect to memory operations after an atomic_dec() call
(smp_mb__{before,after}_atomic_inc()).
A missing memory barrier in the cases where they are required by the
-atomic_t implementation above can have disasterous results. Here is
-an example, which follows a pattern occuring frequently in the Linux
+atomic_t implementation above can have disastrous results. Here is
+an example, which follows a pattern occurring frequently in the Linux
kernel. It is the use of atomic counters to implement reference
counting, and it works such that once the counter falls to zero it can
-be guarenteed that no other entity can be accessing the object:
+be guaranteed that no other entity can be accessing the object:
static void obj_list_add(struct obj *obj)
{
@@ -291,9 +291,9 @@ to the size of an "unsigned long" C data type, and are least of that
size. The endianness of the bits within each "unsigned long" are the
native endianness of the cpu.
- void set_bit(unsigned long nr, volatils unsigned long *addr);
- void clear_bit(unsigned long nr, volatils unsigned long *addr);
- void change_bit(unsigned long nr, volatils unsigned long *addr);
+ void set_bit(unsigned long nr, volatile unsigned long *addr);
+ void clear_bit(unsigned long nr, volatile unsigned long *addr);
+ void change_bit(unsigned long nr, volatile unsigned long *addr);
These routines set, clear, and change, respectively, the bit number
indicated by "nr" on the bit mask pointed to by "ADDR".
@@ -301,9 +301,9 @@ indicated by "nr" on the bit mask pointed to by "ADDR".
They must execute atomically, yet there are no implicit memory barrier
semantics required of these interfaces.
- int test_and_set_bit(unsigned long nr, volatils unsigned long *addr);
- int test_and_clear_bit(unsigned long nr, volatils unsigned long *addr);
- int test_and_change_bit(unsigned long nr, volatils unsigned long *addr);
+ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
Like the above, except that these routines return a boolean which
indicates whether the changed bit was set _BEFORE_ the atomic bit
@@ -335,7 +335,7 @@ subsequent memory operation is made visible. For example:
/* ... */;
obj->killed = 1;
-The implementation of test_and_set_bit() must guarentee that
+The implementation of test_and_set_bit() must guarantee that
"obj->dead = 1;" is visible to cpus before the atomic memory operation
done by test_and_set_bit() becomes visible. Likewise, the atomic
memory operation done by test_and_set_bit() must become visible before
@@ -474,7 +474,7 @@ Now, as far as memory barriers go, as long as spin_lock()
strictly orders all subsequent memory operations (including
the cas()) with respect to itself, things will be fine.
-Said another way, _atomic_dec_and_lock() must guarentee that
+Said another way, _atomic_dec_and_lock() must guarantee that
a counter dropping to zero is never made visible before the
spinlock being acquired.
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
new file mode 100644
index 000000000000..d3e17447321c
--- /dev/null
+++ b/Documentation/console/console.txt
@@ -0,0 +1,144 @@
+Console Drivers
+===============
+
+The Linux kernel has two general types of console drivers. The first type is
+assigned by the kernel to all the virtual consoles during the boot process.
+This type will be called 'system driver', and only one system driver is allowed
+to exist. The system driver is persistent and it can never be unloaded, though
+it may become inactive.
+
+The second type has to be explicitly loaded and unloaded. This will be called
+'modular driver' by this document. Multiple modular drivers can coexist at
+any time with each driver sharing the console with other drivers including
+the system driver. However, modular drivers cannot take over the console
+that is currently occupied by another modular driver. (Exception: Drivers that
+call take_over_console() will succeed in the takeover regardless of the type
+of driver occupying the consoles.) They can only take over the console that is
+occupied by the system driver. By the same token, if the modular driver is
+released by the console, the system driver will take over.
+
+Modular drivers, from the programmer's point of view, have to call:
+
+ take_over_console() - load and bind driver to console layer
+ give_up_console() - unbind and unload driver
+
+In newer kernels, the following are also available:
+
+ register_con_driver()
+ unregister_con_driver()
+
+If sysfs is enabled, the contents of /sys/class/vtconsole can be
+examined. This shows the console backends currently registered by the
+system, which are named vtcon<n>, where <n> is an integer from 0 to 15. Thus:
+
+ ls /sys/class/vtconsole
+ . .. vtcon0 vtcon1
+
+Each directory in /sys/class/vtconsole has 3 files:
+
+ ls /sys/class/vtconsole/vtcon0
+ . .. bind name uevent
+
+What do these files signify?
+
+ 1. bind - this is a read/write file. It shows the status of the driver if
+ read, or acts to bind or unbind the driver to the virtual consoles
+ when written to. The possible values are:
+
+ 0 - means the driver is not bound and if echo'ed, commands the driver
+ to unbind
+
+ 1 - means the driver is bound and if echo'ed, commands the driver to
+ bind
+
+ 2. name - read-only file. Shows the name of the driver in this format:
+
+ cat /sys/class/vtconsole/vtcon0/name
+ (S) VGA+
+
+ '(S)' stands for a (S)ystem driver, i.e., it cannot be directly
+ commanded to bind or unbind
+
+ 'VGA+' is the name of the driver
+
+ cat /sys/class/vtconsole/vtcon1/name
+ (M) frame buffer device
+
+ In this case, '(M)' stands for a (M)odular driver, one that can be
+ directly commanded to bind or unbind.
+
+ 3. uevent - ignore this file
+
+When unbinding, the modular driver is detached first, and then the system
+driver takes over the consoles vacated by the driver. Binding, on the other
+hand, will bind the driver to the consoles that are currently occupied by a
+system driver.
+
+NOTE1: Binding and unbinding must be selected in Kconfig. It's under:
+
+Device Drivers -> Character devices -> Support for binding and unbinding
+console drivers
+
+NOTE2: If any of the virtual consoles are in KD_GRAPHICS mode, then binding or
+unbinding will not succeed. An example of an application that sets the console
+to KD_GRAPHICS is X.
+
+How useful is this feature? This is very useful for console driver
+developers. By unbinding the driver from the console layer, one can unload the
+driver, make changes, recompile, reload and rebind the driver without any need
+for rebooting the kernel. For regular users who may want to switch from
+framebuffer console to VGA console and vice versa, this feature also makes
+this possible. (NOTE NOTE NOTE: Please read fbcon.txt under Documentation/fb
+for more details).
+
+Notes for developers:
+=====================
+
+take_over_console() is now broken up into:
+
+ register_con_driver()
+ bind_con_driver() - private function
+
+give_up_console() is a wrapper to unregister_con_driver(), and a driver must
+be fully unbound for this call to succeed. con_is_bound() will check if the
+driver is bound or not.
+
+Guidelines for console driver writers:
+=====================================
+
+In order for binding to and unbinding from the console to properly work,
+console drivers must follow these guidelines:
+
+1. All drivers, except system drivers, must call either register_con_driver()
+ or take_over_console(). register_con_driver() will just add the driver to
+ the console's internal list. It won't take over the
+ console. take_over_console(), as its name implies, will also take over (or
+ bind to) the console.
+
+2. All resources allocated during con->con_init() must be released in
+ con->con_deinit().
+
+3. All resources allocated in con->con_startup() must be released when the
+ driver, which was previously bound, becomes unbound. The console layer
+ does not have a complementary call to con->con_startup() so it's up to the
+ driver to check when it's legal to release these resources. Calling
+ con_is_bound() in con->con_deinit() will help. If the call returned
+ false, then it's safe to release the resources. This balance has to be
+ ensured because con->con_startup() can be called again when a request to
+ rebind the driver to the console arrives.
+
+4. Upon exit of the driver, ensure that the driver is totally unbound. If the
+ condition is satisfied, then the driver must call unregister_con_driver()
+ or give_up_console().
+
+5. unregister_con_driver() can also be called on conditions which make it
+ impossible for the driver to service console requests. This can happen
+ with the framebuffer console if it suddenly loses all of its drivers.
+
+The current crop of console drivers should still work correctly, but binding
+and unbinding them may cause problems. With minimal fixes, these drivers can
+be made to work correctly.
+
+==========================
+Antonino Daplas <adaplas@pol.net>
+
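Guideline 3 above is easiest to see in code. The sketch below shows an
invented modular driver ("foocon") releasing its con_startup() resource only
when con_is_bound() reports that it is no longer bound. The consw method
prototypes are the standard ones, but the driver, the buffer and its size are
made up for the example.

    #include <linux/console.h>
    #include <linux/vt_kern.h>
    #include <linux/vmalloc.h>

    static struct consw foo_con;    /* the driver's consw; other methods omitted */
    static void *foocon_buf;        /* allocated once in con_startup() */

    static const char *foocon_startup(void)
    {
            if (!foocon_buf)
                    foocon_buf = vmalloc(65536);
            return "foocon";
    }

    static void foocon_deinit(struct vc_data *vc)
    {
            /* per-console resources from con_init() are released here ... */

            /* Guideline 3: drop the con_startup() resources only once the
             * driver is no longer bound to any console. */
            if (!con_is_bound(&foo_con)) {
                    vfree(foocon_buf);
                    foocon_buf = NULL;
            }
    }
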
diff --git a/Documentation/driver-model/overview.txt b/Documentation/driver-model/overview.txt
index ac4a7a737e43..2050c9ffc629 100644
--- a/Documentation/driver-model/overview.txt
+++ b/Documentation/driver-model/overview.txt
@@ -18,7 +18,7 @@ Traditional driver models implemented some sort of tree-like structure
(sometimes just a list) for the devices they control. There wasn't any
uniformity across the different bus types.
-The current driver model provides a comon, uniform data model for describing
+The current driver model provides a common, uniform data model for describing
a bus and the devices that can appear under the bus. The unified bus
model includes a set of common attributes which all busses carry, and a set
of common callbacks, such as device discovery during bus probing, bus
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 08dce0f631bf..f373df12ed4c 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -135,10 +135,10 @@ C. Boot options
The angle can be changed anytime afterwards by 'echoing' the same
numbers to any one of the 2 attributes found in
- /sys/class/graphics/fb{x}
+ /sys/class/graphics/fbcon
- con_rotate - rotate the display of the active console
- con_rotate_all - rotate the display of all consoles
+ rotate - rotate the display of the active console
+ rotate_all - rotate the display of all consoles
Console rotation will only become available if Console Rotation
Support is compiled in your kernel.
@@ -148,5 +148,177 @@ C. Boot options
Actually, the underlying fb driver is totally ignorant of console
rotation.
----
+C. Attaching, Detaching and Unloading
+
+Before going into how to attach, detach and unload the framebuffer console, an
+illustration of the dependencies may help.
+
+The console layer, as with most subsystems, needs a driver that interfaces with
+the hardware. Thus, in a VGA console:
+
+console ---> VGA driver ---> hardware.
+
+Assuming the VGA driver can be unloaded, one must first unbind the VGA driver
+from the console layer before unloading the driver. The VGA driver cannot be
+unloaded if it is still bound to the console layer. (See
+Documentation/console/console.txt for more information).
+
+This is more complicated in the case of the framebuffer console (fbcon),
+because fbcon is an intermediate layer between the console and the drivers:
+
+console ---> fbcon ---> fbdev drivers ---> hardware
+
+The fbdev drivers cannot be unloaded if they are bound to fbcon, and fbcon cannot
+be unloaded if it's bound to the console layer.
+
+So to unload the fbdev drivers, one must first unbind fbcon from the console,
+then unbind the fbdev drivers from fbcon. Fortunately, unbinding fbcon from
+the console layer will automatically unbind framebuffer drivers from
+fbcon. Thus, there is no need to explicitly unbind the fbdev drivers from
+fbcon.
+
+So, how do we unbind fbcon from the console? Part of the answer is in
+Documentation/console/console.txt. To summarize:
+
+Echo a value to the bind file that represents the framebuffer console
+driver. So assuming vtcon1 represents fbcon, then:
+
+echo 1 > /sys/class/vtconsole/vtcon1/bind - attach framebuffer console to
+ console layer
+echo 0 > /sys/class/vtconsole/vtcon1/bind - detach framebuffer console from
+ console layer
+
+If fbcon is detached from the console layer, your boot console driver (which is
+usually VGA text mode) will take over. A few drivers (rivafb and i810fb) will
+restore VGA text mode for you. With the rest, before detaching fbcon, you
+must take a few additional steps to make sure that your VGA text mode is
+restored properly. The following is one of several methods you can use:
+
+1. Download or install vbetool. This utility is included with most
+ distributions nowadays, and is usually part of the suspend/resume tool.
+
+2. In your kernel configuration, ensure that CONFIG_FRAMEBUFFER_CONSOLE is set
+ to 'y' or 'm'. Enable one or more of your favorite framebuffer drivers.
+
+3. Boot into text mode and as root run:
+
+ vbetool vbestate save > <vga state file>
+
+ The above command saves the register contents of your graphics
+ hardware to <vga state file>. You need to do this step only once as
+ the state file can be reused.
+
+4. If fbcon is compiled as a module, load fbcon by doing:
+
+ modprobe fbcon
+
+5. Now to detach fbcon:
+
+ vbetool vbestate restore < <vga state file> && \
+ echo 0 > /sys/class/vtconsole/vtcon1/bind
+
+6. That's it, you're back to VGA mode. And if you compiled fbcon as a module,
+ you can unload it by 'rmmod fbcon'
+
+7. To reattach fbcon:
+
+ echo 1 > /sys/class/vtconsole/vtcon1/bind
+
+8. Once fbcon is unbound, all drivers registered to the system will also
+become unbound. This means that fbcon and individual framebuffer drivers
+can be unloaded or reloaded at will. Reloading the drivers or fbcon will
+automatically bind the console, fbcon and the drivers together. Unloading
+all the drivers without unloading fbcon will make it impossible for the
+console to bind fbcon.
+
+Notes for vesafb users:
+=======================
+
+Unfortunately, if your bootline includes a vga=xxx parameter that sets the
+hardware in graphics mode, such as when loading vesafb, vgacon will not load.
+Instead, vgacon will replace the default boot console with dummycon, and you
+won't get any display after detaching fbcon. Your machine is still alive, so
+you can reattach vesafb. However, to reattach vesafb, you need to do one of
+the following:
+
+Variation 1:
+
+ a. Before detaching fbcon, do
+
+ vbetool vbemode save > <vesa state file> # do once for each vesafb mode,
+ # the file can be reused
+
+ b. Detach fbcon as in step 5.
+
+ c. Attach fbcon
+
+ vbetool vbestate restore < <vesa state file> && \
+ echo 1 > /sys/class/vtconsole/vtcon1/bind
+
+Variation 2:
+
+ a. Before detaching fbcon, do:
+ echo <ID> > /sys/class/tty/console/bind
+
+
+ vbetool vbemode get
+
+ b. Take note of the mode number
+
+ c. Detach fbcon as in step 5.
+
+ d. Attach fbcon:
+
+ vbetool vbemode set <mode number> && \
+ echo 1 > /sys/class/vtconsole/vtcon1/bind
+
+Samples:
+========
+
+Here are 2 sample bash scripts that you can use to bind or unbind the
+framebuffer console driver if you are in an X86 box:
+
+---------------------------------------------------------------------------
+#!/bin/bash
+# Unbind fbcon
+
+# Change this to where your actual vgastate file is located
+# Or Use VGASTATE=$1 to indicate the state file at runtime
+VGASTATE=/tmp/vgastate
+
+# path to vbetool
+VBETOOL=/usr/local/bin
+
+
+for (( i = 0; i < 16; i++))
+do
+ if test -x /sys/class/vtconsole/vtcon$i; then
+ if [ `cat /sys/class/vtconsole/vtcon$i/name | grep -c "frame buffer"` \
+ = 1 ]; then
+ if test -x $VBETOOL/vbetool; then
+ echo Unbinding vtcon$i
+ $VBETOOL/vbetool vbestate restore < $VGASTATE
+ echo 0 > /sys/class/vtconsole/vtcon$i/bind
+ fi
+ fi
+ fi
+done
+
+---------------------------------------------------------------------------
+#!/bin/bash
+# Bind fbcon
+
+for (( i = 0; i < 16; i++))
+do
+ if test -x /sys/class/vtconsole/vtcon$i; then
+ if [ `cat /sys/class/vtconsole/vtcon$i/name | grep -c "frame buffer"` \
+ = 1 ]; then
+ echo Binding vtcon$i
+ echo 1 > /sys/class/vtconsole/vtcon$i/bind
+ fi
+ fi
+done
+---------------------------------------------------------------------------
+
+--
Antonino Daplas <adaplas@pol.net>
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 027285d0c26c..033ac91da07a 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -177,6 +177,16 @@ Who: Jean Delvare <khali@linux-fr.org>
---------------------------
+What: Unused EXPORT_SYMBOL/EXPORT_SYMBOL_GPL exports
+ (temporary transition config option provided until then)
+ The transition config option will also be removed at the same time.
+When: before 2.6.19
+Why: Unused symbols are both increasing the size of the kernel binary
+ and are often a sign of "wrong API"
+Who: Arjan van de Ven <arjan@linux.intel.com>
+
+---------------------------
+
What: remove EXPORT_SYMBOL(tasklist_lock)
When: August 2006
Files: kernel/fork.c
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index afb1335c05d6..4aecc9bdb273 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -113,6 +113,14 @@ noquota
grpquota
usrquota
+bh (*) ext3 associates buffer heads to data pages to
+nobh (a) cache disk block mapping information
+ (b) link pages into transaction to provide
+ ordering guarantees.
+ "bh" option forces use of buffer heads.
+ "nobh" option tries to avoid associating buffer
+ heads (supported only for "writeback" mode).
+
Specification
=============
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index a9c00facdf40..14ef3868a328 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1123,6 +1123,14 @@ The top Makefile exports the following variables:
$(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE). The user may
override this value on the command line if desired.
+ INSTALL_MOD_STRIP
+
+ If this variable is specified, modules will be stripped
+ after they are installed. If INSTALL_MOD_STRIP is '1', then the
+ default option --strip-debug will be used. Otherwise,
+ INSTALL_MOD_STRIP will be used as the option(s) to the strip command.
+
+
=== 8 Makefile language
The kernel Makefiles are designed to run with GNU Make. The Makefiles
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index dcf5580380ab..9b9b454b048a 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -175,7 +175,7 @@ end
document trapinfo
Run info threads and lookup pid of thread #1
'trapinfo <pid>' will tell you by which trap & possibly
- addresthe kernel paniced.
+ address the kernel panicked.
end
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bca6f389da66..25f8d20dac53 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -61,6 +61,7 @@ parameter is applicable:
MTD MTD support is enabled.
NET Appropriate network support is enabled.
NUMA NUMA support is enabled.
+ GENERIC_TIME The generic timeofday code is enabled.
NFS Appropriate NFS support is enabled.
OSS OSS sound support is enabled.
PARIDE The ParIDE subsystem is enabled.
@@ -179,6 +180,11 @@ running once the system is up.
override platform specific driver.
See also Documentation/acpi-hotkey.txt.
+ acpi_pm_good [IA-32,X86-64]
+ Override the pmtimer bug detection: force the kernel
+ to assume that this machine's pmtimer latches its value
+ and always returns good values.
+
enable_timer_pin_1 [i386,x86-64]
Enable PIN 1 of APIC timer
Can be useful to work around chipset bugs
@@ -341,10 +347,11 @@ running once the system is up.
Value can be changed at runtime via
/selinux/checkreqprot.
- clock= [BUGS=IA-32,HW] gettimeofday timesource override.
- Forces specified timesource (if avaliable) to be used
- when calculating gettimeofday(). If specicified
- timesource is not avalible, it defaults to PIT.
+ clock= [BUGS=IA-32, HW] gettimeofday clocksource override.
+ [Deprecated]
+ Forces the specified clocksource (if available) to be used
+ when calculating gettimeofday(). If the specified
+ clocksource is not available, it defaults to PIT.
Format: { pit | tsc | cyclone | pmtmr }
disable_8254_timer
@@ -1617,6 +1624,10 @@ running once the system is up.
time Show timing data prefixed to each printk message line
+ clocksource= [GENERIC_TIME] Override the default clocksource
+ Override the default clocksource and use the clocksource
+ with the name specified.
+
tipar.timeout= [HW,PPT]
Set communications timeout in tenths of a second
(default 15).
@@ -1658,6 +1669,10 @@ running once the system is up.
usbhid.mousepoll=
[USBHID] The interval which mice are to be polled at.
+ vdso= [IA-32]
+ vdso=1: enable VDSO (default)
+ vdso=0: disable VDSO mapping
+
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.
@@ -1674,9 +1689,14 @@ running once the system is up.
decrease the size and leave more room for directly
mapped kernel RAM.
- vmhalt= [KNL,S390]
+ vmhalt= [KNL,S390] Perform z/VM CP command after system halt.
+ Format: <command>
+
+ vmpanic= [KNL,S390] Perform z/VM CP command after kernel panic.
+ Format: <command>
- vmpoff= [KNL,S390]
+ vmpoff= [KNL,S390] Perform z/VM CP command after power off.
+ Format: <command>
waveartist= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma2>
diff --git a/Documentation/keys-request-key.txt b/Documentation/keys-request-key.txt
index 22488d791168..c1f64fdf84cb 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/keys-request-key.txt
@@ -3,16 +3,23 @@
===================
The key request service is part of the key retention service (refer to
-Documentation/keys.txt). This document explains more fully how that the
-requesting algorithm works.
+Documentation/keys.txt). This document explains more fully how the requesting
+algorithm works.
The process starts by either the kernel requesting a service by calling
-request_key():
+request_key*():
struct key *request_key(const struct key_type *type,
const char *description,
const char *callout_string);
+or:
+
+ struct key *request_key_with_auxdata(const struct key_type *type,
+ const char *description,
+ const char *callout_string,
+ void *aux);
+
Or by userspace invoking the request_key system call:
key_serial_t request_key(const char *type,
@@ -20,16 +27,26 @@ Or by userspace invoking the request_key system call:
const char *callout_info,
key_serial_t dest_keyring);
-The main difference between the two access points is that the in-kernel
-interface does not need to link the key to a keyring to prevent it from being
-immediately destroyed. The kernel interface returns a pointer directly to the
-key, and it's up to the caller to destroy the key.
+The main difference between the access points is that the in-kernel interface
+does not need to link the key to a keyring to prevent it from being immediately
+destroyed. The kernel interface returns a pointer directly to the key, and
+it's up to the caller to destroy the key.
+
+The request_key_with_auxdata() call is like the in-kernel request_key() call,
+except that it permits auxiliary data to be passed to the upcaller (the default
+is NULL). This is only useful for those key types that define their own upcall
+mechanism rather than using /sbin/request-key.
The userspace interface links the key to a keyring associated with the process
to prevent the key from going away, and returns the serial number of the key to
the caller.
+The following example assumes that the key types involved don't define their
+own upcall mechanisms. If they do, then those should be substituted for the
+forking and execution of /sbin/request-key.
+
+
===========
THE PROCESS
===========
@@ -40,8 +57,8 @@ A request proceeds in the following manner:
interface].
(2) request_key() searches the process's subscribed keyrings to see if there's
- a suitable key there. If there is, it returns the key. If there isn't, and
- callout_info is not set, an error is returned. Otherwise the process
+ a suitable key there. If there is, it returns the key. If there isn't,
+ and callout_info is not set, an error is returned. Otherwise the process
proceeds to the next step.
(3) request_key() sees that A doesn't have the desired key yet, so it creates
@@ -62,7 +79,7 @@ A request proceeds in the following manner:
instantiation.
(7) The program may want to access another key from A's context (say a
- Kerberos TGT key). It just requests the appropriate key, and the keyring
+ Kerberos TGT key). It just requests the appropriate key, and the keyring
search notes that the session keyring has auth key V in its bottom level.
This will permit it to then search the keyrings of process A with the
@@ -79,10 +96,11 @@ A request proceeds in the following manner:
(10) The program then exits 0 and request_key() deletes key V and returns key
U to the caller.
-This also extends further. If key W (step 7 above) didn't exist, key W would be
-created uninstantiated, another auth key (X) would be created (as per step 3)
-and another copy of /sbin/request-key spawned (as per step 4); but the context
-specified by auth key X will still be process A, as it was in auth key V.
+This also extends further. If key W (step 7 above) didn't exist, key W would
+be created uninstantiated, another auth key (X) would be created (as per step
+3) and another copy of /sbin/request-key spawned (as per step 4); but the
+context specified by auth key X will still be process A, as it was in auth key
+V.
This is because process A's keyrings can't simply be attached to
/sbin/request-key at the appropriate places because (a) execve will discard two
@@ -118,17 +136,17 @@ A search of any particular keyring proceeds in the following fashion:
(2) It considers all the non-keyring keys within that keyring and, if any key
matches the criteria specified, calls key_permission(SEARCH) on it to see
- if the key is allowed to be found. If it is, that key is returned; if
+ if the key is allowed to be found. If it is, that key is returned; if
not, the search continues, and the error code is retained if of higher
priority than the one currently set.
(3) It then considers all the keyring-type keys in the keyring it's currently
- searching. It calls key_permission(SEARCH) on each keyring, and if this
+ searching. It calls key_permission(SEARCH) on each keyring, and if this
grants permission, it recurses, executing steps (2) and (3) on that
keyring.
The process stops immediately a valid key is found with permission granted to
-use it. Any error from a previous match attempt is discarded and the key is
+use it. Any error from a previous match attempt is discarded and the key is
returned.
When search_process_keyrings() is invoked, it performs the following searches
@@ -153,7 +171,7 @@ The moment one succeeds, all pending errors are discarded and the found key is
returned.
Only if all these fail does the whole thing fail with the highest priority
-error. Note that several errors may have come from LSM.
+error. Note that several errors may have come from LSM.
The error priority is:
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 3bbe157b45e4..e373f0212843 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -241,25 +241,30 @@ The security class "key" has been added to SELinux so that mandatory access
controls can be applied to keys created within various contexts. This support
is preliminary, and is likely to change quite significantly in the near future.
Currently, all of the basic permissions explained above are provided in SELinux
-as well; SE Linux is simply invoked after all basic permission checks have been
+as well; SELinux is simply invoked after all basic permission checks have been
performed.
-Each key is labeled with the same context as the task to which it belongs.
-Typically, this is the same task that was running when the key was created.
-The default keyrings are handled differently, but in a way that is very
-intuitive:
+The value of the file /proc/self/attr/keycreate influences the labeling of
+newly-created keys. If the contents of that file correspond to an SELinux
+security context, then the key will be assigned that context. Otherwise, the
+key will be assigned the current context of the task that invoked the key
+creation request. Tasks must be granted explicit permission to assign a
+particular context to newly-created keys, using the "create" permission in the
+key security class.
- (*) The user and user session keyrings that are created when the user logs in
- are currently labeled with the context of the login manager.
-
- (*) The keyrings associated with new threads are each labeled with the context
- of their associated thread, and both session and process keyrings are
- handled similarly.
+The default keyrings associated with users will be labeled with the default
+context of the user if and only if the login programs have been instrumented to
+properly initialize keycreate during the login process. Otherwise, they will
+be labeled with the context of the login program itself.
Note, however, that the default keyrings associated with the root user are
labeled with the default kernel context, since they are created early in the
boot process, before root has a chance to log in.
+The keyrings associated with new threads are each labeled with the context of
+their associated thread, and both session and process keyrings are handled
+similarly.
+
================
NEW PROCFS FILES
@@ -270,9 +275,17 @@ about the status of the key service:
(*) /proc/keys
- This lists all the keys on the system, giving information about their
- type, description and permissions. The payload of the key is not available
- this way:
+ This lists the keys that are currently viewable by the task reading the
+ file, giving information about their type, description and permissions.
+ It is not possible to view the payload of the key this way, though some
+ information about it may be given.
+
+ The only keys included in the list are those that grant View permission to
+ the reading process whether or not it possesses them. Note that LSM
+ security checks are still performed, and may further filter out keys that
+ the current process is not authorised to view.
+
+ The contents of the file look like this:
SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY
00000001 I----- 39 perm 1f3f0000 0 0 keyring _uid_ses.0: 1/4
@@ -300,7 +313,7 @@ about the status of the key service:
(*) /proc/key-users
This file lists the tracking data for each user that has at least one key
- on the system. Such data includes quota information and statistics:
+ on the system. Such data includes quota information and statistics:
[root@andromeda root]# cat /proc/key-users
0: 46 45/45 1/100 13/10000
@@ -767,6 +780,17 @@ payload contents" for more information.
See also Documentation/keys-request-key.txt.
+(*) To search for a key, passing auxiliary data to the upcaller, call:
+
+ struct key *request_key_with_auxdata(const struct key_type *type,
+ const char *description,
+ const char *callout_string,
+ void *aux);
+
+ This is identical to request_key(), except that the auxiliary data is
+ passed to the key_type->request_key() op if it exists.
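+
+ A minimal sketch of a call site, assuming a hypothetical "foo" key
+ type that implements its own ->request_key() upcall (the key type
+ and aux structure below are illustrative only):
+
+	struct foo_lookup_aux aux = { .cell = "example.com" };
+	struct key *key;
+
+	key = request_key_with_auxdata(&key_type_foo, "foo:example.com",
+				       NULL, &aux);
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+	/* ... use the key ... */
+	key_put(key);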
+
+
(*) When it is no longer required, the key should be released using:
void key_put(struct key *key);
@@ -1018,6 +1042,24 @@ The structure has a number of fields, some of which are mandatory:
as might happen when the userspace buffer is accessed.
+ (*) int (*request_key)(struct key *key, struct key *authkey, const char *op,
+ void *aux);
+
+ This method is optional. If provided, request_key() and
+ request_key_with_auxdata() will invoke this function rather than
+ upcalling to /sbin/request-key to operate upon a key of this type.
+
+ The aux parameter is as passed to request_key_with_auxdata() or is NULL
+ otherwise. Also passed are the key to be operated upon, the
+ authorisation key for this operation and the operation type (currently
+ only "create").
+
+ This function should return only when the upcall is complete. Upon return
+ the authorisation key will be revoked, and the target key will be
+ negatively instantiated if it is still uninstantiated. The error will be
+ returned to the caller of request_key*().
+
+
============================
REQUEST-KEY CALLBACK SERVICE
============================
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 03a13c462cf2..0668f9dc9d29 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -200,6 +200,17 @@ All md devices contain:
This can be written only while the array is being assembled, not
after it is started.
+ layout
+ The "layout" for the array for the particular level. This is
+ simply a number that is interpreted differently by different
+ levels. It can be written while assembling an array.
+
+ resync_start
+ The point at which resync should start. If no resync is needed,
+ this will be a very large number. At array creation it will
+ default to 0, though starting the array as 'clean' will
+ set it much larger.
+
new_dev
This file can be written but not read. The value written should
be a block device number as major:minor. e.g. 8:0
@@ -207,6 +218,54 @@ All md devices contain:
available. It will then appear at md/dev-XXX (depending on the
name of the device) and further configuration is then possible.
+ safe_mode_delay
+ When an md array has seen no write requests for a certain period
+ of time, it will be marked as 'clean'. When another write
+ request arrives, the array is marked as 'dirty' before the write
+ commences. This is known as 'safe_mode'.
+ The 'certain period' is controlled by this file which stores the
+ period as a number of seconds. The default is 200msec (0.200).
+ Writing a value of 0 disables safemode.
+
+ array_state
+ This file contains a single word which describes the current
+ state of the array. In many cases, the state can be set by
+ writing the word for the desired state, however some states
+ cannot be explicitly set, and some transitions are not allowed.
+
+ clear
+ No devices, no size, no level
+ Writing is equivalent to STOP_ARRAY ioctl
+ inactive
+ May have some settings, but array is not active
+ all IO results in error
+ When written, doesn't tear down array, but just stops it
+ suspended (not supported yet)
+ All IO requests will block. The array can be reconfigured.
+ Writing this, if accepted, will block until the array is quiescent
+ readonly
+ no resync can happen. no superblocks get written.
+ write requests fail
+ read-auto
+ like readonly, but behaves like 'clean' on a write request.
+
+ clean - no pending writes, but otherwise active.
+ When written to inactive array, starts without resync
+ If a write request arrives then
+ if metadata is known, mark 'dirty' and switch to 'active'.
+ if not known, block and switch to write-pending
+ If written to an active array that has pending writes, then fails.
+ active
+ fully active: IO and resync can be happening.
+ When written to inactive array, starts with resync
+
+ write-pending
+ clean, but writes are blocked waiting for 'active' to be written.
+
+ active-idle
+ like active, but no writes have been seen for a while (safe_mode_delay).
+
+
sync_speed_min
sync_speed_max
This are similar to /proc/sys/dev/raid/speed_limit_{min,max}
@@ -250,10 +309,18 @@ Each directory contains:
faulty - device has been kicked from active use due to
a detected fault
in_sync - device is a fully in-sync member of the array
+ writemostly - device will only be subject to read
+ requests if there are no other options.
+ This applies only to raid1 arrays.
spare - device is working, but not a full member.
This includes spares that are in the process
+ of being recovered to
+ This list may grow in future.
+ This can be written to.
+ Writing "faulty" simulates a failure on the device.
+ Writing "remove" removes the device from the array.
+ Writing "writemostly" sets the writemostly flag.
+ Writing "-writemostly" clears the writemostly flag.
errors
An approximate count of read errors that have been detected on
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
new file mode 100644
index 000000000000..5d61dacd21f6
--- /dev/null
+++ b/Documentation/pi-futex.txt
@@ -0,0 +1,121 @@
+Lightweight PI-futexes
+----------------------
+
+We are calling them lightweight for 3 reasons:
+
+ - in the user-space fastpath a PI-enabled futex involves no kernel work
+ (or any other PI complexity) at all. No registration, no extra kernel
+ calls - just pure fast atomic ops in userspace.
+
+ - even in the slowpath, the system call and scheduling pattern is very
+ similar to normal futexes.
+
+ - the in-kernel PI implementation is streamlined around the mutex
+ abstraction, with strict rules that keep the implementation
+ relatively simple: only a single owner may own a lock (i.e. no
+ read-write lock support), only the owner may unlock a lock, no
+ recursive locking, etc.
+
+Priority Inheritance - why?
+---------------------------
+
+The short reply: user-space PI helps achieve/improve determinism for
+user-space applications. In the best case, it can help achieve
+determinism and well-bounded latencies. Even in the worst case, PI will
+improve the statistical distribution of locking related application
+delays.
+
+The longer reply:
+-----------------
+
+Firstly, sharing locks between multiple tasks is a common programming
+technique that often cannot be replaced with lockless algorithms. As we
+can see in the kernel [which is a quite complex program in itself],
+lockless structures are the exception rather than the norm - the current
+ratio of lockless vs. lock-based code for shared data structures is somewhere
+between 1:10 and 1:100. Lockless is hard, and the complexity of lockless
+algorithms often endangers the ability to do robust reviews of said code.
+I.e. critical RT apps often choose lock structures to protect critical
+data structures, instead of lockless algorithms. Furthermore, there are
+cases (like shared hardware, or other resource limits) where lockless
+access is mathematically impossible.
+
+Media players (such as Jack) are an example of reasonable application
+design with multiple tasks (with multiple priority levels) sharing
+short-held locks: for example, a highprio audio playback thread is
+combined with medium-prio construct-audio-data threads and low-prio
+display-colory-stuff threads. Add video and decoding to the mix and
+we've got even more priority levels.
+
+So once we accept that synchronization objects (locks) are an
+unavoidable fact of life, and once we accept that multi-task userspace
+apps have a very fair expectation of being able to use locks, we've got
+to think about how to offer the option of a deterministic locking
+implementation to user-space.
+
+Most of the technical counter-arguments against doing priority
+inheritance only apply to kernel-space locks. But user-space locks are
+different, there we cannot disable interrupts or make the task
+non-preemptible in a critical section, so the 'use spinlocks' argument
+does not apply (user-space spinlocks have the same priority inversion
+problems as other user-space locking constructs). Fact is, pretty much
+the only technique that currently enables good determinism for userspace
+locks (such as futex-based pthread mutexes) is priority inheritance:
+
+Currently (without PI), if a high-prio and a low-prio task share a lock
+[this is a quite common scenario for most non-trivial RT applications],
+even if all critical sections are coded carefully to be deterministic
+(i.e. all critical sections are short in duration and only execute a
+limited number of instructions), the kernel cannot guarantee any
+deterministic execution of the high-prio task: any medium-priority task
+could preempt the low-prio task while it holds the shared lock and
+executes the critical section, and could delay it indefinitely.
+
+Implementation:
+---------------
+
+As mentioned before, the userspace fastpath of PI-enabled pthread
+mutexes involves no kernel work at all - they behave quite similarly to
+normal futex-based locks: a 0 value means unlocked, and a value==TID
+means locked. (This is the same method as used by list-based robust
+futexes.) Userspace uses atomic ops to lock/unlock these mutexes without
+entering the kernel.
+
+To handle the slowpath, we have added two new futex ops:
+
+ FUTEX_LOCK_PI
+ FUTEX_UNLOCK_PI
+
+If the lock-acquire fastpath fails, [i.e. an atomic transition from 0 to
+TID fails], then FUTEX_LOCK_PI is called. The kernel does all the
+remaining work: if there is no futex-queue attached to the futex address
+yet then the code looks up the task that owns the futex [it has put its
+own TID into the futex value], and attaches a 'PI state' structure to
+the futex-queue. The pi_state includes an rt-mutex, which is a PI-aware,
+kernel-based synchronization object. The 'other' task is made the owner
+of the rt-mutex, and the FUTEX_WAITERS bit is atomically set in the
+futex value. Then this task tries to lock the rt-mutex, on which it
+blocks. Once it returns, it has the mutex acquired, and it sets the
+futex value to its own TID and returns. Userspace has no other work to
+perform - it now owns the lock, and the futex value contains
+FUTEX_WAITERS|TID.
+
+If the unlock side fastpath succeeds, [i.e. userspace manages to do a
+TID -> 0 atomic transition of the futex value], then no kernel work is
+triggered.
+
+If the unlock fastpath fails (because the FUTEX_WAITERS bit is set),
+then FUTEX_UNLOCK_PI is called, and the kernel unlocks the futex on
+behalf of userspace - and it also unlocks the attached
+pi_state->rt_mutex and thus wakes up any potential waiters.
+
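+Purely as illustration, the fast and slow paths described above could look
+roughly like this in userspace C (assuming the GCC __sync builtins and raw
+syscall(2) usage, since glibc provides no futex() wrapper; error handling
+is omitted):
+
+	#include <linux/futex.h>
+	#include <sys/syscall.h>
+	#include <unistd.h>
+
+	static void pi_lock(int *futex)
+	{
+		int tid = syscall(SYS_gettid);
+
+		/* fastpath: 0 -> TID transition, no kernel work at all */
+		if (__sync_val_compare_and_swap(futex, 0, tid) != 0)
+			/* slowpath: the kernel attaches the pi_state and
+			   rt-mutex, sets FUTEX_WAITERS and blocks us */
+			syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0,
+				NULL, NULL, 0);
+	}
+
+	static void pi_unlock(int *futex)
+	{
+		int tid = syscall(SYS_gettid);
+
+		/* fastpath: TID -> 0 transition, only possible if the
+		   FUTEX_WAITERS bit is not set */
+		if (__sync_val_compare_and_swap(futex, tid, 0) != tid)
+			/* slowpath: the kernel hands the rt-mutex to the
+			   top waiter and wakes it */
+			syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0,
+				NULL, NULL, 0);
+	}
+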
+Note that under this approach, contrary to previous PI-futex approaches,
+there is no prior 'registration' of a PI-futex. [which is not quite
+possible anyway, due to existing ABI properties of pthread mutexes.]
+
+Also, under this scheme, 'robustness' and 'PI' are two orthogonal
+properties of futexes, and all four combinations are possible: futex,
+robust-futex, PI-futex, robust+PI-futex.
+
+More details about priority inheritance can be found in
+Documentation/rt-mutex.txt.
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index df82d75245a0..76e8064b8c3a 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -95,7 +95,7 @@ comparison. If the thread has registered a list, then normally the list
is empty. If the thread/process crashed or terminated in some incorrect
way then the list might be non-empty: in this case the kernel carefully
walks the list [not trusting it], and marks all locks that are owned by
-this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if
+this thread with the FUTEX_OWNER_DIED bit, and wakes up one waiter (if
any).
The list is guaranteed to be private and per-thread at do_exit() time,
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
new file mode 100644
index 000000000000..c472ffacc2f6
--- /dev/null
+++ b/Documentation/rt-mutex-design.txt
@@ -0,0 +1,781 @@
+#
+# Copyright (c) 2006 Steven Rostedt
+# Licensed under the GNU Free Documentation License, Version 1.2
+#
+
+RT-mutex implementation design
+------------------------------
+
+This document tries to describe the design of the rtmutex.c implementation.
+It doesn't describe the reasons why rtmutex.c exists. For that please see
+Documentation/rt-mutex.txt. Although this document does explain problems
+that can happen without this code, it does so only to help the reader
+understand what the code actually does.
+
+The goal of this document is to help others understand the priority
+inheritance (PI) algorithm that is used, as well as reasons for the
+decisions that were made to implement PI in the manner that was done.
+
+
+Unbounded Priority Inversion
+----------------------------
+
+Priority inversion is when a lower priority process executes while a higher
+priority process wants to run. This happens for several reasons, and
+most of the time it can't be helped. Anytime a high priority process wants
+to use a resource that a lower priority process has (a mutex for example),
+the high priority process must wait until the lower priority process is done
+with the resource. This is a priority inversion. What we want to prevent
+is something called unbounded priority inversion. That is when the high
+priority process is prevented from running by a lower priority process for
+an undetermined amount of time.
+
+The classic example of unbounded priority inversion is where you have three
+processes, let's call them processes A, B, and C, where A is the highest
+priority process, C is the lowest, and B is in between. A tries to grab a lock
+that C owns, so it must wait and let C run until it releases the lock. But in
+the meantime, B executes, and since B is of a higher priority than C, it
+preempts C; by doing so, it is in fact preempting A, which is a higher
+priority process.
+Now there's no way of knowing how long A will be sleeping waiting for C
+to release the lock, because for all we know, B is a CPU hog and will
+never give C a chance to release the lock. This is called unbounded priority
+inversion.
+
+Here's a little ASCII art to show the problem.
+
+ grab lock L1 (owned by C)
+ |
+A ---+
+ C preempted by B
+ |
+C +----+
+
+B +-------->
+ B now keeps A from running.
+
+
+Priority Inheritance (PI)
+-------------------------
+
+There are several ways to solve this issue, but other ways are out of scope
+for this document. Here we only discuss PI.
+
+PI is where a process inherits the priority of another process if the other
+process blocks on a lock owned by the current process. To make this easier
+to understand, let's use the previous example, with processes A, B, and C again.
+
+This time, when A blocks on the lock owned by C, C would inherit the priority
+of A. So now if B becomes runnable, it would not preempt C, since C now has
+the high priority of A. As soon as C releases the lock, it loses its
+inherited priority, and A then can continue with the resource that C had.
+
+Terminology
+-----------
+
+Here I explain some terminology that is used in this document to help describe
+the design that is used to implement PI.
+
+PI chain - The PI chain is an ordered series of locks and processes that cause
+ processes to inherit priorities from a previous process that is
+ blocked on one of its locks. This is described in more detail
+ later in this document.
+
+mutex - In this document, to differentiate the locks that implement
+ PI from the spin locks that are used in the PI code, the
+ PI locks will from now on be called mutexes.
+
+lock - In this document from now on, I will use the term lock when
+ referring to spin locks that are used to protect parts of the PI
+ algorithm. These locks disable preemption for UP (when
+ CONFIG_PREEMPT is enabled) and on SMP prevent multiple CPUs from
+ entering critical sections simultaneously.
+
+spin lock - Same as lock above.
+
+waiter - A waiter is a struct that is stored on the stack of a blocked
+ process. Since the scope of the waiter is within the code for
+ a process being blocked on the mutex, it is fine to allocate
+ the waiter on the process's stack (local variable). This
+ structure holds a pointer to the task, as well as the mutex that
+ the task is blocked on. It also has the plist node structures to
+ place the task in the waiter_list of a mutex as well as the
+ pi_list of a mutex owner task (described below).
+
+ waiter is sometimes used in reference to the task that is waiting
+ on a mutex. This is the same as waiter->task.
+
+waiters - A list of processes that are blocked on a mutex.
+
+top waiter - The highest priority process waiting on a specific mutex.
+
+top pi waiter - The highest priority process waiting on one of the mutexes
+ that a specific process owns.
+
+Note: task and process are used interchangeably in this document, mostly to
+ differentiate between two processes that are being described together.
+
+
+PI chain
+--------
+
+The PI chain is a list of processes and mutexes that may cause priority
+inheritance to take place. Multiple chains may converge, but a chain
+would never diverge, since a process can't be blocked on more than one
+mutex at a time.
+
+Example:
+
+ Process: A, B, C, D, E
+ Mutexes: L1, L2, L3, L4
+
+ A owns: L1
+ B blocked on L1
+ B owns L2
+ C blocked on L2
+ C owns L3
+ D blocked on L3
+ D owns L4
+ E blocked on L4
+
+The chain would be:
+
+ E->L4->D->L3->C->L2->B->L1->A
+
+To show where two chains merge, we could add another process F and
+another mutex L5 where B owns L5 and F is blocked on mutex L5.
+
+The chain for F would be:
+
+ F->L5->B->L1->A
+
+Since a process may own more than one mutex, but never be blocked on more than
+one, the chains merge.
+
+Here we show both chains:
+
+ E->L4->D->L3->C->L2-+
+ |
+ +->B->L1->A
+ |
+ F->L5-+
+
+For PI to work, the processes at the right end of these chains (or we may
+also call it the Top of the chain) must be equal to or higher in priority
+than the processes to the left or below in the chain.
+
+Also since a mutex may have more than one process blocked on it, we can
+have multiple chains merge at mutexes. If we add another process G that is
+blocked on mutex L2:
+
+ G->L2->B->L1->A
+
+And once again, to show how this can grow I will show the merging chains
+again.
+
+ E->L4->D->L3->C-+
+ +->L2-+
+ | |
+ G-+ +->B->L1->A
+ |
+ F->L5-+
+
+
+Plist
+-----
+
+Before I go further and talk about how the PI chain is stored through lists
+on both mutexes and processes, I'll explain the plist. This is similar to
+the struct list_head functionality that is already in the kernel.
+The implementation of plist is out of scope for this document, but it is
+very important to understand what it does.
+
+There are a few differences between plist and list, the most important one
+being that plist is a priority sorted linked list. This means that the
+priorities of the plist are sorted, such that it takes O(1) to retrieve the
+highest priority item in the list. Obviously this is useful to store processes
+based on their priorities.
+
+Another difference, which is important for implementation, is that, unlike
+list, the head of the list is a different element than the nodes of a list.
+So the head of the list is declared as struct plist_head and nodes that will
+be added to the list are declared as struct plist_node.
+
+
+Mutex Waiter List
+-----------------
+
+Every mutex keeps track of all the waiters that are blocked on itself. The mutex
+has a plist to store these waiters by priority. This list is protected by
+a spin lock that is located in the struct of the mutex. This lock is called
+wait_lock. Since the modification of the waiter list is never done in
+interrupt context, the wait_lock can be taken without disabling interrupts.
+
+
+Task PI List
+------------
+
+To keep track of the PI chains, each process has its own PI list. This is
+a list of all top waiters of the mutexes that are owned by the process.
+Note that this list only holds the top waiters and not all waiters that are
+blocked on mutexes owned by the process.
+
+The top of the task's PI list is always the highest priority task that
+is waiting on a mutex that is owned by the task. So if the task has
+inherited a priority, it will always be the priority of the task that is
+at the top of this list.
+
+This list is stored in the task structure of a process as a plist called
+pi_list. This list is protected by a spin lock also in the task structure,
+called pi_lock. This lock may also be taken in interrupt context, so when
+locking the pi_lock, interrupts must be disabled.
+
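+Putting the last two sections together: the waiter structure that links a
+blocked task to both lists looks roughly like the following sketch (the
+authoritative definition lives in kernel/rtmutex_common.h):
+
+	struct rt_mutex_waiter {
+		struct plist_node	list_entry;	/* on the mutex's wait_list */
+		struct plist_node	pi_list_entry;	/* on the owner's pi_list */
+		struct task_struct	*task;		/* the blocked task */
+		struct rt_mutex		*lock;		/* the mutex blocked on */
+	};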
+
+Depth of the PI Chain
+---------------------
+
+The maximum depth of the PI chain is not dynamic, and could actually be
+defined. But it is very complex to figure out, since it depends on all
+the nesting of mutexes. Let's look at the example where we have 3 mutexes,
+L1, L2, and L3, and four separate functions func1, func2, func3 and func4.
+The following shows a locking order of L1->L2->L3, but may not actually
+be directly nested that way.
+
+void func1(void)
+{
+ mutex_lock(L1);
+
+ /* do anything */
+
+ mutex_unlock(L1);
+}
+
+void func2(void)
+{
+ mutex_lock(L1);
+ mutex_lock(L2);
+
+ /* do something */
+
+ mutex_unlock(L2);
+ mutex_unlock(L1);
+}
+
+void func3(void)
+{
+ mutex_lock(L2);
+ mutex_lock(L3);
+
+ /* do something else */
+
+ mutex_unlock(L3);
+ mutex_unlock(L2);
+}
+
+void func4(void)
+{
+ mutex_lock(L3);
+
+ /* do something again */
+
+ mutex_unlock(L3);
+}
+
+Now we add 4 processes that run each of these functions separately.
+Processes A, B, C, and D run functions func1, func2, func3 and func4
+respectively, such that D runs first and A last. With D being preempted
+in func4 in the "do something again" area, we have the following locking:
+
+D owns L3
+ C blocked on L3
+ C owns L2
+ B blocked on L2
+ B owns L1
+ A blocked on L1
+
+And thus we have the chain A->L1->B->L2->C->L3->D.
+
+This gives us a PI depth of 4 (four processes), but looking at any of the
+functions individually, it seems as though they only have at most a locking
+depth of two. So, although the locking depth is defined at compile time,
+it still is very difficult to find the possibilities of that depth.
+
+Now since mutexes can be defined by user-land applications, we don't want a DOS
+type of application that nests large amounts of mutexes to create a large
+PI chain, and have the code holding spin locks while looking at a large
+amount of data. So to prevent this, the implementation not only implements
+a maximum lock depth, but also only holds at most two different locks at a
+time, as it walks the PI chain. More about this below.
+
+
+Mutex owner and flags
+---------------------
+
+The mutex structure contains a pointer to the owner of the mutex. If the
+mutex is not owned, this owner is set to NULL. Since all architectures
+have the task structure on at least a four byte alignment (and if this is
+not true, the rtmutex.c code will be broken!), this allows for the two
+least significant bits to be used as flags. This part is also described
+in Documentation/rt-mutex.txt, but will also be briefly described here.
+
+Bit 0 is used as the "Pending Owner" flag. This is described later.
+Bit 1 is used as the "Has Waiters" flags. This is also described later
+ in more detail, but is set whenever there are waiters on a mutex.
+
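+As a sketch, the flag bits and the helper that strips them off to recover
+the owner pointer look roughly like this (the real definitions live in
+kernel/rtmutex_common.h; names are approximate):
+
+	#define RT_MUTEX_OWNER_PENDING	1UL
+	#define RT_MUTEX_HAS_WAITERS	2UL
+	#define RT_MUTEX_OWNER_MASKALL	3UL
+
+	static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+	{
+		/* mask off the two flag bits to get the task pointer */
+		return (struct task_struct *)
+			((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	}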
+
+cmpxchg Tricks
+--------------
+
+Some architectures implement an atomic cmpxchg (Compare and Exchange). This
+is used (when applicable) to keep the fast path of grabbing and releasing
+mutexes short.
+
+cmpxchg is basically the following function performed atomically:
+
+unsigned long _cmpxchg(unsigned long *A, unsigned long *B, unsigned long *C)
+{
+ unsigned long T = *A;
+ if (*A == *B) {
+ *A = *C;
+ }
+ return T;
+}
+#define cmpxchg(a,b,c) _cmpxchg(&a,&b,&c)
+
+This is really nice to have, since it allows you to only update a variable
+if the variable is what you expect it to be. You know if it succeeded if
+the return value (the old value of A) is equal to B.
+
+The macro rt_mutex_cmpxchg is used to try to lock and unlock mutexes. If
+the architecture does not support CMPXCHG, then this macro is simply set
+to fail every time. But if CMPXCHG is supported, then this helps
+greatly in keeping the fast path short.
+
+The use of rt_mutex_cmpxchg with the flags in the owner field helps optimize
+the system for architectures that support it. This will also be explained
+later in this document.
+
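+Roughly, the fast path locking looks like the following sketch (names are
+approximate; the real code lives in kernel/rtmutex.c):
+
+	/* only usable if the architecture provides cmpxchg */
+	#define rt_mutex_cmpxchg(l, c, n)	(cmpxchg(&(l)->owner, c, n) == c)
+
+	static inline void rt_mutex_fastlock(struct rt_mutex *lock)
+	{
+		/* succeeds only if the owner field was NULL: no owner and
+		   no waiters, so no further work is needed */
+		if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
+			return;
+		/* contention (or no cmpxchg support): take the slow path */
+		rt_mutex_slowlock(lock);
+	}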
+
+Priority adjustments
+--------------------
+
+The implementation of the PI code in rtmutex.c has several places where a
+process must adjust its priority. With the help of the pi_list of a
+process, it is rather easy to know what needs to be adjusted.
+
+The functions implementing the task adjustments are rt_mutex_adjust_prio,
+__rt_mutex_adjust_prio (same as the former, but expects the task pi_lock
+to already be taken), rt_mutex_getprio, and rt_mutex_setprio.
+
+rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio.
+
+rt_mutex_getprio returns the priority that the task should have. Either the
+task's own normal priority, or if a process of a higher priority is waiting on
+a mutex owned by the task, then that higher priority should be returned.
+Since the pi_list of a task holds a priority-ordered list of all the top
+waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs
+to compare the top pi waiter to its own normal priority, and return the higher
+priority back.
+
+(Note: if looking at the code, you will notice that the lower number of
+ prio is returned. This is because the prio field in the task structure
+ is an inverse order of the actual priority. So a "prio" of 5 is
+ of higher priority than a "prio" of 10.)
+
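+In other words, rt_mutex_getprio boils down to something like the following
+sketch (the real code lives in kernel/rtmutex.c):
+
+	int rt_mutex_getprio(struct task_struct *task)
+	{
+		if (likely(!task_has_pi_waiters(task)))
+			return task->normal_prio;
+
+		/* lower prio value means higher priority */
+		return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+			   task->normal_prio);
+	}
+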
+__rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the
+result does not equal the task's current priority, then rt_mutex_setprio
+is called to adjust the priority of the task to the new priority.
+Note that rt_mutex_setprio is defined in kernel/sched.c to implement the
+actual change in priority.
+
+It is interesting to note that __rt_mutex_adjust_prio can either increase
+or decrease the priority of the task. In the case that a higher priority
+process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio
+would increase/boost the task's priority. But if a higher priority task
+were for some reason to leave the mutex (timeout or signal), this same function
+would decrease/unboost the priority of the task. That is because the pi_list
+always contains the highest priority task that is waiting on a mutex owned
+by the task, so we only need to compare the priority of that top pi waiter
+to the normal priority of the given task.
+
+
+High level overview of the PI chain walk
+----------------------------------------
+
+The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain.
+
+The implementation has gone through several iterations, and has ended up
+with what we believe is the best. It walks the PI chain by only grabbing
+at most two locks at a time, and is very efficient.
+
+The rt_mutex_adjust_prio_chain can be used either to boost or lower process
+priorities.
+
+rt_mutex_adjust_prio_chain is called with a task to be checked for PI
+(de)boosting (the owner of a mutex that a process is blocking on), a flag to
+check for deadlocking, the mutex that the task owns, and a pointer to a waiter
+that is the process's waiter struct that is blocked on the mutex (although this
+parameter may be NULL for deboosting).
+
+For this explanation, I will not mention deadlock detection. This explanation
+will try to stay at a high level.
+
+When this function is called, there are no locks held. That also means
+that the state of the owner and lock can change after this function has been
+entered.
+
+Before this function is called, the task has already had rt_mutex_adjust_prio
+performed on it. This means that the task is set to the priority that it
+should be at, but the plist nodes of the task's waiter have not been updated
+with the new priorities, and this task may not be in the proper locations
+in the pi_lists and wait_lists that the task is blocked on. This function
+solves all that.
+
+A loop is entered, where task is the owner to be checked for PI changes that
+was passed by parameter (for the first iteration). The pi_lock of this task is
+taken to prevent any more changes to the pi_list of the task. This also
+prevents new tasks from completing the blocking on a mutex that is owned by this
+task.
+
+If the task is not blocked on a mutex then the loop is exited. We are at
+the top of the PI chain.
+
+A check is now done to see if the original waiter (the process that is blocked
+on the current mutex) is the top pi waiter of the task. That is, is this
+waiter at the top of the task's pi_list? If it is not, it either means that
+there is another process higher in priority that is blocked on one of the
+mutexes that the task owns, or that the waiter has just woken up via a signal
+or timeout and has left the PI chain. In either case, the loop is exited, since
+we don't need to do any more changes to the priority of the current task, or any
+task that owns a mutex that this current task is waiting on. A priority chain
+walk is only needed when a new top pi waiter is made to a task.
+
+The next check sees if the task's waiter plist node has the priority equal to
+the priority the task is set at. If they are equal, then we are done with
+the loop. Remember that the function started with the priority of the
+task adjusted, but the plist nodes that hold the task in other processes'
+pi_lists have not been adjusted.
+
+Next, we look at the mutex that the task is blocked on. The mutex's wait_lock
+is taken. This is done by a spin_trylock, because the locking order of the
+pi_lock and wait_lock goes in the opposite direction. If we fail to grab the
+lock, the pi_lock is released, and we restart the loop.
+
+Now that we have both the pi_lock of the task as well as the wait_lock of
+the mutex the task is blocked on, we update the task's waiter's plist node
+that is located on the mutex's wait_list.
+
+Now we release the pi_lock of the task.
+
+Next the owner of the mutex has its pi_lock taken, so we can update the
+task's entry in the owner's pi_list. If the task is the highest priority
+process on the mutex's wait_list, then we remove the previous top waiter
+from the owner's pi_list, and replace it with the task.
+
+Note: It is possible that the task was the current top waiter on the mutex,
+ in which case the task is not yet on the pi_list of the waiter. This
+ is OK, since plist_del does nothing if the plist node is not on any
+ list.
+
+If the task was not the top waiter of the mutex, but it was before we
+did the priority updates, that means we are deboosting/lowering the
+task. In this case, the task is removed from the pi_list of the owner,
+and the new top waiter is added.
+
+Lastly, we unlock both the pi_lock of the task, as well as the mutex's
+wait_lock, and continue the loop again. On the next iteration of the
+loop, the previous owner of the mutex will be the task that will be
+processed.
+
+Note: One might think that the owner of this mutex might have changed
+ since we just grabbed the mutex's wait_lock. And one could be right.
+ The important thing to remember is that the owner could not have
+ become the task that is being processed in the PI chain, since
+ we have taken that task's pi_lock at the beginning of the loop.
+ So as long as there is an owner of this mutex that is not the same
+ process as the task being worked on, we are OK.
+
+ Looking closely at the code, one might be confused. The check for the
+ end of the PI chain is when the task isn't blocked on anything or the
+ task's waiter structure "task" element is NULL. This check is
+ protected only by the task's pi_lock. But the code to unlock the mutex
+ sets the task's waiter structure "task" element to NULL with only
+ the protection of the mutex's wait_lock, which was not taken yet.
+ Isn't this a race condition if the task becomes the new owner?
+
+ The answer is No! The trick is the spin_trylock of the mutex's
+ wait_lock. If we fail that lock, we release the pi_lock of the
+ task and continue the loop, doing the end of PI chain check again.
+
+ In the code to release the lock, the wait_lock of the mutex is held
+ the entire time, and it is not let go when we grab the pi_lock of the
+ new owner of the mutex. So if the switch of a new owner were to happen
+ after the check for end of the PI chain and the grabbing of the
+ wait_lock, the unlocking code would spin on the new owner's pi_lock
+ but never give up the wait_lock. So the PI chain loop is guaranteed to
+ fail the spin_trylock on the wait_lock, release the pi_lock, and
+ try again.
+
+ If you don't quite understand the above, that's OK. You don't have to,
+ unless you really want to make a proof out of it ;)
+
+
+Pending Owners and Lock stealing
+--------------------------------
+
+One of the flags in the owner field of the mutex structure is "Pending Owner".
+What this means is that an owner was chosen by the process releasing the
+mutex, but that owner has yet to wake up and actually take the mutex.
+
+Why is this important? Why can't we just give the mutex to another process
+and be done with it?
+
+The PI code is to help with real-time processes, and to let the highest
+priority process run as long as possible with little latencies and delays.
+If a high priority process owns a mutex that a lower priority process is
+blocked on, when the mutex is released it would be given to the lower priority
+process. What if the higher priority process wants to take that mutex again?
+The high priority process would fail to take that mutex that it just gave up
+and it would need to boost the lower priority process to run with full
+latency of that critical section (since the low priority process just entered
+it).
+
+There's no reason a high priority process that gives up a mutex should be
+penalized if it tries to take that mutex again. If the new owner of the
+mutex has not woken up yet, there's no reason that the higher priority process
+could not take that mutex away.
+
+To solve this, we introduced Pending Ownership and Lock Stealing. When a
+new process is given a mutex that it was blocked on, it is only given
+pending ownership. This means that it's the new owner, unless a higher
+priority process comes in and tries to grab that mutex. If a higher priority
+process does come along and wants that mutex, we let the higher priority
+process "steal" the mutex from the pending owner (only if it is still pending)
+and continue with the mutex.
+
+
+Taking of a mutex (The walk through)
+------------------------------------
+
+OK, now let's take a look at the detailed walk through of what happens when
+taking a mutex.
+
+The first thing that is tried is the fast taking of the mutex. This is
+done when we have CMPXCHG enabled (otherwise the fast taking automatically
+fails). Only when the owner field of the mutex is NULL can the lock be
+taken with the CMPXCHG and nothing else needs to be done.
+
+If there is contention on the lock, whether it is owned or has a pending
+owner, we take the slow path (rt_mutex_slowlock).
+
+The slow path function is where the task's waiter structure is created on
+the stack. This is because the waiter structure is only needed for the
+scope of this function. The waiter structure holds the nodes to store
+the task on the wait_list of the mutex, and if need be, the pi_list of
+the owner.
+
+The wait_lock of the mutex is taken since the slow path of unlocking the
+mutex also takes this lock.
+
+We then call try_to_take_rt_mutex. This is where the architecture that
+does not implement CMPXCHG would always grab the lock (if there's no
+contention).
+
+try_to_take_rt_mutex is used every time the task tries to grab a mutex in the
+slow path. The first thing that is done here is an atomic setting of
+the "Has Waiters" flag of the mutex's owner field. Yes, this could really
+be false, because if the mutex has no owner, there are no waiters and
+the current task also won't have any waiters. But we don't have the lock
+yet, so we assume we are going to be a waiter. The reason for this is to
+play nice for those architectures that do have CMPXCHG. By setting this flag
+now, the owner of the mutex can't release the mutex without going into the
+slow unlock path, and it would then need to grab the wait_lock, which this
+code currently holds. So setting the "Has Waiters" flag forces the owner
+to synchronize with this code.
+
+Now that we know that we can't have any races with the owner releasing the
+mutex, we check to see if we can take the ownership. This is done if the
+mutex doesn't have an owner, or if we can steal the mutex from a pending
+owner. Let's look at the situations we have here.
+
+ 1) Has owner that is pending
+ ----------------------------
+
+ The mutex has an owner, but it hasn't woken up and the mutex flag
+ "Pending Owner" is set. The first check is to see if the owner isn't the
+ current task. This is because this function is also used for the pending
+ owner to grab the mutex. When a pending owner wakes up, it checks to see
+ if it can take the mutex, and this is done if the owner is already set to
+ itself. If so, we succeed and leave the function, clearing the "Pending
+ Owner" bit.
+
+ If the pending owner is not current, we check to see if the current priority is
+ higher than the pending owner. If not, we fail the function and return.
+
+ There's also something special about a pending owner. That is, a pending owner
+ is never blocked on a mutex. So there is no PI chain to worry about. It also
+ means that if the mutex doesn't have any waiters, there's no accounting needed
+ to update the pending owner's pi_list, since we only worry about processes
+ blocked on the current mutex.
+
+ If there are waiters on this mutex, and we just stole the ownership, we need
+ to take the top waiter, remove it from the pi_list of the pending owner, and
+ add it to the current pi_list. Note that at this moment, the pending owner
+ is no longer on the list of waiters. This is fine, since the pending owner
+ would add itself back when it realizes that it had the ownership stolen
+ from itself. When the pending owner tries to grab the mutex, it will fail
+ in try_to_take_rt_mutex if the owner field points to another process.
+
+ 2) No owner
+ -----------
+
+ If there is no owner (or we successfully stole the lock), we set the owner
+ of the mutex to current, and set the flag of "Has Waiters" if the current
+ mutex actually has waiters, or we clear the flag if it doesn't. See, it was
+ OK that we set that flag early, since now it is cleared.
+
+ 3) Failed to grab ownership
+ ---------------------------
+
+ The most interesting case is when we fail to take ownership. This means that
+ there exists an owner, or there's a pending owner with equal or higher
+ priority than the current task.
+
+We'll continue on the failed case.
+
+If the mutex has a timeout, we set up a timer to go off to break us out
+of this mutex if we failed to get it after a specified amount of time.
+
+Now we enter a loop that will continue to try to take ownership of the mutex, or
+fail from a timeout or signal.
+
+Once again we try to take the mutex. This will usually fail the first time
+in the loop, since it had just failed to get the mutex. But the second time
+in the loop, this would likely succeed, since the task would likely be
+the pending owner.
+
+If the task's state is TASK_INTERRUPTIBLE, a check for signals and timeout is done
+here.
+
+The waiter structure has a "task" field that points to the task that is blocked
+on the mutex. This field can be NULL the first time it goes through the loop
+or if the task is a pending owner and had its mutex stolen. If the "task"
+field is NULL then we need to set up the accounting for it.
+
+Task blocks on mutex
+--------------------
+
+The accounting of a mutex and process is done with the waiter structure of
+the process. The "task" field is set to the process, and the "lock" field
+to the mutex. The plist nodes are initialized to the process's current
+priority.
+
+Since the wait_lock was taken at the entry of the slow lock, we can safely
+add the waiter to the wait_list. If the current process is the highest
+priority process currently waiting on this mutex, then we remove the
+previous top waiter process (if it exists) from the pi_list of the owner,
+and add the current process to that list. Since the pi_list of the owner
+has changed, we call rt_mutex_adjust_prio on the owner to see if the owner
+should adjust its priority accordingly.
+
+If the owner is also blocked on a lock, and had its pi_list changed
+(or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead
+and run rt_mutex_adjust_prio_chain on the owner, as described earlier.
+
+Now all locks are released, and if the current process is still blocked on a
+mutex (waiter "task" field is not NULL), then we go to sleep (call schedule).
+
+Waking up in the loop
+---------------------
+
+The schedule can then wake up for a few reasons.
+ 1) we were given pending ownership of the mutex.
+ 2) we received a signal and were TASK_INTERRUPTIBLE
+ 3) we had a timeout and were TASK_INTERRUPTIBLE
+
+In any of these cases, we continue the loop and once again try to grab the
+ownership of the mutex. If we succeed, we exit the loop; otherwise we
+continue. On a signal or timeout we will also exit the loop; if instead we
+had the mutex stolen, we simply add ourselves back on the lists and go back
+to sleep.
+
+Note: For various reasons, because of timeout and signals, the steal mutex
+ algorithm needs to be careful. This is because the current process is
+ still on the wait_list. And because of dynamic changing of priorities,
+ especially on SCHED_OTHER tasks, the current process can be the
+ highest priority task on the wait_list.
+
+Failed to get mutex on Timeout or Signal
+----------------------------------------
+
+If a timeout or signal occurred, the waiter's "task" field would not be
+NULL and the task needs to be taken off the wait_list of the mutex and perhaps
+pi_list of the owner. If this process was a high priority process, then
+the rt_mutex_adjust_prio_chain needs to be executed again on the owner,
+but this time it will be lowering the priorities.
+
+
+Unlocking the Mutex
+-------------------
+
+The unlocking of a mutex also has a fast path for those architectures with
+CMPXCHG. Since the taking of a mutex on contention always sets the
+"Has Waiters" flag of the mutex's owner, we use this to know if we need to
+take the slow path when unlocking the mutex. If the mutex doesn't have any
+waiters, the owner field of the mutex would equal the current process and
+the mutex can be unlocked by just replacing the owner field with NULL.
+
+If the owner field has the "Has Waiters" bit set (or CMPXCHG is not available),
+the slow unlock path is taken.
+
+The first thing done in the slow unlock path is to take the wait_lock of the
+mutex. This synchronizes the locking and unlocking of the mutex.
+
+A check is made to see if the mutex has waiters or not. On architectures that
+do not have CMPXCHG, this is the location that the owner of the mutex will
+determine if a waiter needs to be awoken or not. On architectures that
+do have CMPXCHG, that check is done in the fast path, but it is still needed
+in the slow path too. If a waiter of a mutex woke up because of a signal
+or timeout between the time the owner failed the fast path CMPXCHG check and
+the grabbing of the wait_lock, the mutex may not have any waiters, thus the
+owner still needs to make this check. If there are no waiters then the mutex
+owner field is set to NULL, the wait_lock is released and nothing more is
+needed.
+
+If there are waiters, then we need to wake one up and give that waiter
+pending ownership.
+
+On the wake up code, the pi_lock of the current owner is taken. The top
+waiter of the lock is found and removed from the wait_list of the mutex
+as well as the pi_list of the current owner. The task field of the new
+pending owner's waiter structure is set to NULL, and the owner field of the
+mutex is set to the new owner with the "Pending Owner" bit set, as well
+as the "Has Waiters" bit if there still are other processes blocked on the
+mutex.
+
+The pi_lock of the previous owner is released, and the new pending owner's
+pi_lock is taken. Remember that this is the trick to prevent the race
+condition in rt_mutex_adjust_prio_chain from adding itself as a waiter
+on the mutex.
+
+We now clear the "pi_blocked_on" field of the new pending owner, and if
+the mutex still has waiters pending, we add the new top waiter to the pi_list
+of the pending owner.
+
+Finally we unlock the pi_lock of the pending owner and wake it up.
+
+
+Contact
+-------
+
+For updates on this document, please email Steven Rostedt <rostedt@goodmis.org>
+
+
+Credits
+-------
+
+Author: Steven Rostedt <rostedt@goodmis.org>
+
+Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap
+
+Updates
+-------
+
+This document was originally written for 2.6.17-rc3-mm1
diff --git a/Documentation/rt-mutex.txt b/Documentation/rt-mutex.txt
new file mode 100644
index 000000000000..243393d882ee
--- /dev/null
+++ b/Documentation/rt-mutex.txt
@@ -0,0 +1,79 @@
+RT-mutex subsystem with PI support
+----------------------------------
+
+RT-mutexes with priority inheritance are used to support PI-futexes,
+which enable pthread_mutex_t priority inheritance attributes
+(PTHREAD_PRIO_INHERIT). [See Documentation/pi-futex.txt for more details
+about PI-futexes.]
+
+This technology was developed in the -rt tree and streamlined for
+pthread_mutex support.
+
+Basic principles:
+-----------------
+
+RT-mutexes extend the semantics of simple mutexes by the priority
+inheritance protocol.
+
+A low priority owner of a rt-mutex inherits the priority of a higher
+priority waiter until the rt-mutex is released. If the temporarily
+boosted owner blocks on a rt-mutex itself it propagates the priority
+boosting to the owner of the other rt_mutex it gets blocked on. The
+priority boosting is immediately removed once the rt_mutex has been
+unlocked.
+
+This approach allows us to shorten the time high-prio tasks block on
+mutexes which protect shared resources. Priority inheritance is not a
+magic bullet for poorly designed applications, but it allows
+well-designed applications to use userspace locks in critical parts of
+a high priority thread, without losing determinism.
+
+The enqueueing of the waiters into the rtmutex waiter list is done in
+priority order. For same priorities FIFO order is chosen. For each
+rtmutex, only the top priority waiter is enqueued into the owner's
+priority waiters list. This list too queues in priority order. Whenever
+the top priority waiter of a task changes (for example it timed out or
+got a signal), the priority of the owner task is readjusted. [The
+priority enqueueing is handled by "plists", see include/linux/plist.h
+for more details.]
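+
+As an illustration only (this is not a quote from the rt-mutex code), queueing
+a waiter by priority with the plist helpers could look roughly like:
+
+    struct rt_mutex_waiter waiter, *top;
+
+    plist_node_init(&waiter.list_entry, current->prio);
+    plist_add(&waiter.list_entry, &lock->wait_list);  /* priority order, FIFO per prio */
+
+    /* the highest priority waiter is always first in the list */
+    top = plist_first_entry(&lock->wait_list,
+                            struct rt_mutex_waiter, list_entry);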
+
+RT-mutexes are optimized for fastpath operations and have no internal
+locking overhead when locking an uncontended mutex or unlocking a mutex
+without waiters. The optimized fastpath operations require cmpxchg
+support. [If that is not available then the rt-mutex internal spinlock
+is used.]
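+
+For illustration, the fastpath boils down to a single compare-and-exchange on
+the owner field described below; the function names here are simplified
+stand-ins for the real helpers:
+
+    /* uncontended acquire: no owner, no waiter bits set */
+    if (cmpxchg(&lock->owner, NULL, current) != NULL)
+        rt_mutex_slowlock(lock);    /* contended - take the slow path */
+
+    /* uncontended release: we are the owner, no waiter bits set */
+    if (cmpxchg(&lock->owner, current, NULL) != current)
+        rt_mutex_slowunlock(lock);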
+
+The state of the rt-mutex is tracked via the owner field of the rt-mutex
+structure:
+
+rt_mutex->owner holds the task_struct pointer of the owner. Bits 0 and 1
+are used to keep track of the "owner is pending" and "rtmutex has
+waiters" state.
+
+ owner bit1 bit0
+ NULL 0 0 mutex is free (fast acquire possible)
+ NULL 0 1 invalid state
+ NULL 1 0 Transitional state*
+ NULL 1 1 invalid state
+ taskpointer 0 0 mutex is held (fast release possible)
+ taskpointer 0 1 task is pending owner
+ taskpointer 1 0 mutex is held and has waiters
+ taskpointer 1 1 task is pending owner and mutex has waiters
+
+Pending-ownership handling is a performance optimization:
+pending-ownership is assigned to the first (highest priority) waiter of
+the mutex, when the mutex is released. The thread is woken up and once
+it starts executing it can acquire the mutex. Until the mutex is taken
+by it (bit 0 is cleared), a competing higher priority thread can "steal"
+the mutex, which puts the woken up thread back on the waiters list.
+
+The pending-ownership optimization is especially important for the
+uninterrupted workflow of high-prio tasks which repeatedly
+take/release locks that have lower-prio waiters. Without this
+optimization the higher-prio thread would ping-pong to the lower-prio
+task [because at unlock time we always assign a new owner].
+
+(*) The "mutex has waiters" bit gets set to take the lock. If the lock
+doesn't already have an owner, this bit is quickly cleared if there are
+no waiters. So this is a transitional state to synchronize with looking
+at the owner field of the mutex and the mutex owner releasing the lock.
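+
+A possible way to decode that encoding (constant and helper names are
+illustrative, patterned on the rt-mutex implementation headers):
+
+    #define RT_MUTEX_OWNER_PENDING  1UL    /* bit 0 */
+    #define RT_MUTEX_HAS_WAITERS    2UL    /* bit 1 */
+
+    static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+    {
+        return (struct task_struct *)((unsigned long)lock->owner &
+                ~(RT_MUTEX_OWNER_PENDING | RT_MUTEX_HAS_WAITERS));
+    }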
diff --git a/Documentation/scsi/ppa.txt b/Documentation/scsi/ppa.txt
index 0dac88d86d87..5d9223bc1bd5 100644
--- a/Documentation/scsi/ppa.txt
+++ b/Documentation/scsi/ppa.txt
@@ -12,5 +12,3 @@ http://www.torque.net/parport/
Email list for Linux Parport
linux-parport@torque.net
-Email for problems with ZIP or ZIP Plus drivers
-campbell@torque.net
diff --git a/Documentation/tty.txt b/Documentation/tty.txt
index 8ff7bc2a0811..dab56604745d 100644
--- a/Documentation/tty.txt
+++ b/Documentation/tty.txt
@@ -80,13 +80,6 @@ receive_buf() - Hand buffers of bytes from the driver to the ldisc
for processing. Semantics currently rather
mysterious 8(
-receive_room() - Can be called by the driver layer at any time when
- the ldisc is opened. The ldisc must be able to
- handle the reported amount of data at that instant.
- Synchronization between active receive_buf and
- receive_room calls is down to the driver not the
- ldisc. Must not sleep.
-
write_wakeup() - May be called at any point between open and close.
The TTY_DO_WRITE_WAKEUP flag indicates if a call
is needed but always races versus calls. Thus the
diff --git a/Documentation/video4linux/README.pvrusb2 b/Documentation/video4linux/README.pvrusb2
new file mode 100644
index 000000000000..c73a32c34528
--- /dev/null
+++ b/Documentation/video4linux/README.pvrusb2
@@ -0,0 +1,212 @@
+
+$Id$
+Mike Isely <isely@pobox.com>
+
+ pvrusb2 driver
+
+Background:
+
+ This driver is intended for the "Hauppauge WinTV PVR USB 2.0", which
+ is a USB 2.0 hosted TV Tuner. This driver is a work in progress.
+ Its history started with the reverse-engineering effort by Björn
+ Danielsson <pvrusb2@dax.nu> whose web page can be found here:
+
+ http://pvrusb2.dax.nu/
+
+ From there Aurelien Alleaume <slts@free.fr> began an effort to
+ create a video4linux compatible driver. I began with Aurelien's
+ last known snapshot and evolved the driver to the state it is in
+ here.
+
+ More information on this driver can be found at:
+
+ http://www.isely.net/pvrusb2.html
+
+
+ This driver has a strong separation of layers. They are very
+ roughly:
+
+ 1a. Low level wire-protocol implementation with the device.
+
+ 1b. I2C adaptor implementation and corresponding I2C client drivers
+ implemented elsewhere in V4L.
+
+ 1c. High level hardware driver implementation which coordinates all
+ activities that ensure correct operation of the device.
+
+ 2. A "context" layer which manages instancing of driver, setup,
+ tear-down, arbitration, and interaction with high level
+ interfaces appropriately as devices are hotplugged in the
+ system.
+
+ 3. High level interfaces which glue the driver to various published
+ Linux APIs (V4L, sysfs, maybe DVB in the future).
+
+ The most important shearing layer is between the top 2 layers. A
+ lot of work went into the driver to ensure that any kind of
+ conceivable API can be laid on top of the core driver. (Yes, the
+ driver internally leverages V4L to do its work but that really has
+ nothing to do with the API published by the driver to the outside
+ world.) The architecture allows for different APIs to
+ simultaneously access the driver. I have a strong sense of fairness
+ about APIs and also feel that it is a good design principle to keep
+ implementation and interface isolated from each other. Thus while
+ right now the V4L high level interface is the most complete, the
+ sysfs high level interface will work equally well for similar
+ functions, and there's no reason I see right now why it shouldn't be
+ possible to produce a DVB high level interface that can sit right
+ alongside V4L.
+
+ NOTE: Complete documentation on the pvrusb2 driver is contained in
+ the html files within the doc directory; these are exactly the same
+ as what is on the web site at the time. Browse those files
+ (especially the FAQ) before asking questions.
+
+
+Building
+
+ To build these modules essentially amounts to just running "Make",
+ but you need the kernel source tree nearby and you will likely also
+ want to set a few controlling environment variables first in order
+ to link things up with that source tree. Please see the Makefile
+ here for comments that explain how to do that.
+
+
+Source file list / functional overview:
+
+ (Note: The term "module" used below generally refers to loosely
+ defined functional units within the pvrusb2 driver and bears no
+ relation to the Linux kernel's concept of a loadable module.)
+
+ pvrusb2-audio.[ch] - This is glue logic that resides between this
+ driver and the msp3400.ko I2C client driver (which is found
+ elsewhere in V4L).
+
+ pvrusb2-context.[ch] - This module implements the context for an
+ instance of the driver. Everything else eventually ties back to
+ or is otherwise instanced within the data structures implemented
+ here. Hotplugging is ultimately coordinated here. All high level
+ interfaces tie into the driver through this module. This module
+ helps arbitrate each interface's access to the actual driver core,
+ and is designed to allow concurrent access through multiple
+ instances of multiple interfaces (thus you can for example change
+ the tuner's frequency through sysfs while simultaneously streaming
+ video through V4L out to an instance of mplayer).
+
+ pvrusb2-debug.h - This header defines a printk() wrapper and a mask
+ of debugging bit definitions for the various kinds of debug
+ messages that can be enabled within the driver.
+
+ pvrusb2-debugifc.[ch] - This module implements a crude command line
+ oriented debug interface into the driver. Aside from being part
+ of the process for implementing manual firmware extraction (see
+ the pvrusb2 web site mentioned earlier), probably I'm the only one
+ who has ever used this. It is mainly a debugging aid.
+
+ pvrusb2-eeprom.[ch] - This is glue logic that resides between this
+     driver and the tveeprom.ko module, which is itself implemented
+ elsewhere in V4L.
+
+ pvrusb2-encoder.[ch] - This module implements all protocol needed to
+ interact with the Conexant mpeg2 encoder chip within the pvrusb2
+ device. It is a crude echo of corresponding logic in ivtv,
+ however the design goals (strict isolation) and physical layer
+     (proxy through USB instead of PCI) are different enough that this
+ implementation had to be completely different.
+
+ pvrusb2-hdw-internal.h - This header defines the core data structure
+ in the driver used to track ALL internal state related to control
+ of the hardware. Nobody outside of the core hardware-handling
+ modules should have any business using this header. All external
+ access to the driver should be through one of the high level
+ interfaces (e.g. V4L, sysfs, etc), and in fact even those high
+ level interfaces are restricted to the API defined in
+ pvrusb2-hdw.h and NOT this header.
+
+ pvrusb2-hdw.h - This header defines the full internal API for
+ controlling the hardware. High level interfaces (e.g. V4L, sysfs)
+ will work through here.
+
+ pvrusb2-hdw.c - This module implements all the various bits of logic
+ that handle overall control of a specific pvrusb2 device.
+ (Policy, instantiation, and arbitration of pvrusb2 devices fall
+     within the jurisdiction of pvrusb2-context, not here).
+
+ pvrusb2-i2c-chips-*.c - These modules implement the glue logic to
+ tie together and configure various I2C modules as they attach to
+ the I2C bus. There are two versions of this file. The "v4l2"
+ version is intended to be used in-tree alongside V4L, where we
+ implement just the logic that makes sense for a pure V4L
+ environment. The "all" version is intended for use outside of
+ V4L, where we might encounter other possibly "challenging" modules
+ from ivtv or older kernel snapshots (or even the support modules
+ in the standalone snapshot).
+
+ pvrusb2-i2c-cmd-v4l1.[ch] - This module implements generic V4L1
+ compatible commands to the I2C modules. It is here where state
+ changes inside the pvrusb2 driver are translated into V4L1
+     commands that are in turn sent to the various I2C modules.
+
+ pvrusb2-i2c-cmd-v4l2.[ch] - This module implements generic V4L2
+ compatible commands to the I2C modules. It is here where state
+ changes inside the pvrusb2 driver are translated into V4L2
+     commands that are in turn sent to the various I2C modules.
+
+ pvrusb2-i2c-core.[ch] - This module provides an implementation of a
+ kernel-friendly I2C adaptor driver, through which other external
+ I2C client drivers (e.g. msp3400, tuner, lirc) may connect and
+     operate corresponding chips within the pvrusb2 device. It is
+ through here that other V4L modules can reach into this driver to
+ operate specific pieces (and those modules are in turn driven by
+ glue logic which is coordinated by pvrusb2-hdw, doled out by
+ pvrusb2-context, and then ultimately made available to users
+ through one of the high level interfaces).
+
+ pvrusb2-io.[ch] - This module implements a very low level ring of
+ transfer buffers, required in order to stream data from the
+ device. This module is *very* low level. It only operates the
+ buffers and makes no attempt to define any policy or mechanism for
+ how such buffers might be used.
+
+ pvrusb2-ioread.[ch] - This module layers on top of pvrusb2-io.[ch]
+ to provide a streaming API usable by a read() system call style of
+ I/O. Right now this is the only layer on top of pvrusb2-io.[ch],
+ however the underlying architecture here was intended to allow for
+     other styles of I/O to be implemented with additional modules, like
+ mmap()'ed buffers or something even more exotic.
+
+ pvrusb2-main.c - This is the top level of the driver. Module level
+ and USB core entry points are here. This is our "main".
+
+ pvrusb2-sysfs.[ch] - This is the high level interface which ties the
+ pvrusb2 driver into sysfs. Through this interface you can do
+ everything with the driver except actually stream data.
+
+ pvrusb2-tuner.[ch] - This is glue logic that resides between this
+ driver and the tuner.ko I2C client driver (which is found
+ elsewhere in V4L).
+
+ pvrusb2-util.h - This header defines some common macros used
+ throughout the driver. These macros are not really specific to
+ the driver, but they had to go somewhere.
+
+ pvrusb2-v4l2.[ch] - This is the high level interface which ties the
+ pvrusb2 driver into video4linux. It is through here that V4L
+ applications can open and operate the driver in the usual V4L
+ ways. Note that **ALL** V4L functionality is published only
+ through here and nowhere else.
+
+ pvrusb2-video-*.[ch] - This is glue logic that resides between this
+ driver and the saa711x.ko I2C client driver (which is found
+ elsewhere in V4L). Note that saa711x.ko used to be known as
+ saa7115.ko in ivtv. There are two versions of this; one is
+ selected depending on the particular saa711[5x].ko that is found.
+
+ pvrusb2.h - This header contains compile time tunable parameters
+ (and at the moment the driver has very little that needs to be
+ tuned).
+
+
+ -Mike Isely
+ isely@pobox.com
+
diff --git a/Documentation/watchdog/pcwd-watchdog.txt b/Documentation/watchdog/pcwd-watchdog.txt
index 12187a33e310..d9ee6336c1d4 100644
--- a/Documentation/watchdog/pcwd-watchdog.txt
+++ b/Documentation/watchdog/pcwd-watchdog.txt
@@ -22,78 +22,9 @@
to run the program with an "&" to run it in the background!)
If you want to write a program to be compatible with the PC Watchdog
- driver, simply do the following:
-
--- Snippet of code --
-/*
- * Watchdog Driver Test Program
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <linux/types.h>
-#include <linux/watchdog.h>
-
-int fd;
-
-/*
- * This function simply sends an IOCTL to the driver, which in turn ticks
- * the PC Watchdog card to reset its internal timer so it doesn't trigger
- * a computer reset.
- */
-void keep_alive(void)
-{
- int dummy;
-
- ioctl(fd, WDIOC_KEEPALIVE, &dummy);
-}
-
-/*
- * The main program. Run the program with "-d" to disable the card,
- * or "-e" to enable the card.
- */
-int main(int argc, char *argv[])
-{
- fd = open("/dev/watchdog", O_WRONLY);
-
- if (fd == -1) {
- fprintf(stderr, "Watchdog device not enabled.\n");
- fflush(stderr);
- exit(-1);
- }
-
- if (argc > 1) {
- if (!strncasecmp(argv[1], "-d", 2)) {
- ioctl(fd, WDIOC_SETOPTIONS, WDIOS_DISABLECARD);
- fprintf(stderr, "Watchdog card disabled.\n");
- fflush(stderr);
- exit(0);
- } else if (!strncasecmp(argv[1], "-e", 2)) {
- ioctl(fd, WDIOC_SETOPTIONS, WDIOS_ENABLECARD);
- fprintf(stderr, "Watchdog card enabled.\n");
- fflush(stderr);
- exit(0);
- } else {
- fprintf(stderr, "-d to disable, -e to enable.\n");
- fprintf(stderr, "run by itself to tick the card.\n");
- fflush(stderr);
- exit(0);
- }
- } else {
- fprintf(stderr, "Watchdog Ticking Away!\n");
- fflush(stderr);
- }
-
- while(1) {
- keep_alive();
- sleep(1);
- }
-}
--- End snippet --
+  driver, simply use or modify the watchdog test program:
+ Documentation/watchdog/src/watchdog-test.c
+
Other IOCTL functions include:
diff --git a/Documentation/watchdog/src/watchdog-simple.c b/Documentation/watchdog/src/watchdog-simple.c
new file mode 100644
index 000000000000..85cf17c48669
--- /dev/null
+++ b/Documentation/watchdog/src/watchdog-simple.c
@@ -0,0 +1,15 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+int main(int argc, const char *argv[]) {
+ int fd = open("/dev/watchdog", O_WRONLY);
+ if (fd == -1) {
+ perror("watchdog");
+ exit(1);
+ }
+ while (1) {
+ write(fd, "\0", 1);
+ fsync(fd);
+ sleep(10);
+ }
+}
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
new file mode 100644
index 000000000000..65f6c19cb865
--- /dev/null
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -0,0 +1,68 @@
+/*
+ * Watchdog Driver Test Program
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+int fd;
+
+/*
+ * This function simply sends an IOCTL to the driver, which in turn ticks
+ * the PC Watchdog card to reset its internal timer so it doesn't trigger
+ * a computer reset.
+ */
+void keep_alive(void)
+{
+ int dummy;
+
+ ioctl(fd, WDIOC_KEEPALIVE, &dummy);
+}
+
+/*
+ * The main program. Run the program with "-d" to disable the card,
+ * or "-e" to enable the card.
+ */
+int main(int argc, char *argv[])
+{
+ fd = open("/dev/watchdog", O_WRONLY);
+
+ if (fd == -1) {
+ fprintf(stderr, "Watchdog device not enabled.\n");
+ fflush(stderr);
+ exit(-1);
+ }
+
+ if (argc > 1) {
+ if (!strncasecmp(argv[1], "-d", 2)) {
+ ioctl(fd, WDIOC_SETOPTIONS, WDIOS_DISABLECARD);
+ fprintf(stderr, "Watchdog card disabled.\n");
+ fflush(stderr);
+ exit(0);
+ } else if (!strncasecmp(argv[1], "-e", 2)) {
+ ioctl(fd, WDIOC_SETOPTIONS, WDIOS_ENABLECARD);
+ fprintf(stderr, "Watchdog card enabled.\n");
+ fflush(stderr);
+ exit(0);
+ } else {
+ fprintf(stderr, "-d to disable, -e to enable.\n");
+ fprintf(stderr, "run by itself to tick the card.\n");
+ fflush(stderr);
+ exit(0);
+ }
+ } else {
+ fprintf(stderr, "Watchdog Ticking Away!\n");
+ fflush(stderr);
+ }
+
+ while(1) {
+ keep_alive();
+ sleep(1);
+ }
+}
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index 21ed51173662..958ff3d48be3 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -34,22 +34,7 @@ activates as soon as /dev/watchdog is opened and will reboot unless
the watchdog is pinged within a certain time, this time is called the
timeout or margin. The simplest way to ping the watchdog is to write
some data to the device. So a very simple watchdog daemon would look
-like this:
-
-#include <stdlib.h>
-#include <fcntl.h>
-
-int main(int argc, const char *argv[]) {
- int fd=open("/dev/watchdog",O_WRONLY);
- if (fd==-1) {
- perror("watchdog");
- exit(1);
- }
- while(1) {
- write(fd, "\0", 1);
- sleep(10);
- }
-}
+like the example program in Documentation/watchdog/src/watchdog-simple.c
A more advanced driver could for example check that a HTTP server is
still responding before doing the write call to ping the watchdog.
@@ -110,7 +95,40 @@ current timeout using the GETTIMEOUT ioctl.
ioctl(fd, WDIOC_GETTIMEOUT, &timeout);
printf("The timeout was is %d seconds\n", timeout);
-Envinronmental monitoring:
+Pretimeouts:
+
+Some watchdog timers can be set to have a trigger go off before the
+actual time they will reset the system. This can be done with an NMI,
+interrupt, or other mechanism. This allows Linux to record useful
+information (like panic information and kernel coredumps) before it
+resets.
+
+ pretimeout = 10;
+ ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);
+
+Note that the pretimeout is the number of seconds before the time
+when the timeout will go off. It is not the number of seconds until
+the pretimeout. So, for instance, if you set the timeout to 60 seconds
+and the pretimeout to 10 seconds, the pretimeout will go off in 50
+seconds. Setting a pretimeout to zero disables it.
+
+There is also a get function for getting the pretimeout:
+
+ ioctl(fd, WDIOC_GETPRETIMEOUT, &timeout);
+    printf("The pretimeout is %d seconds\n", timeout);
+
+Not all watchdog drivers will support a pretimeout.
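+
+Whether a driver supports pretimeouts can be probed first; a minimal,
+untested sketch using the WDIOC_GETSUPPORT ioctl:
+
+    struct watchdog_info ident;
+
+    if (ioctl(fd, WDIOC_GETSUPPORT, &ident) == 0 &&
+        (ident.options & WDIOF_PRETIMEOUT)) {
+            pretimeout = 10;
+            ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);
+    }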
+
+Get the number of seconds before reboot:
+
+Some watchdog drivers have the ability to report the remaining time
+before the system will reboot. The WDIOC_GETTIMELEFT ioctl returns
+the number of seconds before reboot.
+
+ ioctl(fd, WDIOC_GETTIMELEFT, &timeleft);
+    printf("The time left is %d seconds\n", timeleft);
+
+Environmental monitoring:
All watchdog drivers are required return more information about the system,
some do temperature, fan and power level monitoring, some can tell you
@@ -169,6 +187,10 @@ The watchdog saw a keepalive ping since it was last queried.
WDIOF_SETTIMEOUT Can set/get the timeout
+The watchdog can do pretimeouts.
+
+ WDIOF_PRETIMEOUT Pretimeout (in seconds), get/set
+
For those drivers that return any bits set in the option field, the
GETSTATUS and GETBOOTSTATUS ioctls can be used to ask for the current
diff --git a/Documentation/watchdog/watchdog.txt b/Documentation/watchdog/watchdog.txt
index dffda29c8799..4b1ff69cc19a 100644
--- a/Documentation/watchdog/watchdog.txt
+++ b/Documentation/watchdog/watchdog.txt
@@ -65,28 +65,7 @@ The external event interfaces on the WDT boards are not currently supported.
Minor numbers are however allocated for it.
-Example Watchdog Driver
------------------------
-
-#include <stdio.h>
-#include <unistd.h>
-#include <fcntl.h>
-
-int main(int argc, const char *argv[])
-{
- int fd=open("/dev/watchdog",O_WRONLY);
- if(fd==-1)
- {
- perror("watchdog");
- exit(1);
- }
- while(1)
- {
- write(fd,"\0",1);
- fsync(fd);
- sleep(10);
- }
-}
+Example Watchdog Driver: see Documentation/watchdog/src/watchdog-simple.c
Contact Information
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index f2cd6ef53ff3..6887d44d2661 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -205,6 +205,27 @@ IOMMU
pages Prereserve that many 128K pages for the software IO bounce buffering.
force Force all IO through the software TLB.
+ calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
+ calgary=[translate_empty_slots]
+ calgary=[disable=<PCI bus number>]
+
+ 64k,...,8M - Set the size of each PCI slot's translation table
+ when using the Calgary IOMMU. This is the size of the translation
+ table itself in main memory. The smallest table, 64k, covers an IO
+ space of 32MB; the largest, 8MB table, can cover an IO space of
+ 4GB. Normally the kernel will make the right choice by itself.
+
+ translate_empty_slots - Enable translation even on slots that have
+ no devices attached to them, in case a device will be hotplugged
+ in the future.
+
+ disable=<PCI bus number> - Disable translation on a given PHB. For
+ example, the built-in graphics adapter resides on the first bridge
+ (PCI bus number 0); if translation (isolation) is enabled on this
+ bridge, X servers that access the hardware directly from user
+ space might stop working. Use this option if you have devices that
+ are accessed from userspace directly on some PCI host bridge.
+
Debugging
oops=panic Always panic on oopses. Default is to just kill the process,
diff --git a/MAINTAINERS b/MAINTAINERS
index 4dcd2f1f14d6..31a13720f23c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1118,6 +1118,11 @@ L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.nu/
S: Maintained
+HARDWARE RANDOM NUMBER GENERATOR CORE
+P: Michael Buesch
+M: mb@bu3sch.de
+S: Maintained
+
HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
P: Robert Love
M: rlove@rlove.org
@@ -1396,7 +1401,8 @@ S: Supported
INPUT (KEYBOARD, MOUSE, JOYSTICK) DRIVERS
P: Dmitry Torokhov
-M: dtor_core@ameritech.net
+M: dmitry.torokhov@gmail.com
+M: dtor@mail.ru
L: linux-input@atrey.karlin.mff.cuni.cz
L: linux-joystick@atrey.karlin.mff.cuni.cz
T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git
@@ -1436,6 +1442,11 @@ P: Tigran Aivazian
M: tigran@veritas.com
S: Maintained
+INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
+P: Deepak Saxena
+M: dsaxena@plexity.net
+S: Maintained
+
INTEL PRO/100 ETHERNET SUPPORT
P: John Ronciak
M: john.ronciak@intel.com
@@ -2725,6 +2736,11 @@ P: Christoph Hellwig
M: hch@infradead.org
S: Maintained
+TI OMAP RANDOM NUMBER GENERATOR SUPPORT
+P: Deepak Saxena
+M: dsaxena@plexity.net
+S: Maintained
+
TI PARALLEL LINK CABLE DRIVER
P: Romain Lievin
M: roms@lpg.ticalc.org
diff --git a/Makefile b/Makefile
index 1700d3f6ea22..e9560c6f8156 100644
--- a/Makefile
+++ b/Makefile
@@ -71,7 +71,7 @@ endif
# In both cases the working directory must be the root of the kernel src.
# 1) O=
# Use "make O=dir/to/store/output/files/"
-#
+#
# 2) Set KBUILD_OUTPUT
# Set the environment variable KBUILD_OUTPUT to point to the directory
# where the output files shall be placed.
@@ -178,18 +178,20 @@ CROSS_COMPILE ?=
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
+KCONFIG_CONFIG ?= .config
+
# SHELL used by kbuild
CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)
-HOSTCC = gcc
-HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCC = gcc
+HOSTCXX = g++
+HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
+HOSTCXXFLAGS = -O2
-# Decide whether to build built-in, modular, or both.
-# Normally, just do built-in.
+# Decide whether to build built-in, modular, or both.
+# Normally, just do built-in.
KBUILD_MODULES :=
KBUILD_BUILTIN := 1
@@ -197,7 +199,7 @@ KBUILD_BUILTIN := 1
# If we have only "make modules", don't compile built-in objects.
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
-# make sure the checksums are uptodate before we record them.
+# make sure the checksums are up to date before we record them.
ifeq ($(MAKECMDGOALS),modules)
KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
@@ -230,7 +232,7 @@ export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
#
# If $(quiet) is empty, the whole command will be printed.
# If it is set to "quiet_", only the short version will be printed.
-# If it is set to "silent_", nothing wil be printed at all, since
+# If it is set to "silent_", nothing will be printed at all, since
# the variable $(silent_cmd_cc_o_c) doesn't exist.
#
# A simple variant is to prefix commands with $(Q) - that's useful
@@ -265,10 +267,9 @@ MAKEFLAGS += --include-dir=$(srctree)
# We need some generic definitions
include $(srctree)/scripts/Kbuild.include
-# For maximum performance (+ possibly random breakage, uncomment
-# the following)
-
-#MAKEFLAGS += -rR
+# Do not use make's built-in rules and variables
+# This increases performance and avoids hard-to-debug behaviour
+MAKEFLAGS += -rR
# Make variables (CC, etc...)
@@ -305,21 +306,21 @@ LINUXINCLUDE := -Iinclude \
CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
-CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
- -fno-strict-aliasing -fno-common
-AFLAGS := -D__ASSEMBLY__
+CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common
+AFLAGS := -D__ASSEMBLY__
-# Read KERNELRELEASE from .kernelrelease (if it exists)
-KERNELRELEASE = $(shell cat .kernelrelease 2> /dev/null)
+# Read KERNELRELEASE from include/config/kernel.release (if it exists)
+KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
-export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION \
- ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
- CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \
- HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
+export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
+export ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE
+export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
export AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
# When compiling out-of-tree modules, put MODVERDIR in the module
@@ -357,12 +358,13 @@ endif
# catch them early, and hand them over to scripts/kconfig/Makefile
# It is allowed to specify more targets when calling make, including
# mixing *config targets and build targets.
-# For example 'make oldconfig all'.
+# For example 'make oldconfig all'.
# Detect when mixed targets is specified, and make a second invocation
# of make so .config is not included in this case either (for *config).
no-dot-config-targets := clean mrproper distclean \
- cscope TAGS tags help %docs check%
+ cscope TAGS tags help %docs check% \
+ kernelrelease kernelversion
config-targets := 0
mixed-targets := 0
@@ -404,9 +406,8 @@ include $(srctree)/arch/$(ARCH)/Makefile
export KBUILD_DEFCONFIG
config %config: scripts_basic outputmakefile FORCE
- $(Q)mkdir -p include/linux
+ $(Q)mkdir -p include/linux include/config
$(Q)$(MAKE) $(build)=scripts/kconfig $@
- $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= .kernelrelease
else
# ===========================================================================
@@ -416,13 +417,11 @@ else
ifeq ($(KBUILD_EXTMOD),)
# Additional helpers built in scripts/
# Carefully list dependencies so we do not try to build scripts twice
-# in parrallel
+# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/MARKER
+scripts: scripts_basic include/config/auto.conf
$(Q)$(MAKE) $(build)=$(@)
-scripts_basic: include/linux/autoconf.h
-
# Objects we will link into vmlinux / subdirs we need to visit
init-y := init/
drivers-y := drivers/ sound/
@@ -436,31 +435,32 @@ ifeq ($(dot-config),1)
# Read in dependencies to all Kconfig* files, make sure to run
# oldconfig if changes are detected.
--include .kconfig.d
+-include include/config/auto.conf.cmd
+-include include/config/auto.conf
-include .config
-
-# If .config needs to be updated, it will be done via the dependency
-# that autoconf has on .config.
# To avoid any implicit rule to kick in, define an empty command
-.config .kconfig.d: ;
+$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
-# If .config is newer than include/linux/autoconf.h, someone tinkered
+# If .config is newer than include/config/auto.conf, someone tinkered
# with it and forgot to run make oldconfig.
-# If kconfig.d is missing then we are probarly in a cleaned tree so
+# If auto.conf.cmd is missing then we are probably in a cleaned tree so
# we execute the config step to be sure to catch updated Kconfig files
-include/linux/autoconf.h: .kconfig.d .config
- $(Q)mkdir -p include/linux
+include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
+ifeq ($(KBUILD_EXTMOD),)
$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
else
+ $(error kernel configuration not valid - run 'make prepare' in $(srctree) to update it)
+endif
+
+else
# Dummy target needed, because used as prerequisite
-include/linux/autoconf.h: ;
+include/config/auto.conf: ;
endif
# The all: target is the default when no target is given on the
# command line.
# This allow a user to issue only 'make' to build a kernel including modules
-# Defaults vmlinux but it is usually overriden in the arch makefile
+# Defaults to vmlinux, but it is usually overridden in the arch makefile
all: vmlinux
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
@@ -492,11 +492,11 @@ CHECKFLAGS += $(NOSTDINC_FLAGS)
# warn about C99 declaration after statement
CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
-# disable pointer signedness warnings in gcc 4.0
+# disable pointer signed / unsigned warnings in gcc 4.0
CFLAGS += $(call cc-option,-Wno-pointer-sign,)
# Default kernel image to build when no specific target is given.
-# KBUILD_IMAGE may be overruled on the commandline or
+# KBUILD_IMAGE may be overruled on the command line or
# set in the environment
# Also any assignments in arch/$(ARCH)/Makefile take precedence over
# this default value
@@ -510,12 +510,29 @@ export INSTALL_PATH ?= /boot
#
# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
# relocations required by build roots. This is not defined in the
-# makefile but the arguement can be passed to make if needed.
+# makefile but the argument can be passed to make if needed.
#
MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
export MODLIB
+#
+# INSTALL_MOD_STRIP, if defined, will cause modules to be
+# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
+# the default option --strip-debug will be used. Otherwise,
+# INSTALL_MOD_STRIP will be used as the options to the strip command.
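+# For example, "make INSTALL_MOD_STRIP=1 modules_install" installs the
+# modules with debug information stripped.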
+
+ifdef INSTALL_MOD_STRIP
+ifeq ($(INSTALL_MOD_STRIP),1)
+mod_strip_cmd = $(STRIP) --strip-debug
+else
+mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP)
+endif # INSTALL_MOD_STRIP=1
+else
+mod_strip_cmd = true
+endif # INSTALL_MOD_STRIP
+export mod_strip_cmd
+
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -539,7 +556,7 @@ libs-y := $(libs-y1) $(libs-y2)
# Build vmlinux
# ---------------------------------------------------------------------------
-# vmlinux is build from the objects selected by $(vmlinux-init) and
+# vmlinux is built from the objects selected by $(vmlinux-init) and
# $(vmlinux-main). Most are built-in.o files from top-level directories
# in the kernel tree, others are specified in arch/$(ARCH)Makefile.
# Ordering when linking is important, and $(vmlinux-init) must be first.
@@ -590,7 +607,7 @@ quiet_cmd_vmlinux_version = GEN .version
$(MAKE) $(build)=init
# Generate System.map
-quiet_cmd_sysmap = SYSMAP
+quiet_cmd_sysmap = SYSMAP
cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
# Link of vmlinux
@@ -719,7 +736,7 @@ $(vmlinux-dirs): prepare scripts
$(Q)$(MAKE) $(build)=$@
# Build the kernel release string
-# The KERNELRELEASE is stored in a file named .kernelrelease
+# The KERNELRELEASE is stored in a file named include/config/kernel.release
# to be used when executing for example make install or make modules_install
#
# Take the contents of any files called localversion* and the config
@@ -737,10 +754,10 @@ _localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
localver = $(subst $(space),, \
$(shell cat /dev/null $(_localver)) \
$(patsubst "%",%,$(CONFIG_LOCALVERSION)))
-
+
# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
# and if the SCM is know a tag from the SCM is appended.
-# The appended tag is determinded by the SCM used.
+# The appended tag is determined by the SCM used.
#
# Currently, only git is supported.
# Other SCMs can edit scripts/setlocalversion and add the appropriate
@@ -753,9 +770,9 @@ endif
localver-full = $(localver)$(localver-auto)
-# Store (new) KERNELRELASE string in .kernelrelease
+# Store (new) KERNELRELEASE string in include/config/kernel.release
kernelrelease = $(KERNELVERSION)$(localver-full)
-.kernelrelease: FORCE
+include/config/kernel.release: include/config/auto.conf FORCE
$(Q)rm -f $@
$(Q)echo $(kernelrelease) > $@
@@ -776,10 +793,10 @@ PHONY += prepare-all
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
# 2) Create the include2 directory, used for the second asm symlink
-prepare3: .kernelrelease
+prepare3: include/config/kernel.release
ifneq ($(KBUILD_SRC),)
@echo ' Using $(srctree) as source for kernel'
- $(Q)if [ -f $(srctree)/.config ]; then \
+ $(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \
echo " $(srctree) is not clean, please run 'make mrproper'";\
echo " in the '$(srctree)' directory.";\
/bin/false; \
@@ -792,7 +809,7 @@ endif
prepare2: prepare3 outputmakefile
prepare1: prepare2 include/linux/version.h include/asm \
- include/config/MARKER
+ include/config/auto.conf
ifneq ($(KBUILD_MODULES),)
$(Q)mkdir -p $(MODVERDIR)
$(Q)rm -f $(MODVERDIR)/*
@@ -806,27 +823,20 @@ prepare0: archprepare FORCE
# All the preparing..
prepare prepare-all: prepare0
-# Leave this as default for preprocessing vmlinux.lds.S, which is now
-# done in arch/$(ARCH)/kernel/Makefile
+# Leave this as default for preprocessing vmlinux.lds.S, which is now
+# done in arch/$(ARCH)/kernel/Makefile
export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH)
-# FIXME: The asm symlink changes when $(ARCH) changes. That's
-# hard to detect, but I suppose "make mrproper" is a good idea
-# before switching between archs anyway.
+# FIXME: The asm symlink changes when $(ARCH) changes. That's
+# hard to detect, but I suppose "make mrproper" is a good idea
+# before switching between archs anyway.
include/asm:
@echo ' SYMLINK $@ -> include/asm-$(ARCH)'
$(Q)if [ ! -d include ]; then mkdir -p include; fi;
@ln -fsn asm-$(ARCH) $@
-# Split autoconf.h into include/linux/config/*
-
-include/config/MARKER: scripts/basic/split-include include/linux/autoconf.h
- @echo ' SPLIT include/linux/autoconf.h -> include/config/*'
- @scripts/basic/split-include include/linux/autoconf.h include/config
- @touch $@
-
# Generate some files
# ---------------------------------------------------------------------------
@@ -846,7 +856,7 @@ define filechk_version.h
)
endef
-include/linux/version.h: $(srctree)/Makefile .config .kernelrelease FORCE
+include/linux/version.h: $(srctree)/Makefile include/config/kernel.release FORCE
$(call filechk,version.h)
# ---------------------------------------------------------------------------
@@ -860,7 +870,7 @@ depend dep:
ifdef CONFIG_MODULES
-# By default, build modules as well
+# By default, build modules as well
all: modules
@@ -942,7 +952,7 @@ CLEAN_FILES += vmlinux System.map \
MRPROPER_DIRS += include/config include2
MRPROPER_FILES += .config .config.old include/asm .version .old_version \
include/linux/autoconf.h include/linux/version.h \
- .kernelrelease Module.symvers tags TAGS cscope*
+ Module.symvers tags TAGS cscope*
# clean - Delete most, but leave enough to build external modules
#
@@ -958,8 +968,9 @@ clean: archclean $(clean-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find . $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
- -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
+ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
+ -o -name '*.symtypes' \) \
-type f -print | xargs rm -f
# mrproper - Delete all generated files, including .config
@@ -982,9 +993,9 @@ PHONY += distclean
distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
- \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -size 0 \
+ -o -name '.*.rej' -o -size 0 \
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
@@ -994,9 +1005,9 @@ distclean: mrproper
# rpm target kept for backward compatibility
package-dir := $(srctree)/scripts/package
-%pkg: FORCE
+%pkg: include/config/kernel.release FORCE
$(Q)$(MAKE) $(build)=$(package-dir) $@
-rpm: FORCE
+rpm: include/config/kernel.release FORCE
$(Q)$(MAKE) $(build)=$(package-dir) $@
@@ -1077,7 +1088,7 @@ else # KBUILD_EXTMOD
# make M=dir modules Make all modules in specified dir
# make M=dir Same as 'make M=dir modules'
# make M=dir modules_install
-# Install the modules build in the module directory
+# Install the modules built in the module directory
# Assumes install directory is already created
# We are always building modules
@@ -1136,7 +1147,7 @@ clean: rm-dirs := $(MODVERDIR)
clean: $(clean-dirs)
$(call cmd,rmdirs)
@find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
-type f -print | xargs rm -f
@@ -1175,31 +1186,41 @@ else
ALLINCLUDE_ARCHS := $(ARCH)
endif
else
-#Allow user to specify only ALLSOURCE_PATHS on the command line, keeping existing behaviour.
+#Allow user to specify only ALLSOURCE_PATHS on the command line, keeping existing behaviour.
ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS)
endif
ALLSOURCE_ARCHS := $(ARCH)
-define all-sources
- ( find $(__srctree) $(RCS_FIND_IGNORE) \
+define find-sources
+ ( find $(__srctree) $(RCS_FIND_IGNORE) \
\( -name include -o -name arch \) -prune -o \
- -name '*.[chS]' -print; \
+ -name $1 -print; \
for ARCH in $(ALLSOURCE_ARCHS) ; do \
find $(__srctree)arch/$${ARCH} $(RCS_FIND_IGNORE) \
- -name '*.[chS]' -print; \
+ -name $1 -print; \
done ; \
find $(__srctree)security/selinux/include $(RCS_FIND_IGNORE) \
- -name '*.[chS]' -print; \
+ -name $1 -print; \
find $(__srctree)include $(RCS_FIND_IGNORE) \
\( -name config -o -name 'asm-*' \) -prune \
- -o -name '*.[chS]' -print; \
+ -o -name $1 -print; \
for ARCH in $(ALLINCLUDE_ARCHS) ; do \
find $(__srctree)include/asm-$${ARCH} $(RCS_FIND_IGNORE) \
- -name '*.[chS]' -print; \
+ -name $1 -print; \
done ; \
find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \
- -name '*.[chS]' -print )
+ -name $1 -print )
+endef
+
+define all-sources
+ $(call find-sources,'*.[chS]')
+endef
+define all-kconfigs
+ $(call find-sources,'Kconfig*')
+endef
+define all-defconfigs
+ $(call find-sources,'defconfig')
endef
quiet_cmd_cscope-file = FILELST cscope.files
@@ -1219,7 +1240,13 @@ define cmd_TAGS
echo "-I __initdata,__exitdata,__acquires,__releases \
-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
--extra=+f --c-kinds=+px"`; \
- $(all-sources) | xargs etags $$ETAGSF -a
+ $(all-sources) | xargs etags $$ETAGSF -a; \
+ if test "x$$ETAGSF" = x; then \
+ $(all-kconfigs) | xargs etags -a \
+ --regex='/^config[ \t]+\([a-zA-Z0-9_]+\)/\1/'; \
+ $(all-defconfigs) | xargs etags -a \
+ --regex='/^#?[ \t]?\(CONFIG_[a-zA-Z0-9_]+\)/\1/'; \
+ fi
endef
TAGS: FORCE
@@ -1259,14 +1286,14 @@ namespacecheck:
endif #ifeq ($(config-targets),1)
endif #ifeq ($(mixed-targets),1)
-PHONY += checkstack
+PHONY += checkstack kernelrelease kernelversion
checkstack:
$(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
$(PERL) $(src)/scripts/checkstack.pl $(ARCH)
kernelrelease:
- $(if $(wildcard .kernelrelease), $(Q)echo $(KERNELRELEASE), \
- $(error kernelrelease not valid - run 'make *config' to update it))
+ $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \
+ $(error kernelrelease not valid - run 'make prepare' to update it))
kernelversion:
@echo $(KERNELVERSION)
@@ -1301,6 +1328,8 @@ endif
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.o: %.S prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
# Modules
/ %/: prepare scripts FORCE
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index da677f829f76..63af36cf7f6e 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -49,15 +49,15 @@ select_smp_affinity(unsigned int irq)
static int last_cpu;
int cpu = last_cpu + 1;
- if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
+ if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
return 1;
while (!cpu_possible(cpu))
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
- irq_affinity[irq] = cpumask_of_cpu(cpu);
- irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
return 0;
}
#endif /* CONFIG_SMP */
@@ -93,7 +93,7 @@ show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
#endif
- seq_printf(p, " %14s", irq_desc[irq].handler->typename);
+ seq_printf(p, " %14s", irq_desc[irq].chip->typename);
seq_printf(p, " %c%s",
(action->flags & SA_INTERRUPT)?'+':' ',
action->name);
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 9d34ce26e5ef..f20f2dff9c43 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -233,7 +233,7 @@ void __init
init_rtc_irq(void)
{
irq_desc[RTC_IRQ].status = IRQ_DISABLED;
- irq_desc[RTC_IRQ].handler = &rtc_irq_type;
+ irq_desc[RTC_IRQ].chip = &rtc_irq_type;
setup_irq(RTC_IRQ, &timer_irqaction);
}
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index b188683b83fd..ac893bd48036 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -109,7 +109,7 @@ init_i8259a_irqs(void)
for (i = 0; i < 16; i++) {
irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].handler = &i8259a_irq_type;
+ irq_desc[i].chip = &i8259a_irq_type;
}
setup_irq(2, &cascade);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 146a20b9e3d5..3b581415bab0 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -120,7 +120,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
if ((ignore_mask >> i) & 1)
continue;
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &pyxis_irq_type;
+ irq_desc[i].chip = &pyxis_irq_type;
}
setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0a87e466918c..8e4d121f84cc 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -67,7 +67,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
if (i < 64 && ((ignore_mask >> i) & 1))
continue;
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &srm_irq_type;
+ irq_desc[i].chip = &srm_irq_type;
}
}
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 2a8b364c822e..4ea6711e55aa 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -124,12 +124,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
void
pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_controller *hose = dev->sysdata;
unsigned long alignto;
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
/* Make sure we start at our min on all hoses */
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 558b83368559..254c507a608c 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -481,7 +481,7 @@ register_cpus(void)
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
- register_cpu(p, i, NULL);
+ register_cpu(p, i);
}
return 0;
}
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index d7f0e97fe56f..1a1a2c7a3d94 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -144,7 +144,7 @@ alcor_init_irq(void)
if (i >= 16+20 && i <= 16+30)
continue;
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &alcor_irq_type;
+ irq_desc[i].chip = &alcor_irq_type;
}
i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 8e3374d34c95..8c9e443d93ad 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -124,7 +124,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v, struct pt_regs *r))
for (i = 16; i < 35; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &cabriolet_irq_type;
+ irq_desc[i].chip = &cabriolet_irq_type;
}
}
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index d5da6b1b28ee..b28c8f1c6e10 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -300,7 +300,7 @@ init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax)
long i;
for (i = imin; i <= imax; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = ops;
+ irq_desc[i].chip = ops;
}
}
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 61a79c354f0b..aeb8e0277905 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -137,7 +137,7 @@ eb64p_init_irq(void)
for (i = 16; i < 32; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &eb64p_irq_type;
+ irq_desc[i].chip = &eb64p_irq_type;
}
common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index bd6e5f0e43c7..64a785baf53a 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -154,7 +154,7 @@ eiger_init_irq(void)
for (i = 16; i < 128; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &eiger_irq_type;
+ irq_desc[i].chip = &eiger_irq_type;
}
}
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index fcabb7c96a16..0148e095638f 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -206,11 +206,11 @@ jensen_init_irq(void)
{
init_i8259a_irqs();
- irq_desc[1].handler = &jensen_local_irq_type;
- irq_desc[4].handler = &jensen_local_irq_type;
- irq_desc[3].handler = &jensen_local_irq_type;
- irq_desc[7].handler = &jensen_local_irq_type;
- irq_desc[9].handler = &jensen_local_irq_type;
+ irq_desc[1].chip = &jensen_local_irq_type;
+ irq_desc[4].chip = &jensen_local_irq_type;
+ irq_desc[3].chip = &jensen_local_irq_type;
+ irq_desc[7].chip = &jensen_local_irq_type;
+ irq_desc[9].chip = &jensen_local_irq_type;
common_init_isa_dma();
}
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index e32fee505220..36d215954376 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -303,7 +303,7 @@ init_io7_irqs(struct io7 *io7,
/* Set up the lsi irqs. */
for (i = 0; i < 128; ++i) {
irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[base + i].handler = lsi_ops;
+ irq_desc[base + i].chip = lsi_ops;
}
/* Disable the implemented irqs in hardware. */
@@ -317,7 +317,7 @@ init_io7_irqs(struct io7 *io7,
/* Set up the msi irqs. */
for (i = 128; i < (128 + 512); ++i) {
irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[base + i].handler = msi_ops;
+ irq_desc[base + i].chip = msi_ops;
}
for (i = 0; i < 16; ++i)
@@ -335,7 +335,7 @@ marvel_init_irq(void)
/* Reserve the legacy irqs. */
for (i = 0; i < 16; ++i) {
irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].handler = &marvel_legacy_irq_type;
+ irq_desc[i].chip = &marvel_legacy_irq_type;
}
/* Init the io7 irqs. */
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index d78a0daa6168..b741600e3761 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -117,7 +117,7 @@ mikasa_init_irq(void)
for (i = 16; i < 32; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &mikasa_irq_type;
+ irq_desc[i].chip = &mikasa_irq_type;
}
init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 65061f5d7410..55db02d318d7 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -139,7 +139,7 @@ noritake_init_irq(void)
for (i = 16; i < 48; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &noritake_irq_type;
+ irq_desc[i].chip = &noritake_irq_type;
}
init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 05888a02a604..949607e3d6fb 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -180,7 +180,7 @@ rawhide_init_irq(void)
for (i = 16; i < 128; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &rawhide_irq_type;
+ irq_desc[i].chip = &rawhide_irq_type;
}
init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 58404243057b..6ae506052635 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -117,7 +117,7 @@ rx164_init_irq(void)
rx164_update_irq_hw(0);
for (i = 16; i < 40; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &rx164_irq_type;
+ irq_desc[i].chip = &rx164_irq_type;
}
init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index a7ff84474ace..24dea40c9bfe 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -537,7 +537,7 @@ sable_lynx_init_irq(int nr_irqs)
for (i = 0; i < nr_irqs; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &sable_lynx_irq_type;
+ irq_desc[i].chip = &sable_lynx_irq_type;
}
common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 7955bdfc2db0..2c75cd1fd81a 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -154,7 +154,7 @@ takara_init_irq(void)
for (i = 16; i < 128; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = &takara_irq_type;
+ irq_desc[i].chip = &takara_irq_type;
}
common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 2551fb49ae09..13f3ed8ed7ac 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -189,7 +189,7 @@ init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
long i;
for (i = imin; i <= imax; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i].handler = ops;
+ irq_desc[i].chip = ops;
}
}
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 1553f470246e..22c5798fe083 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -199,14 +199,14 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
if (i == 2)
continue;
irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i+irq_bias].handler = &wildfire_irq_type;
+ irq_desc[i+irq_bias].chip = &wildfire_irq_type;
}
irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[36+irq_bias].handler = &wildfire_irq_type;
+ irq_desc[36+irq_bias].chip = &wildfire_irq_type;
for (i = 40; i < 64; ++i) {
irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
- irq_desc[i+irq_bias].handler = &wildfire_irq_type;
+ irq_desc[i+irq_bias].chip = &wildfire_irq_type;
}
setup_irq(32+irq_bias, &isa_enable);
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index ba788cfdc3c6..9fc0eeb4f0ab 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -112,7 +112,7 @@ op_axp_create_files(struct super_block * sb, struct dentry * root)
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
- char buf[3];
+ char buf[4];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1b7e5c2e90ef..f123c7c9fc98 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,23 +188,27 @@ config ARCH_IMX
config ARCH_IOP3XX
bool "IOP3xx-based"
+ depends on MMU
select PCI
help
Support for Intel's IOP3XX (XScale) family of processors.
config ARCH_IXP4XX
bool "IXP4xx-based"
+ depends on MMU
help
Support for Intel's IXP4XX (XScale) family of processors.
config ARCH_IXP2000
bool "IXP2400/2800-based"
+ depends on MMU
select PCI
help
Support for Intel's IXP2400/2800 (XScale) family of processors.
config ARCH_IXP23XX
bool "IXP23XX-based"
+ depends on MMU
select PCI
help
Support for Intel's IXP23xx (XScale) family of processors.
@@ -229,6 +233,7 @@ config ARCH_PNX4008
config ARCH_PXA
bool "PXA2xx-based"
+ depends on MMU
select ARCH_MTD_XIP
help
Support for Intel's PXA2XX processor line.
@@ -253,7 +258,7 @@ config ARCH_SA1100
Support for StrongARM 11x0 based boards.
config ARCH_S3C2410
- bool "Samsung S3C2410"
+ bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442"
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -339,6 +344,10 @@ config XSCALE_PMU
depends on CPU_XSCALE && !XSCALE_PMU_TIMER
default y
+if !MMU
+source "arch/arm/Kconfig-nommu"
+endif
+
endmenu
source "arch/arm/common/Kconfig"
@@ -372,7 +381,7 @@ config ISA_DMA_API
bool
config PCI
- bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB
+ bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX
help
Find out whether you have a PCI motherboard. PCI is the name of a
bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 282b14e2f464..a3bbaaf480b9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -177,7 +177,7 @@ boot := arch/arm/boot
# them changed. We use .arch to indicate when they were updated
# last, otherwise make uses the target directory mtime.
-include/asm-arm/.arch: $(wildcard include/config/arch/*.h) include/config/MARKER
+include/asm-arm/.arch: $(wildcard include/config/arch/*.h) include/config/auto.conf
@echo ' SYMLINK include/asm-arm/arch -> include/asm-arm/$(INCDIR)'
ifneq ($(KBUILD_SRC),)
$(Q)mkdir -p include/asm-arm
diff --git a/arch/arm/boot/compressed/head-at91rm9200.S b/arch/arm/boot/compressed/head-at91rm9200.S
index 57a3b163b2cb..d68b9acd826e 100644
--- a/arch/arm/boot/compressed/head-at91rm9200.S
+++ b/arch/arm/boot/compressed/head-at91rm9200.S
@@ -61,6 +61,12 @@
cmp r7, r3
beq 99f
+ @ Ajeco 1ARM : 1075
+ mov r3, #(MACH_TYPE_ONEARM & 0xff)
+ orr r3, r3, #(MACH_TYPE_ONEARM & 0xff00)
+ cmp r7, r3
+ beq 99f
+
@ Unknown board, use the AT91RM9200DK board
@ mov r7, #MACH_TYPE_AT91RM9200
mov r7, #(MACH_TYPE_AT91RM9200DK & 0xff)
diff --git a/arch/arm/boot/compressed/ll_char_wr.S b/arch/arm/boot/compressed/ll_char_wr.S
index d7bbd9da2fca..8517c8606b4a 100644
--- a/arch/arm/boot/compressed/ll_char_wr.S
+++ b/arch/arm/boot/compressed/ll_char_wr.S
@@ -77,7 +77,7 @@ Lrow4bpplp:
subne r1, r1, #1
ldrneb r7, [r6, r1]
bne Lrow4bpplp
- LOADREGS(fd, sp!, {r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
@
@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
@@ -105,7 +105,7 @@ Lrow8bpplp:
subne r1, r1, #1
ldrneb r7, [r6, r1]
bne Lrow8bpplp
- LOADREGS(fd, sp!, {r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
@
@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
@@ -127,7 +127,7 @@ Lrow1bpp:
strb r7, [r0], r5
mov r7, r7, lsr #8
strb r7, [r0], r5
- LOADREGS(fd, sp!, {r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
.bss
ENTRY(con_charconvtable)
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index a7dc1370695b..0dafba3a701d 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -629,21 +629,6 @@ static int locomo_resume(struct platform_device *dev)
#endif
-#define LCM_ALC_EN 0x8000
-
-void frontlight_set(struct locomo *lchip, int duty, int vr, int bpwf)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lchip->lock, flags);
- locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
- udelay(100);
- locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
- locomo_writel(bpwf | LCM_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
- spin_unlock_irqrestore(&lchip->lock, flags);
-}
-
-
/**
* locomo_probe - probe for a single LoCoMo chip.
* @phys_addr: physical address of device.
@@ -698,14 +683,10 @@ __locomo_probe(struct device *me, struct resource *mem, int irq)
, lchip->base + LOCOMO_GPD);
locomo_writel(0, lchip->base + LOCOMO_GIE);
- /* FrontLight */
+ /* Frontlight */
locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
- /* Same constants can be used for collie and poodle
- (depending on CONFIG options in original sharp code)? */
- frontlight_set(lchip, 163, 0, 148);
-
/* Longtime timer */
locomo_writel(0, lchip->base + LOCOMO_LTINT);
/* SPI */
@@ -1063,6 +1044,30 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
}
/*
+ * Frontlight control
+ */
+
+static struct locomo *locomo_chip_driver(struct locomo_dev *ldev);
+
+void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf)
+{
+ unsigned long flags;
+ struct locomo *lchip = locomo_chip_driver(dev);
+
+ if (vr)
+ locomo_gpio_write(dev, LOCOMO_GPIO_FL_VR, 1);
+ else
+ locomo_gpio_write(dev, LOCOMO_GPIO_FL_VR, 0);
+
+ spin_lock_irqsave(&lchip->lock, flags);
+ locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+ udelay(100);
+ locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
+ locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+ spin_unlock_irqrestore(&lchip->lock, flags);
+}
+
+/*
* LoCoMo "Register Access Bus."
*
* We model this as a regular bus type, and hang devices directly
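
(Editor's sketch, not part of the patch above.) The locomo.c hunks replace the probe-time frontlight poke with an exported locomo_frontlight_set() that takes the LoCoMo bus device, drives the LOCOMO_GPIO_FL_VR line, and programs the ALS/ALD registers under the chip lock. A minimal caller sketch, assuming a backlight or machine driver that already holds a struct locomo_dev pointer from its LoCoMo bus probe; the function name is hypothetical and the duty/bpwf numbers are simply the 163/148 values the removed probe-time call used:

    /* Hypothetical helper in a driver sitting on the LoCoMo bus. */
    static void example_set_brightness(struct locomo_dev *ldev, int level)
    {
            /*
             * duty is the PWM on-time, vr selects the voltage-reduction GPIO,
             * bpwf sets the PWM base frequency; values here are illustrative.
             */
            if (level == 0)
                    locomo_frontlight_set(ldev, 0, 0, 148);
            else
                    locomo_frontlight_set(ldev, 163, 0, 148);
    }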
diff --git a/arch/arm/configs/onearm_defconfig b/arch/arm/configs/onearm_defconfig
new file mode 100644
index 000000000000..5401c01caefe
--- /dev/null
+++ b/arch/arm/configs/onearm_defconfig
@@ -0,0 +1,1053 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.17-git10
+# Mon Jun 26 13:45:44 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_VECTORS_BASE=0xffff0000
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+CONFIG_ARCH_AT91RM9200=y
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+CONFIG_MACH_ONEARM=y
+# CONFIG_ARCH_AT91RM9200DK is not set
+# CONFIG_MACH_AT91RM9200EK is not set
+# CONFIG_MACH_CSB337 is not set
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+# CONFIG_MACH_KAFA is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+# CONFIG_LEDS_CPU is not set
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp mem=64M"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+# CONFIG_APM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x0
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=0
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+# CONFIG_USB_DABUSB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_TOUCHSCREEN is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_AT91RM9200=y
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_UNWIND_INFO is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index e17661380096..f20814e6f497 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17
-# Tue Jun 20 18:57:01 2006
+# Linux kernel version: 2.6.17-git9
+# Sun Jun 25 23:56:32 2006
#
CONFIG_ARM=y
CONFIG_MMU=y
@@ -49,7 +49,6 @@ CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
# CONFIG_SLOB is not set
-CONFIG_OBSOLETE_INTERMODULE=y
#
# Loadable module support
@@ -81,18 +80,26 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# System Type
#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91RM9200 is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
# CONFIG_ARCH_IOP3XX is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_IXP2000 is not set
# CONFIG_ARCH_IXP23XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
@@ -100,14 +107,6 @@ CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_REALVIEW is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_AAEC2000 is not set
-# CONFIG_ARCH_AT91RM9200 is not set
-# CONFIG_ARCH_PNX4008 is not set
-# CONFIG_ARCH_NETX is not set
#
# S3C24XX Implementations
@@ -123,11 +122,14 @@ CONFIG_ARCH_SMDK2410=y
CONFIG_ARCH_S3C2440=y
CONFIG_SMDK2440_CPU2440=y
CONFIG_SMDK2440_CPU2442=y
+CONFIG_MACH_SMDK2413=y
CONFIG_MACH_VR1000=y
CONFIG_MACH_RX3715=y
CONFIG_MACH_OTOM=y
CONFIG_MACH_NEXCODER_2440=y
+CONFIG_S3C2410_CLOCK=y
CONFIG_CPU_S3C2410=y
+CONFIG_CPU_S3C2412=y
CONFIG_CPU_S3C244X=y
CONFIG_CPU_S3C2440=y
CONFIG_CPU_S3C2442=y
@@ -153,8 +155,11 @@ CONFIG_S3C2410_LOWLEVEL_UART_PORT=0
#
CONFIG_CPU_32=y
CONFIG_CPU_ARM920T=y
+CONFIG_CPU_ARM926T=y
CONFIG_CPU_32v4=y
+CONFIG_CPU_32v5=y
CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_ABRT_EV5TJ=y
CONFIG_CPU_CACHE_V4WT=y
CONFIG_CPU_CACHE_VIVT=y
CONFIG_CPU_COPY_V4WB=y
@@ -167,6 +172,7 @@ CONFIG_CPU_TLB_V4WBI=y
# CONFIG_CPU_ICACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
#
# Bus support
@@ -214,6 +220,7 @@ CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
CONFIG_FPE_NWFPE=y
# CONFIG_FPE_NWFPE_XP is not set
# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
#
# Userspace binary formats
@@ -242,6 +249,8 @@ CONFIG_NET=y
# CONFIG_NETDEBUG is not set
# CONFIG_PACKET is not set
CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -260,6 +269,8 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -267,6 +278,7 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
#
@@ -321,6 +333,7 @@ CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
#
# Connector - unified userspace <-> kernelspace linker
@@ -408,10 +421,12 @@ CONFIG_MTD_BAST_MAXSIZE=4
#
CONFIG_MTD_NAND=y
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_S3C2410=y
# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
# CONFIG_MTD_NAND_S3C2410_HWECC is not set
+# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
# CONFIG_MTD_NAND_DISKONCHIP is not set
# CONFIG_MTD_NAND_NANDSIM is not set
@@ -425,8 +440,8 @@ CONFIG_MTD_NAND_S3C2410=y
#
CONFIG_PARPORT=y
# CONFIG_PARPORT_PC is not set
-# CONFIG_PARPORT_ARC is not set
# CONFIG_PARPORT_GSC is not set
+# CONFIG_PARPORT_AX88796 is not set
CONFIG_PARPORT_1284=y
#
@@ -735,6 +750,7 @@ CONFIG_I2C_ALGOBIT=m
#
# CONFIG_I2C_ELEKTOR is not set
CONFIG_I2C_ISA=m
+# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_PARPORT is not set
# CONFIG_I2C_PARPORT_LIGHT is not set
CONFIG_I2C_S3C2410=y
@@ -765,13 +781,13 @@ CONFIG_SENSORS_EEPROM=m
#
# Dallas's 1-wire bus
#
-# CONFIG_W1 is not set
#
# Hardware Monitoring support
#
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
+# CONFIG_SENSORS_ABITUGURU is not set
# CONFIG_SENSORS_ADM1021 is not set
# CONFIG_SENSORS_ADM1025 is not set
# CONFIG_SENSORS_ADM1026 is not set
@@ -799,8 +815,10 @@ CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
# CONFIG_SENSORS_W83792D is not set
# CONFIG_SENSORS_W83L785TS is not set
# CONFIG_SENSORS_W83627HF is not set
@@ -845,6 +863,7 @@ CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_MACMODES is not set
CONFIG_FB_FIRMWARE_EDID=y
+# CONFIG_FB_BACKLIGHT is not set
CONFIG_FB_MODE_HELPERS=y
# CONFIG_FB_TILEBLITTING is not set
# CONFIG_FB_S1D13XXX is not set
@@ -976,10 +995,12 @@ CONFIG_USB_MON=y
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
+# CONFIG_USB_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_APPLEDISPLAY is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TEST is not set
@@ -1024,6 +1045,7 @@ CONFIG_FS_MBCACHE=y
# CONFIG_MINIX_FS is not set
CONFIG_ROMFS_FS=y
CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a601b8b55f35..7cffbaef064b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,9 @@ obj-$(CONFIG_PCI) += bios32.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
+obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
+AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
+
obj-$(CONFIG_IWMMXT) += iwmmxt.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index c49b5d4d7fca..da69e660574b 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
/* user mem (segment) */
-EXPORT_SYMBOL(__arch_copy_from_user);
-EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__arch_clear_user);
-EXPORT_SYMBOL(__arch_strnlen_user);
-EXPORT_SYMBOL(__arch_strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
+#endif
/* crypto hash */
EXPORT_SYMBOL(sha_transform);
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 396efba9bacd..447ede5143a8 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -60,6 +60,9 @@ int main(void)
#ifdef CONFIG_IWMMXT
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
#endif
+#ifdef CONFIG_CRUNCH
+ DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
+#endif
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 302fc1401547..45da06fc1ba1 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -304,7 +304,7 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev)
static void __devinit
pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
{
- unsigned long offset;
+ resource_size_t offset;
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
@@ -634,9 +634,9 @@ char * __init pcibios_setup(char *str)
* which might be mirrored at 0x0100-0x03ff..
*/
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO && start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
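
(Editor's sketch, not part of the patch above.) The switch from unsigned long to resource_size_t matters because, in this kernel generation, resource_size_t is believed to widen to 64 bits when the 64-bit-resources config option is enabled, so storing resource offsets in unsigned long would truncate addresses above 4 GiB on 32-bit builds. One practical consequence is that printing such a value needs an explicit cast; a hedged kernel-side sketch (the function name is hypothetical):

    /* Print a resource_size_t portably regardless of its configured width. */
    static void example_report_resource(struct resource *res)
    {
            printk(KERN_INFO "resource %s: %#llx-%#llx\n",
                   res->name,
                   (unsigned long long)res->start,
                   (unsigned long long)res->end);
    }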
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
new file mode 100644
index 000000000000..a26886758c67
--- /dev/null
+++ b/arch/arm/kernel/crunch-bits.S
@@ -0,0 +1,305 @@
+/*
+ * arch/arm/kernel/crunch-bits.S
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
+ * Copyright (c) 2003-2004, MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch/ep93xx-regs.h>
+
+/*
+ * We can't use hex constants here due to a bug in gas.
+ */
+#define CRUNCH_MVDX0 0
+#define CRUNCH_MVDX1 8
+#define CRUNCH_MVDX2 16
+#define CRUNCH_MVDX3 24
+#define CRUNCH_MVDX4 32
+#define CRUNCH_MVDX5 40
+#define CRUNCH_MVDX6 48
+#define CRUNCH_MVDX7 56
+#define CRUNCH_MVDX8 64
+#define CRUNCH_MVDX9 72
+#define CRUNCH_MVDX10 80
+#define CRUNCH_MVDX11 88
+#define CRUNCH_MVDX12 96
+#define CRUNCH_MVDX13 104
+#define CRUNCH_MVDX14 112
+#define CRUNCH_MVDX15 120
+#define CRUNCH_MVAX0L 128
+#define CRUNCH_MVAX0M 132
+#define CRUNCH_MVAX0H 136
+#define CRUNCH_MVAX1L 140
+#define CRUNCH_MVAX1M 144
+#define CRUNCH_MVAX1H 148
+#define CRUNCH_MVAX2L 152
+#define CRUNCH_MVAX2M 156
+#define CRUNCH_MVAX2H 160
+#define CRUNCH_MVAX3L 164
+#define CRUNCH_MVAX3M 168
+#define CRUNCH_MVAX3H 172
+#define CRUNCH_DSPSC 176
+
+#define CRUNCH_SIZE 184
+
+ .text
+
+/*
+ * Lazy switching of crunch coprocessor context
+ *
+ * r10 = struct thread_info pointer
+ * r9 = ret_from_exception
+ * lr = undefined instr exit
+ *
+ * called from prefetch exception handler with interrupts disabled
+ */
+ENTRY(crunch_task_enable)
+ ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
+
+ ldr r1, [r8, #0x80]
+ tst r1, #0x00800000 @ access to crunch enabled?
+ movne pc, lr @ if so no business here
+ mov r3, #0xaa @ unlock syscon swlock
+ str r3, [r8, #0xc0]
+ orr r1, r1, #0x00800000 @ enable access to crunch
+ str r1, [r8, #0x80]
+
+ ldr r3, =crunch_owner
+ add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r2, [sp, #60] @ current task pc value
+ ldr r1, [r3] @ get current crunch owner
+ str r0, [r3] @ this task now owns crunch
+ sub r2, r2, #4 @ adjust pc back
+ str r2, [sp, #60]
+
+ ldr r2, [r8, #0x80]
+ mov r2, r2 @ flush out enable (@@@)
+
+ teq r1, #0 @ test for last ownership
+ mov lr, r9 @ normal exit from exception
+ beq crunch_load @ no owner, skip save
+
+crunch_save:
+ cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers
+ cfstr64 mvdx1, [r1, #CRUNCH_MVDX1]
+ cfstr64 mvdx2, [r1, #CRUNCH_MVDX2]
+ cfstr64 mvdx3, [r1, #CRUNCH_MVDX3]
+ cfstr64 mvdx4, [r1, #CRUNCH_MVDX4]
+ cfstr64 mvdx5, [r1, #CRUNCH_MVDX5]
+ cfstr64 mvdx6, [r1, #CRUNCH_MVDX6]
+ cfstr64 mvdx7, [r1, #CRUNCH_MVDX7]
+ cfstr64 mvdx8, [r1, #CRUNCH_MVDX8]
+ cfstr64 mvdx9, [r1, #CRUNCH_MVDX9]
+ cfstr64 mvdx10, [r1, #CRUNCH_MVDX10]
+ cfstr64 mvdx11, [r1, #CRUNCH_MVDX11]
+ cfstr64 mvdx12, [r1, #CRUNCH_MVDX12]
+ cfstr64 mvdx13, [r1, #CRUNCH_MVDX13]
+ cfstr64 mvdx14, [r1, #CRUNCH_MVDX14]
+ cfstr64 mvdx15, [r1, #CRUNCH_MVDX15]
+
+#ifdef __ARMEB__
+#error fix me for ARMEB
+#endif
+
+ cfmv32al mvfx0, mvax0 @ save 72b accumulators
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L]
+ cfmv32am mvfx0, mvax0
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M]
+ cfmv32ah mvfx0, mvax0
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H]
+ cfmv32al mvfx0, mvax1
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L]
+ cfmv32am mvfx0, mvax1
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M]
+ cfmv32ah mvfx0, mvax1
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H]
+ cfmv32al mvfx0, mvax2
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L]
+ cfmv32am mvfx0, mvax2
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M]
+ cfmv32ah mvfx0, mvax2
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H]
+ cfmv32al mvfx0, mvax3
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L]
+ cfmv32am mvfx0, mvax3
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M]
+ cfmv32ah mvfx0, mvax3
+ cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H]
+
+ cfmv32sc mvdx0, dspsc @ save status word
+ cfstr64 mvdx0, [r1, #CRUNCH_DSPSC]
+
+ teq r0, #0 @ anything to load?
+ cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
+ moveq pc, lr
+
+crunch_load:
+ cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
+ cfmvsc32 dspsc, mvdx0
+
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
+ cfmval32 mvax0, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M]
+ cfmvam32 mvax0, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H]
+ cfmvah32 mvax0, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L]
+ cfmval32 mvax1, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M]
+ cfmvam32 mvax1, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H]
+ cfmvah32 mvax1, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L]
+ cfmval32 mvax2, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M]
+ cfmvam32 mvax2, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H]
+ cfmvah32 mvax2, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L]
+ cfmval32 mvax3, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M]
+ cfmvam32 mvax3, mvfx0
+ cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H]
+ cfmvah32 mvax3, mvfx0
+
+ cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
+ cfldr64 mvdx1, [r0, #CRUNCH_MVDX1]
+ cfldr64 mvdx2, [r0, #CRUNCH_MVDX2]
+ cfldr64 mvdx3, [r0, #CRUNCH_MVDX3]
+ cfldr64 mvdx4, [r0, #CRUNCH_MVDX4]
+ cfldr64 mvdx5, [r0, #CRUNCH_MVDX5]
+ cfldr64 mvdx6, [r0, #CRUNCH_MVDX6]
+ cfldr64 mvdx7, [r0, #CRUNCH_MVDX7]
+ cfldr64 mvdx8, [r0, #CRUNCH_MVDX8]
+ cfldr64 mvdx9, [r0, #CRUNCH_MVDX9]
+ cfldr64 mvdx10, [r0, #CRUNCH_MVDX10]
+ cfldr64 mvdx11, [r0, #CRUNCH_MVDX11]
+ cfldr64 mvdx12, [r0, #CRUNCH_MVDX12]
+ cfldr64 mvdx13, [r0, #CRUNCH_MVDX13]
+ cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
+ cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
+
+ mov pc, lr
+
+/*
+ * Back up crunch regs to save area and disable access to them
+ * (mainly for gdb or sleep mode usage)
+ *
+ * r0 = struct thread_info pointer of target task or NULL for any
+ */
+ENTRY(crunch_task_disable)
+ stmfd sp!, {r4, r5, lr}
+
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
+
+ ldr r3, =crunch_owner
+ add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r1, [r3] @ get current crunch owner
+ teq r1, #0 @ any current owner?
+ beq 1f @ no: quit
+ teq r0, #0 @ any owner?
+ teqne r1, r2 @ or specified one?
+ bne 1f @ no: quit
+
+ ldr r5, [r4, #0x80] @ enable access to crunch
+ mov r2, #0xaa
+ str r2, [r4, #0xc0]
+ orr r5, r5, #0x00800000
+ str r5, [r4, #0x80]
+
+ mov r0, #0 @ nothing to load
+ str r0, [r3] @ no more current owner
+ ldr r2, [r4, #0x80] @ flush out enable (@@@)
+ mov r2, r2
+ bl crunch_save
+
+ mov r2, #0xaa @ disable access to crunch
+ str r2, [r4, #0xc0]
+ bic r5, r5, #0x00800000
+ str r5, [r4, #0x80]
+ ldr r5, [r4, #0x80] @ flush out enable (@@@)
+ mov r5, r5
+
+1: msr cpsr_c, ip @ restore interrupt mode
+ ldmfd sp!, {r4, r5, pc}
+
+/*
+ * Copy crunch state to given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to store crunch state
+ *
+ * this is called mainly in the creation of signal stack frames
+ */
+ENTRY(crunch_task_copy)
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =crunch_owner
+ add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r3, [r3] @ get current crunch owner
+ teq r2, r3 @ does this task own it...
+ beq 1f
+
+ @ current crunch values are in the task save area
+ msr cpsr_c, ip @ restore interrupt mode
+ mov r0, r1
+ mov r1, r2
+ mov r2, #CRUNCH_SIZE
+ b memcpy
+
+1: @ this task owns crunch regs -- grab a copy from there
+ mov r0, #0 @ nothing to load
+ mov r3, lr @ preserve return address
+ bl crunch_save
+ msr cpsr_c, ip @ restore interrupt mode
+ mov pc, r3
+
+/*
+ * Restore crunch state from given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to get crunch state from
+ *
+ * this is used to restore crunch state when unwinding a signal stack frame
+ */
+ENTRY(crunch_task_restore)
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =crunch_owner
+ add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
+ ldr r3, [r3] @ get current crunch owner
+ teq r2, r3 @ does this task own it...
+ beq 1f
+
+ @ this task doesn't own crunch regs -- use its save area
+ msr cpsr_c, ip @ restore interrupt mode
+ mov r0, r2
+ mov r2, #CRUNCH_SIZE
+ b memcpy
+
+1: @ this task owns crunch regs -- load them directly
+ mov r0, r1
+ mov r1, #0 @ nothing to save
+ mov r3, lr @ preserve return address
+ bl crunch_load
+ msr cpsr_c, ip @ restore interrupt mode
+ mov pc, r3
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
new file mode 100644
index 000000000000..748175921f9b
--- /dev/null
+++ b/arch/arm/kernel/crunch.c
@@ -0,0 +1,83 @@
+/*
+ * arch/arm/kernel/crunch.c
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/arch/ep93xx-regs.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+struct crunch_state *crunch_owner;
+
+void crunch_task_release(struct thread_info *thread)
+{
+ local_irq_disable();
+ if (crunch_owner == &thread->crunchstate)
+ crunch_owner = NULL;
+ local_irq_enable();
+}
+
+static int crunch_enabled(u32 devcfg)
+{
+ return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
+}
+
+static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+ struct thread_info *thread = (struct thread_info *)t;
+ struct crunch_state *crunch_state;
+ u32 devcfg;
+
+ crunch_state = &thread->crunchstate;
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ memset(crunch_state, 0, sizeof(*crunch_state));
+
+ /*
+ * FALLTHROUGH: Ensure we don't try to overwrite our newly
+ * initialised state information on the first fault.
+ */
+
+ case THREAD_NOTIFY_RELEASE:
+ crunch_task_release(thread);
+ break;
+
+ case THREAD_NOTIFY_SWITCH:
+ devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
+ if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
+ devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
+ __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
+ __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block crunch_notifier_block = {
+ .notifier_call = crunch_do,
+};
+
+static int __init crunch_init(void)
+{
+ thread_register_notifier(&crunch_notifier_block);
+
+ return 0;
+}
+
+late_initcall(crunch_init);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 86c92523a346..6423a38839b8 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -492,9 +492,15 @@ call_fpe:
b do_fpe @ CP#1 (FPE)
b do_fpe @ CP#2 (FPE)
mov pc, lr @ CP#3
+#ifdef CONFIG_CRUNCH
+ b crunch_task_enable @ CP#4 (MaverickCrunch)
+ b crunch_task_enable @ CP#5 (MaverickCrunch)
+ b crunch_task_enable @ CP#6 (MaverickCrunch)
+#else
mov pc, lr @ CP#4
mov pc, lr @ CP#5
mov pc, lr @ CP#6
+#endif
mov pc, lr @ CP#7
mov pc, lr @ CP#8
mov pc, lr @ CP#9
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b5bcebca1cd6..75af6d6e2f28 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -340,7 +340,7 @@ sys_mmap2:
streq r5, [sp, #4]
beq do_mmap2
mov r0, #-EINVAL
- RETINSTR(mov,pc, lr)
+ mov pc, lr
#else
str r5, [sp, #4]
b do_mmap2
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index adf62e5eaad7..2af7e44218af 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -39,7 +39,7 @@
__INIT
.type stext, %function
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04f7344e356a..330b9476c398 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,7 +71,7 @@
__INIT
.type stext, %function
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
@@ -104,7 +104,7 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index a1d1b2906e8d..c40bdc770054 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -634,6 +634,32 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
#endif
+#ifdef CONFIG_CRUNCH
+/*
+ * Get the child Crunch state.
+ */
+static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+
+ crunch_task_disable(thread); /* force it to ram */
+ return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
+ ? -EFAULT : 0;
+}
+
+/*
+ * Set the child Crunch state.
+ */
+static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+
+ crunch_task_release(thread); /* force a reload */
+ return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
+ ? -EFAULT : 0;
+}
+#endif
+
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
@@ -765,6 +791,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
child->ptrace_message = data;
break;
+#ifdef CONFIG_CRUNCH
+ case PTRACE_GETCRUNCHREGS:
+ ret = ptrace_getcrunchregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETCRUNCHREGS:
+ ret = ptrace_setcrunchregs(child, (void __user *)data);
+ break;
+#endif
+
default:
ret = ptrace_request(child, request, addr, data);
break;
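
(Editor's sketch, not part of the patch above.) The new PTRACE_GETCRUNCHREGS/PTRACE_SETCRUNCHREGS requests copy the 184-byte Crunch save area (CRUNCH_SIZE in crunch-bits.S) between the child's thread_info and a debugger-supplied buffer passed in the data argument; addr is unused. A debugger-side usage sketch, assuming the ARM kernel headers exporting PTRACE_GETCRUNCHREGS are installed and coexist with glibc's <sys/ptrace.h> on the target; the function name is hypothetical:

    #include <sys/ptrace.h>
    #include <asm/ptrace.h>      /* assumed to define PTRACE_GETCRUNCHREGS */
    #include <sys/types.h>
    #include <stdio.h>

    static int dump_crunch_state(pid_t child)
    {
            unsigned char state[184];   /* mirrors CRUNCH_SIZE above */

            /* addr is ignored; data points at the buffer to fill. */
            if (ptrace(PTRACE_GETCRUNCHREGS, child, 0, state) == -1) {
                    perror("PTRACE_GETCRUNCHREGS");
                    return -1;
            }
            printf("first byte of mvdx0: %02x\n", state[0]);
            return 0;
    }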
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9fc9af88c60c..6bdf70def01f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -119,9 +119,24 @@ DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
* Standard memory resources
*/
static struct resource mem_res[] = {
- { "Video RAM", 0, 0, IORESOURCE_MEM },
- { "Kernel text", 0, 0, IORESOURCE_MEM },
- { "Kernel data", 0, 0, IORESOURCE_MEM }
+ {
+ .name = "Video RAM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel text",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ }
};
#define video_ram mem_res[0]
@@ -129,9 +144,24 @@ static struct resource mem_res[] = {
#define kernel_data mem_res[2]
static struct resource io_res[] = {
- { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
- { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
- { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
+ {
+ .name = "reserved",
+ .start = 0x3bc,
+ .end = 0x3be,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "reserved",
+ .start = 0x378,
+ .end = 0x37f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "reserved",
+ .start = 0x278,
+ .end = 0x27f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ }
};
#define lp0 io_res[0]
@@ -808,7 +838,7 @@ static int __init topology_init(void)
int cpu;
for_each_possible_cpu(cpu)
- register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
+ register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1ce05ec086c6..83a8d3c95eb3 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -132,6 +132,37 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
return ret;
}
+#ifdef CONFIG_CRUNCH
+static int preserve_crunch_context(struct crunch_sigframe *frame)
+{
+ char kbuf[sizeof(*frame) + 8];
+ struct crunch_sigframe *kframe;
+
+ /* the crunch context must be 64 bit aligned */
+ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+ kframe->magic = CRUNCH_MAGIC;
+ kframe->size = CRUNCH_STORAGE_SIZE;
+ crunch_task_copy(current_thread_info(), &kframe->storage);
+ return __copy_to_user(frame, kframe, sizeof(*frame));
+}
+
+static int restore_crunch_context(struct crunch_sigframe *frame)
+{
+ char kbuf[sizeof(*frame) + 8];
+ struct crunch_sigframe *kframe;
+
+ /* the crunch context must be 64 bit aligned */
+ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+ if (__copy_from_user(kframe, frame, sizeof(*frame)))
+ return -1;
+ if (kframe->magic != CRUNCH_MAGIC ||
+ kframe->size != CRUNCH_STORAGE_SIZE)
+ return -1;
+ crunch_task_restore(current_thread_info(), &kframe->storage);
+ return 0;
+}
+#endif
+
#ifdef CONFIG_IWMMXT
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
@@ -214,6 +245,10 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= !valid_user_regs(regs);
aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+ if (err == 0)
+ err |= restore_crunch_context(&aux->crunch);
+#endif
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= restore_iwmmxt_context(&aux->iwmmxt);
@@ -333,6 +368,10 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+ if (err == 0)
+ err |= preserve_crunch_context(&aux->crunch);
+#endif
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= preserve_iwmmxt_context(&aux->iwmmxt);
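
(Editor's sketch, not part of the patch above.) preserve_crunch_context()/restore_crunch_context() stage the frame through a kernel stack buffer because the Crunch store/load instructions want a 64-bit-aligned area, while the stack buffer itself has no such guarantee; over-allocating 8 bytes and masking the pointer yields an aligned window that still fits inside the buffer. A standalone illustration of that idiom, with a hypothetical payload type standing in for struct crunch_sigframe:

    #include <stdint.h>
    #include <string.h>

    struct payload {
            uint64_t words[4];      /* stand-in for struct crunch_sigframe */
    };

    static void fill_aligned(struct payload *dst)
    {
            char kbuf[sizeof(struct payload) + 8];
            struct payload *kframe;

            /* Step past the 8 bytes of slack, then round down to 8-byte
             * alignment: kframe stays within kbuf and is 64-bit aligned. */
            kframe = (struct payload *)(((uintptr_t)(kbuf + 8)) & ~(uintptr_t)7);
            memset(kframe, 0, sizeof(*kframe));
            memcpy(dst, kframe, sizeof(*kframe));
    }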
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b254e88595c..2df9688a7028 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -80,6 +80,10 @@ SECTIONS
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
+#ifndef CONFIG_MMU
+ *(.fixup)
+ *(__ex_table)
+#endif
}
.text : { /* Real text segment */
@@ -87,7 +91,9 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+#ifdef CONFIG_MMU
*(.fixup)
+#endif
*(.gnu.warning)
*(.rodata)
*(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
*/
. = ALIGN(32);
__start___ex_table = .;
+#ifdef CONFIG_MMU
*(__ex_table)
+#endif
__stop___ex_table = .;
/*
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 7b726b627ea5..30351cd4560d 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,28 +6,31 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- copy_page.o delay.o findbit.o memchr.o memcpy.o \
+ delay.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
- getuser.o putuser.o clear_user.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o lib1funcs.o div64.o sha1.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o
+mmu-y := clear_user.o copy_page.o getuser.o putuser.o
+
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
- lib-y += copy_from_user.o copy_to_user.o
+ mmu-y += copy_from_user.o copy_to_user.o
else
ifneq ($(CONFIG_CPU_32v3),y)
- lib-y += copy_from_user.o copy_to_user.o
+ mmu-y += copy_from_user.o copy_to_user.o
else
- lib-y += uaccess.o
+ mmu-y += uaccess.o
endif
endif
+lib-$(CONFIG_MMU) += $(mmu-y)
+
ifeq ($(CONFIG_CPU_32v3),y)
lib-y += io-readsw-armv3.o io-writesw-armv3.o
else
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 16153c86c3f8..91f993f2e9db 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -41,7 +41,7 @@ ENTRY(c_backtrace)
movne r0, #0
movs frame, r0
1: moveq r0, #-2
- LOADREGS(eqfd, sp!, {r4 - r8, pc})
+ ldmeqfd sp!, {r4 - r8, pc}
2: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction
ldr r0, [sp], #4
@@ -85,7 +85,7 @@ ENTRY(c_backtrace)
* A zero next framepointer means we're done.
*/
teq next, #0
- LOADREGS(eqfd, sp!, {r4 - r8, pc})
+ ldmeqfd sp!, {r4 - r8, pc}
/*
* The next framepointer must be above the
@@ -97,16 +97,13 @@ ENTRY(c_backtrace)
b 1007f
/*
- * Fixup for LDMDB
+ * Fixup for LDMDB. Note that this must not be in the fixup section.
*/
- .section .fixup,"ax"
- .align 0
1007: ldr r0, =.Lbad
mov r1, frame
bl printk
- LOADREGS(fd, sp!, {r4 - r8, pc})
+ ldmfd sp!, {r4 - r8, pc}
.ltorg
- .previous
.section __ex_table,"a"
.align 3
@@ -145,7 +142,7 @@ ENTRY(c_backtrace)
adrne r0, .Lcr
blne printk
mov r0, stack
- LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})
+ ldmfd sp!, {instr, reg, stack, r7, r8, pc}
.Lfp: .asciz " r%d = %08X%c"
.Lcr: .asciz "\n"
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 7ff9f831b3f9..ecb28dcdaf7b 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,13 +12,13 @@
.text
-/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+/* Prototype: int __clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
-ENTRY(__arch_clear_user)
+ENTRY(__clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
@@ -43,10 +43,10 @@ USER( strnebt r2, [r0], #1)
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
USER( strnebt r2, [r0], #1)
mov r0, #0
- LOADREGS(fd,sp!, {r1, pc})
+ ldmfd sp!, {r1, pc}
.section .fixup,"ax"
.align 0
-9001: LOADREGS(fd,sp!, {r0, pc})
+9001: ldmfd sp!, {r0, pc}
.previous
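
(Editor's sketch, not part of the patch above.) The prototype comment notes that __clear_user() returns the number of bytes NOT cleared, so a non-zero return means the zeroing stopped at an unmapped or protected page. A kernel-side caller sketch of the usual convention, assuming the checked clear_user() wrapper that performs access_ok() before falling through to __clear_user(); the function name is hypothetical:

    /* Zero a user-supplied buffer, folding the "bytes not cleared"
     * return convention into the conventional -EFAULT error code. */
    static int example_zero_user_area(void __user *ubuf, size_t len)
    {
            if (clear_user(ubuf, len))      /* non-zero: some bytes untouched */
                    return -EFAULT;
            return 0;
    }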
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7497393a0e81..6b7363ce749c 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
- * size_t __arch_copy_from_user(void *to, const void *from, size_t n)
+ * size_t __copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -83,7 +83,7 @@
.text
-ENTRY(__arch_copy_from_user)
+ENTRY(__copy_from_user)
#include "copy_template.S"
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 68117968482b..666c99cc0744 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -43,4 +43,4 @@ ENTRY(copy_page)
bgt 1b @ 1
PLD( ldmeqia r1!, {r3, r4, ip, lr} )
PLD( beq 2b )
- LOADREGS(fd, sp!, {r4, pc}) @ 3
+ ldmfd sp!, {r4, pc} @ 3
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 4a6d8ea14022..5224d94688d9 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
- * size_t __arch_copy_to_user(void *to, const void *from, size_t n)
+ * size_t __copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -86,7 +86,7 @@
.text
-ENTRY(__arch_copy_to_user)
+ENTRY(__copy_to_user)
#include "copy_template.S"
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 7065a20ee8ad..9621469beec1 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -28,5 +28,5 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, r3
adcs r0, r0, r2
adcs r0, r0, #0
- LOADREGS(fd, sp!, {pc})
+ ldmfd sp!, {pc}
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 9183b06c0e2f..930a70259220 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -31,7 +31,7 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
mov r2, r2, lsr #10 @ max = 0x00007fff
mul r0, r2, r0 @ max = 2^32-1
movs r0, r0, lsr #6
- RETINSTR(moveq,pc,lr)
+ moveq pc, lr
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -43,20 +43,20 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
ENTRY(__delay)
subs r0, r0, #1
#if 0
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
#endif
bhi __delay
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/ecard.S b/arch/arm/lib/ecard.S
index fb7b602a6f76..c55aaa2a2088 100644
--- a/arch/arm/lib/ecard.S
+++ b/arch/arm/lib/ecard.S
@@ -29,7 +29,7 @@ ENTRY(ecard_loader_read)
CPSR2SPSR(r0)
mov lr, pc
mov pc, r2
- LOADREGS(fd, sp!, {r4 - r12, pc})
+ ldmfd sp!, {r4 - r12, pc}
@ Purpose: call an expansion card loader to reset the card
@ Proto : void read_loader(int card_base, char *loader);
@@ -41,5 +41,5 @@ ENTRY(ecard_loader_reset)
CPSR2SPSR(r0)
mov lr, pc
add pc, r1, #8
- LOADREGS(fd, sp!, {r4 - r12, pc})
+ ldmfd sp!, {r4 - r12, pc}
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 6f8e27a58c78..a5ca0248aa4e 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -32,7 +32,7 @@ ENTRY(_find_first_zero_bit_le)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
/*
* Purpose : Find next 'zero' bit
@@ -66,7 +66,7 @@ ENTRY(_find_first_bit_le)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
/*
* Purpose : Find next 'one' bit
@@ -98,7 +98,7 @@ ENTRY(_find_first_zero_bit_be)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
ENTRY(_find_next_zero_bit_be)
teq r1, #0
@@ -126,7 +126,7 @@ ENTRY(_find_first_bit_be)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
ENTRY(_find_next_bit_be)
teq r1, #0
@@ -164,5 +164,5 @@ ENTRY(_find_next_bit_be)
addeq r2, r2, #1
mov r0, r2
#endif
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index d3d8de71a2c8..fb966ad0276f 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -72,7 +72,7 @@ ENTRY(__raw_readsb)
bpl .Linsb_16_lp
tst r2, #15
- LOADREGS(eqfd, sp!, {r4 - r6, pc})
+ ldmeqfd sp!, {r4 - r6, pc}
.Linsb_no_16: tst r2, #8
beq .Linsb_no_8
@@ -109,7 +109,7 @@ ENTRY(__raw_readsb)
str r3, [r1], #4
.Linsb_no_4: ands r2, r2, #3
- LOADREGS(eqfd, sp!, {r4 - r6, pc})
+ ldmeqfd sp!, {r4 - r6, pc}
cmp r2, #2
ldrb r3, [r0]
@@ -119,4 +119,4 @@ ENTRY(__raw_readsb)
ldrgtb r3, [r0]
strgtb r3, [r1]
- LOADREGS(fd, sp!, {r4 - r6, pc})
+ ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 146d47c15455..4ef904185142 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -28,7 +28,7 @@
strb r3, [r1], #1
subs r2, r2, #1
- RETINSTR(moveq, pc, lr)
+ moveq pc, lr
ENTRY(__raw_readsw)
teq r2, #0 @ do we have to check for the zero len?
@@ -69,7 +69,7 @@ ENTRY(__raw_readsw)
bpl .Linsw_8_lp
tst r2, #7
- LOADREGS(eqfd, sp!, {r4, r5, r6, pc})
+ ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
@@ -102,6 +102,6 @@ ENTRY(__raw_readsw)
movne r3, r3, lsr #8
strneb r3, [r1]
- LOADREGS(fd, sp!, {r4, r5, r6, pc})
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 08209fc640ea..7eba2b6cc69f 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -64,7 +64,7 @@ ENTRY(__raw_writesb)
bpl .Loutsb_16_lp
tst r2, #15
- LOADREGS(eqfd, sp!, {r4, r5, pc})
+ ldmeqfd sp!, {r4, r5, pc}
.Loutsb_no_16: tst r2, #8
beq .Loutsb_no_8
@@ -80,7 +80,7 @@ ENTRY(__raw_writesb)
outword r3
.Loutsb_no_4: ands r2, r2, #3
- LOADREGS(eqfd, sp!, {r4, r5, pc})
+ ldmeqfd sp!, {r4, r5, pc}
cmp r2, #2
ldrb r3, [r1], #1
@@ -90,4 +90,4 @@ ENTRY(__raw_writesb)
ldrgtb r3, [r1]
strgtb r3, [r0]
- LOADREGS(fd, sp!, {r4, r5, pc})
+ ldmfd sp!, {r4, r5, pc}
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 52d62b481295..1607a29f49b7 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -29,7 +29,7 @@
orr r3, r3, r3, lsl #16
str r3, [r0]
subs r2, r2, #1
- RETINSTR(moveq, pc, lr)
+ moveq pc, lr
ENTRY(__raw_writesw)
teq r2, #0 @ do we have to check for the zero len?
@@ -80,7 +80,7 @@ ENTRY(__raw_writesw)
bpl .Loutsw_8_lp
tst r2, #7
- LOADREGS(eqfd, sp!, {r4, r5, r6, pc})
+ ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
@@ -124,4 +124,4 @@ ENTRY(__raw_writesw)
orrne ip, ip, ip, lsr #16
strne ip, [r0]
- LOADREGS(fd, sp!, {r4, r5, r6, pc})
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index ac34fe55d21a..e7ab1ea8ebaa 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,4 +22,4 @@ ENTRY(memchr)
bne 1b
sub r0, r0, #1
2: movne r0, #0
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a1795f599937..95b110b07a89 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -53,7 +53,7 @@ ENTRY(memset)
stmgeia r0!, {r1, r3, ip, lr}
stmgeia r0!, {r1, r3, ip, lr}
bgt 2b
- LOADREGS(eqfd, sp!, {pc}) @ Now <64 bytes to go.
+ ldmeqfd sp!, {pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
@@ -77,4 +77,4 @@ ENTRY(memset)
strneb r1, [r0], #1
tst r2, #1
strneb r1, [r0], #1
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 51ccc60160fd..abf2508e8221 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -53,7 +53,7 @@ ENTRY(__memzero)
stmgeia r0!, {r2, r3, ip, lr} @ 4
stmgeia r0!, {r2, r3, ip, lr} @ 4
bgt 3b @ 1
- LOADREGS(eqfd, sp!, {pc}) @ 1/2 quick exit
+ ldmeqfd sp!, {pc} @ 1/2 quick exit
/*
* No need to correct the count; we're only testing bits from now on
*/
@@ -77,4 +77,4 @@ ENTRY(__memzero)
strneb r2, [r0], #1 @ 1
tst r1, #1 @ 1 a byte left over
strneb r2, [r0], #1 @ 1
- RETINSTR(mov,pc,lr) @ 1
+ mov pc, lr @ 1
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 5b9b493733fc..9f18d6fdee6a 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,4 +23,4 @@ ENTRY(strchr)
teq r2, r1
movne r0, #0
subeq r0, r0, #1
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S
index 629cc8775276..36e3741a3772 100644
--- a/arch/arm/lib/strncpy_from_user.S
+++ b/arch/arm/lib/strncpy_from_user.S
@@ -20,8 +20,7 @@
* returns the number of characters copied (strlen of copied string),
* -EFAULT on exception, or "len" if we fill the whole buffer
*/
-ENTRY(__arch_strncpy_from_user)
- save_lr
+ENTRY(__strncpy_from_user)
mov ip, r1
1: subs r2, r2, #1
USER( ldrplbt r3, [r1], #1)
@@ -31,13 +30,13 @@ USER( ldrplbt r3, [r1], #1)
bne 1b
sub r1, r1, #1 @ take NUL character out of count
2: sub r0, r1, ip
- restore_pc
+ mov pc, lr
.section .fixup,"ax"
.align 0
9001: mov r3, #0
strb r3, [r0, #0] @ null terminate
mov r0, #-EFAULT
- restore_pc
+ mov pc, lr
.previous
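
The return-value convention spelled out above - string length on success, -EFAULT on a fault, or the full buffer length when the string did not fit - is what the checked strncpy_from_user() wrapper hands back to its callers. An illustrative sketch (assumed helper, not part of this patch):

#include <asm/uaccess.h>
#include <linux/errno.h>

/* Fetch a NUL-terminated name from user space into a fixed kernel
 * buffer, rejecting faults and over-long strings. */
static int fetch_user_name(char *dst, const char __user *src, long size)
{
	long len = strncpy_from_user(dst, src, size);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == size)
		return -ENAMETOOLONG;	/* buffer filled, no room for NUL */
	return 0;
}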
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S
index 67bcd8268128..18d8fa4f925a 100644
--- a/arch/arm/lib/strnlen_user.S
+++ b/arch/arm/lib/strnlen_user.S
@@ -14,14 +14,13 @@
.text
.align 5
-/* Prototype: unsigned long __arch_strnlen_user(const char *str, long n)
+/* Prototype: unsigned long __strnlen_user(const char *str, long n)
* Purpose : get length of a string in user memory
* Params : str - address of string in user memory
* Returns : length of string *including terminator*
* or zero on exception, or n + 1 if too long
*/
-ENTRY(__arch_strnlen_user)
- save_lr
+ENTRY(__strnlen_user)
mov r2, r0
1:
USER( ldrbt r3, [r0], #1)
@@ -31,10 +30,10 @@ USER( ldrbt r3, [r0], #1)
bne 1b
add r0, r0, #1
2: sub r0, r0, r2
- restore_pc
+ mov pc, lr
.section .fixup,"ax"
.align 0
9001: mov r0, #0
- restore_pc
+ mov pc, lr
.previous
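
Note the convention documented above: the returned count includes the terminating NUL, a fault yields zero, and an over-long string yields a value greater than n. A minimal illustrative caller (assumed helper, not part of this patch):

#include <asm/uaccess.h>
#include <linux/errno.h>

/* Check that a properly terminated string of at most 'max' characters
 * (plus the NUL) sits at the given user address. */
static int user_string_ok(const char __user *str, long max)
{
	long len = strnlen_user(str, max + 1);

	if (len == 0)
		return -EFAULT;		/* faulted while reading */
	if (len > max + 1)
		return -EINVAL;		/* longer than allowed */
	return 0;			/* len includes the NUL */
}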
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index fa923f026f15..538df220aa48 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,4 +22,4 @@ ENTRY(strrchr)
teq r2, #0
bne 1b
mov r0, r3
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
index 0cc450f863b6..b48bd6d5fd83 100644
--- a/arch/arm/lib/uaccess.S
+++ b/arch/arm/lib/uaccess.S
@@ -19,7 +19,7 @@
#define PAGE_SHIFT 12
-/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
* Purpose : copy a block to user memory from kernel memory
* Params : to - user memory
* : from - kernel memory
@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
sub r2, r2, ip
b .Lc2u_dest_aligned
-ENTRY(__arch_copy_to_user)
+ENTRY(__copy_to_user)
stmfd sp!, {r2, r4 - r7, lr}
cmp r2, #4
blt .Lc2u_not_enough
@@ -105,7 +105,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
movs ip, r2
bne .Lc2u_nowords
.Lc2u_finished: mov r0, #0
- LOADREGS(fd,sp!,{r2, r4 - r7, pc})
+ ldmfd sp!, {r2, r4 - r7, pc}
.Lc2u_src_not_aligned:
bic r1, r1, #3
@@ -280,10 +280,10 @@ USER( strgtbt r3, [r0], #1) @ May fault
.section .fixup,"ax"
.align 0
-9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc})
+9001: ldmfd sp!, {r0, r4 - r7, pc}
.previous
-/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
* Purpose : copy a block from user memory to kernel memory
* Params : to - kernel memory
* : from - user memory
@@ -302,7 +302,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
sub r2, r2, ip
b .Lcfu_dest_aligned
-ENTRY(__arch_copy_from_user)
+ENTRY(__copy_from_user)
stmfd sp!, {r0, r2, r4 - r7, lr}
cmp r2, #4
blt .Lcfu_not_enough
@@ -369,7 +369,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
bne .Lcfu_nowords
.Lcfu_finished: mov r0, #0
add sp, sp, #8
- LOADREGS(fd,sp!,{r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
.Lcfu_src_not_aligned:
bic r1, r1, #3
@@ -556,6 +556,6 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
movne r1, r4
blne __memzero
mov r0, r4
- LOADREGS(fd,sp!, {r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
.previous
diff --git a/arch/arm/mach-at91rm9200/Kconfig b/arch/arm/mach-at91rm9200/Kconfig
index 1ab5b7828318..70d402f76ce5 100644
--- a/arch/arm/mach-at91rm9200/Kconfig
+++ b/arch/arm/mach-at91rm9200/Kconfig
@@ -4,6 +4,12 @@ menu "AT91RM9200 Implementations"
comment "AT91RM9200 Board Type"
+config MACH_ONEARM
+ bool "Ajeco 1ARM Single Board Computer"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using Ajeco's 1ARM Single Board Computer.
+
config ARCH_AT91RM9200DK
bool "Atmel AT91RM9200-DK Development board"
depends on ARCH_AT91RM9200
diff --git a/arch/arm/mach-at91rm9200/Makefile b/arch/arm/mach-at91rm9200/Makefile
index 81ebc6684ad2..82db957322df 100644
--- a/arch/arm/mach-at91rm9200/Makefile
+++ b/arch/arm/mach-at91rm9200/Makefile
@@ -10,6 +10,7 @@ obj- :=
obj-$(CONFIG_PM) += pm.o
# Board-specific support
+obj-$(CONFIG_MACH_ONEARM) += board-1arm.o
obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o
obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o
obj-$(CONFIG_MACH_CSB337) += board-csb337.o
diff --git a/arch/arm/mach-at91rm9200/board-1arm.c b/arch/arm/mach-at91rm9200/board-1arm.c
new file mode 100644
index 000000000000..dc79e0992af7
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-1arm.c
@@ -0,0 +1,109 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-1arm.c
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/hardware.h>
+#include <asm/arch/board.h>
+#include <asm/arch/gpio.h>
+
+#include "generic.h"
+
+static void __init onearm_init_irq(void)
+{
+ /* Initialize AIC controller */
+ at91rm9200_init_irq(NULL);
+
+ /* Set up the GPIO interrupts */
+ at91_gpio_irq_setup(PQFP_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ * 0 .. 3 = USART0 .. USART3
+ * 4 = DBGU
+ */
+static struct at91_uart_config __initdata onearm_uart_config = {
+ .console_tty = 0, /* ttyS0 */
+ .nr_tty = 3,
+ .tty_map = { 4, 0, 1, -1, -1 }, /* ttyS0, ..., ttyS4 */
+};
+
+static void __init onearm_map_io(void)
+{
+ at91rm9200_map_io();
+
+ /* Initialize clocks: 18.432 MHz crystal */
+ at91_clock_init(18432000);
+
+ /* Setup the serial ports and console */
+ at91_init_serial(&onearm_uart_config);
+}
+
+static struct at91_eth_data __initdata onearm_eth_data = {
+ .phy_irq_pin = AT91_PIN_PC4,
+ .is_rmii = 1,
+};
+
+static struct at91_usbh_data __initdata onearm_usbh_data = {
+ .ports = 1,
+};
+
+static struct at91_udc_data __initdata onearm_udc_data = {
+ .vbus_pin = AT91_PIN_PC2,
+ .pullup_pin = AT91_PIN_PC3,
+};
+
+static void __init onearm_board_init(void)
+{
+ /* Serial */
+ at91_add_device_serial();
+ /* Ethernet */
+ at91_add_device_eth(&onearm_eth_data);
+ /* USB Host */
+ at91_add_device_usbh(&onearm_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&onearm_udc_data);
+}
+
+MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
+ /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91rm9200_timer,
+ .map_io = onearm_map_io,
+ .init_irq = onearm_init_irq,
+ .init_machine = onearm_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index cec5a21ca4e3..e15e4c54a253 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -2,8 +2,19 @@ if ARCH_EP93XX
menu "Cirrus EP93xx Implementation Options"
+config CRUNCH
+ bool "Support for MaverickCrunch"
+ help
+ Enable kernel support for the MaverickCrunch coprocessor.
+
comment "EP93xx Platforms"
+config MACH_EDB9315
+ bool "Support Cirrus Logic EDB9315"
+ help
+ Say 'Y' here if you want your kernel to support the Cirrus
+ Logic EDB9315 Evaluation Board.
+
config MACH_GESBC9312
bool "Support Glomation GESBC-9312-sx"
help
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 05a48a21038e..dfa7e2e8a18b 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -6,5 +6,6 @@ obj-m :=
obj-n :=
obj- :=
+obj-$(CONFIG_MACH_EDB9315) += edb9315.o
obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
obj-$(CONFIG_MACH_TS72XX) += ts72xx.o
diff --git a/arch/arm/mach-ep93xx/edb9315.c b/arch/arm/mach-ep93xx/edb9315.c
new file mode 100644
index 000000000000..ef7482faad81
--- /dev/null
+++ b/arch/arm/mach-ep93xx/edb9315.c
@@ -0,0 +1,62 @@
+/*
+ * arch/arm/mach-ep93xx/edb9315.c
+ * Cirrus Logic EDB9315 support.
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static struct physmap_flash_data edb9315_flash_data = {
+ .width = 4,
+};
+
+static struct resource edb9315_flash_resource = {
+ .start = 0x60000000,
+ .end = 0x61ffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device edb9315_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &edb9315_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &edb9315_flash_resource,
+};
+
+static void __init edb9315_init_machine(void)
+{
+ ep93xx_init_devices();
+ platform_device_register(&edb9315_flash);
+}
+
+MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
+ /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
+ .phys_io = EP93XX_APB_PHYS_BASE,
+ .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb9315_init_machine,
+MACHINE_END
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index 47cc6c8b7c79..2c28d66d260e 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -30,7 +30,7 @@ static struct physmap_flash_data gesbc9312_flash_data = {
static struct resource gesbc9312_flash_resource = {
.start = 0x60000000,
- .end = 0x60800000,
+ .end = 0x607fffff,
.flags = IORESOURCE_MEM,
};
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 6e5a56cd5ae8..0b3b875b1875 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -118,7 +118,7 @@ static struct physmap_flash_data ts72xx_flash_data = {
static struct resource ts72xx_flash_resource = {
.start = TS72XX_NOR_PHYS_BASE,
- .end = TS72XX_NOR_PHYS_BASE + 0x01000000,
+ .end = TS72XX_NOR_PHYS_BASE + 0x00ffffff,
.flags = IORESOURCE_MEM,
};
diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c
index dc5e489c70bc..357351fbb1e2 100644
--- a/arch/arm/mach-ixp23xx/espresso.c
+++ b/arch/arm/mach-ixp23xx/espresso.c
@@ -59,7 +59,7 @@ static struct physmap_flash_data espresso_flash_data = {
static struct resource espresso_flash_resource = {
.start = 0x90000000,
- .end = 0x92000000,
+ .end = 0x91ffffff,
.flags = IORESOURCE_MEM,
};
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index 535b334ee045..e0886871cc77 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -304,7 +304,7 @@ static struct physmap_flash_data ixdp2351_flash_data = {
static struct resource ixdp2351_flash_resource = {
.start = 0x90000000,
- .end = 0x94000000,
+ .end = 0x93ffffff,
.flags = IORESOURCE_MEM,
};
diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c
index b9f5d13fcfe1..92ad18f41251 100644
--- a/arch/arm/mach-ixp23xx/roadrunner.c
+++ b/arch/arm/mach-ixp23xx/roadrunner.c
@@ -143,7 +143,7 @@ static struct physmap_flash_data roadrunner_flash_data = {
static struct resource roadrunner_flash_resource = {
.start = 0x90000000,
- .end = 0x94000000,
+ .end = 0x93ffffff,
.flags = IORESOURCE_MEM,
};
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 3b23f43cb160..57f23b465392 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -35,7 +35,6 @@ config ARCH_ADI_COYOTE
config ARCH_IXDP425
bool "IXDP425"
- select PCI
help
Say 'Y' here if you want your kernel to support Intel's
IXDP425 Development Platform (Also known as Richfield).
@@ -43,7 +42,6 @@ config ARCH_IXDP425
config MACH_IXDPG425
bool "IXDPG425"
- select PCI
help
Say 'Y' here if you want your kernel to support Intel's
IXDPG425 Development Platform (Also known as Montajade).
@@ -51,7 +49,6 @@ config MACH_IXDPG425
config MACH_IXDP465
bool "IXDP465"
- select PCI
help
Say 'Y' here if you want your kernel to support Intel's
IXDP465 Development Platform (Also known as BMP).
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index 5a4aaa0e0a09..640315d8b96a 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -2,13 +2,23 @@
# Makefile for the linux kernel.
#
+obj-pci-y :=
+obj-pci-n :=
+
+obj-pci-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o
+obj-pci-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o
+obj-pci-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o
+obj-pci-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o
+obj-pci-$(CONFIG_MACH_NSLU2) += nslu2-pci.o
+obj-pci-$(CONFIG_MACH_NAS100D) += nas100d-pci.o
+
obj-y += common.o
-obj-$(CONFIG_PCI) += common-pci.o
-obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o ixdp425-setup.o
-obj-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o coyote-setup.o
-obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o
-obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o gtwx5715-setup.o
-obj-$(CONFIG_MACH_NSLU2) += nslu2-pci.o nslu2-setup.o nslu2-power.o
-obj-$(CONFIG_MACH_NAS100D) += nas100d-pci.o nas100d-setup.o nas100d-power.o
+obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-setup.o
+obj-$(CONFIG_MACH_IXDPG425) += coyote-setup.o
+obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-setup.o
+obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-setup.o
+obj-$(CONFIG_MACH_NSLU2) += nslu2-setup.o nslu2-power.o
+obj-$(CONFIG_MACH_NAS100D) += nas100d-setup.o nas100d-power.o
+obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 539b596005fc..d9635ff4b10c 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -88,8 +88,8 @@ static int pxa_gpio_irq_type(unsigned int irq, unsigned int type)
if (type == IRQT_PROBE) {
/* Don't mess with enabled GPIOs using preconfigured edges or
- GPIOs set to alternate function during probe */
- if ((GPIO_IRQ_rising_edge[idx] | GPIO_IRQ_falling_edge[idx]) &
+ GPIOs set to alternate function or to output during probe */
+ if ((GPIO_IRQ_rising_edge[idx] | GPIO_IRQ_falling_edge[idx] | GPDR(gpio)) &
GPIO_bit(gpio))
return 0;
if (GAFR(gpio) & (0x3 << (((gpio) & 0xf)*2)))
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index c9862688ff3d..0650bed3b96e 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -189,7 +189,7 @@ ENTRY(pxa_cpu_suspend)
.data
.align 5
ENTRY(pxa_cpu_resume)
- mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC @ set SVC, irqs off
+ mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
msr cpsr_c, r0
ldr r0, sleep_save_sp @ stack phys addr
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index f5d9cd498a5f..b4171dd43df0 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -71,13 +71,13 @@ config ARCH_S3C2440
Say Y here if you are using the SMDK2440.
config SMDK2440_CPU2440
- bool "SMDK2440 with S3C2440 cpu module"
+ bool "SMDK2440 with S3C2440 CPU module"
depends on ARCH_S3C2440
default y if ARCH_S3C2440
select CPU_S3C2440
config SMDK2440_CPU2442
- bool "SMDM2440 with S3C2442 cpu module"
+ bool "SMDM2440 with S3C2442 CPU module"
depends on ARCH_S3C2440
select CPU_S3C2442
diff --git a/arch/arm/mach-s3c2410/s3c244x.c b/arch/arm/mach-s3c2410/s3c244x.c
index 838bc525e836..9a2258270de9 100644
--- a/arch/arm/mach-s3c2410/s3c244x.c
+++ b/arch/arm/mach-s3c2410/s3c244x.c
@@ -69,6 +69,7 @@ void __init s3c244x_map_io(struct map_desc *mach_desc, int size)
s3c_device_i2c.name = "s3c2440-i2c";
s3c_device_nand.name = "s3c2440-nand";
+ s3c_device_usbgadget.name = "s3c2440-usbgadget";
}
void __init s3c244x_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 5f6761ed96b2..dc27167f4d59 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -128,7 +128,7 @@ s3c2410_sleep_save_phys:
*/
ENTRY(s3c2410_cpu_resume)
- mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC
+ mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
msr cpsr_c, r0
@@ load UART to allow us to print the two characters for
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 2fa1e289d177..5a84062f92af 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -177,7 +177,7 @@ sa1110_sdram_controller_fix:
.data
.align 5
ENTRY(sa1100_cpu_resume)
- mov r0, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+ mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, r0 @ set SVC, irqs off
ldr r0, sleep_save_sp @ stack phys addr
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ecf5e232a6fc..c4bca753165b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -15,8 +15,8 @@ config CPU_ARM610
select CPU_32v3
select CPU_CACHE_V3
select CPU_CACHE_VIVT
- select CPU_COPY_V3
- select CPU_TLB_V3
+ select CPU_COPY_V3 if MMU
+ select CPU_TLB_V3 if MMU
help
The ARM610 is the successor to the ARM3 processor
and was produced by VLSI Technology Inc.
@@ -31,8 +31,8 @@ config CPU_ARM710
select CPU_32v3
select CPU_CACHE_V3
select CPU_CACHE_VIVT
- select CPU_COPY_V3
- select CPU_TLB_V3
+ select CPU_COPY_V3 if MMU
+ select CPU_TLB_V3 if MMU
help
A 32-bit RISC microprocessor based on the ARM7 processor core
designed by Advanced RISC Machines Ltd. The ARM710 is the
@@ -50,8 +50,8 @@ config CPU_ARM720T
select CPU_ABRT_LV4T
select CPU_CACHE_V4
select CPU_CACHE_VIVT
- select CPU_COPY_V4WT
- select CPU_TLB_V4WT
+ select CPU_COPY_V4WT if MMU
+ select CPU_TLB_V4WT if MMU
help
A 32-bit RISC processor with 8kByte Cache, Write Buffer and
MMU built around an ARM7TDMI core.
@@ -68,8 +68,8 @@ config CPU_ARM920T
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WBI if MMU
help
The ARM920T is licensed to be produced by numerous vendors,
and is used in the Maverick EP9312 and the Samsung S3C2410.
@@ -89,8 +89,8 @@ config CPU_ARM922T
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WBI if MMU
help
The ARM922T is a version of the ARM920T, but with smaller
instruction and data caches. It is used in Altera's
@@ -108,8 +108,8 @@ config CPU_ARM925T
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WBI if MMU
help
The ARM925T is a mix between the ARM920T and ARM926T, but with
different instruction and data caches. It is used in TI's OMAP
@@ -126,8 +126,8 @@ config CPU_ARM926T
select CPU_32v5
select CPU_ABRT_EV5TJ
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WBI if MMU
help
This is a variant of the ARM920. It has slightly different
instruction sequences for cache and TLB operations. Curiously,
@@ -144,8 +144,8 @@ config CPU_ARM1020
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WBI if MMU
help
The ARM1020 is the 32K cached version of the ARM10 processor,
with an addition of a floating-point unit.
@@ -161,8 +161,8 @@ config CPU_ARM1020E
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WBI if MMU
depends on n
# ARM1022E
@@ -172,8 +172,8 @@ config CPU_ARM1022
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB # can probably do better
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU # can probably do better
+ select CPU_TLB_V4WBI if MMU
help
The ARM1022E is an implementation of the ARMv5TE architecture
based upon the ARM10 integer core with a 16KiB L1 Harvard cache,
@@ -189,8 +189,8 @@ config CPU_ARM1026
select CPU_32v5
select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB # can probably do better
- select CPU_TLB_V4WBI
+ select CPU_COPY_V4WB if MMU # can probably do better
+ select CPU_TLB_V4WBI if MMU
help
The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture
based upon the ARM10 integer core.
@@ -207,8 +207,8 @@ config CPU_SA110
select CPU_ABRT_EV4
select CPU_CACHE_V4WB
select CPU_CACHE_VIVT
- select CPU_COPY_V4WB
- select CPU_TLB_V4WB
+ select CPU_COPY_V4WB if MMU
+ select CPU_TLB_V4WB if MMU
help
The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and
is available at five speeds ranging from 100 MHz to 233 MHz.
@@ -227,7 +227,7 @@ config CPU_SA1100
select CPU_ABRT_EV4
select CPU_CACHE_V4WB
select CPU_CACHE_VIVT
- select CPU_TLB_V4WB
+ select CPU_TLB_V4WB if MMU
# XScale
config CPU_XSCALE
@@ -237,7 +237,7 @@ config CPU_XSCALE
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_CACHE_VIVT
- select CPU_TLB_V4WBI
+ select CPU_TLB_V4WBI if MMU
# XScale Core Version 3
config CPU_XSC3
@@ -247,7 +247,7 @@ config CPU_XSC3
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_CACHE_VIVT
- select CPU_TLB_V4WBI
+ select CPU_TLB_V4WBI if MMU
select IO_36
# ARMv6
@@ -258,8 +258,8 @@ config CPU_V6
select CPU_ABRT_EV6
select CPU_CACHE_V6
select CPU_CACHE_VIPT
- select CPU_COPY_V6
- select CPU_TLB_V6
+ select CPU_COPY_V6 if MMU
+ select CPU_TLB_V6 if MMU
# ARMv6k
config CPU_32v6K
@@ -277,17 +277,17 @@ config CPU_32v6K
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
bool
- select TLS_REG_EMUL if SMP
+ select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
config CPU_32v4
bool
- select TLS_REG_EMUL if SMP
+ select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
config CPU_32v5
bool
- select TLS_REG_EMUL if SMP
+ select TLS_REG_EMUL if SMP || !MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
config CPU_32v6
@@ -334,6 +334,7 @@ config CPU_CACHE_VIVT
config CPU_CACHE_VIPT
bool
+if MMU
# The copy-page model
config CPU_COPY_V3
bool
@@ -372,6 +373,8 @@ config CPU_TLB_V4WBI
config CPU_TLB_V6
bool
+endif
+
#
# CPU supports 36-bit I/O
#
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 07a538505784..21a2770226ee 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,10 +2,16 @@
# Makefile for the linux arm-specific parts of the memory manager.
#
-obj-y := consistent.o extable.o fault-armv.o \
- fault.o flush.o init.o ioremap.o mmap.o \
+obj-y := consistent.o extable.o fault.o init.o \
+ iomap.o
+
+obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \
mm-armv.o
+ifneq ($(CONFIG_MMU),y)
+obj-y += nommu.o
+endif
+
obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
index 3c58ebbf0359..2ee394b11bcb 100644
--- a/arch/arm/mm/copypage-v3.S
+++ b/arch/arm/mm/copypage-v3.S
@@ -35,7 +35,7 @@ ENTRY(v3_copy_user_page)
stmia r0!, {r3, r4, ip, lr} @ 4
ldmneia r1!, {r3, r4, ip, lr} @ 4
bne 1b @ 1
- LOADREGS(fd, sp!, {r4, pc}) @ 3
+ ldmfd sp!, {r4, pc} @ 3
.align 5
/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9ea1f87a7079..989fd681c822 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -26,8 +26,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#define TABLE_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
-
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
new file mode 100644
index 000000000000..62066f3020c8
--- /dev/null
+++ b/arch/arm/mm/iomap.c
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/arm/mm/iomap.c
+ *
+ * Map IO port and PCI memory spaces so that {read,write}[bwl] can
+ * be used to access this memory.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap(start, len);
+ return ioremap_nocache(start, len);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ if ((unsigned long)addr >= VMALLOC_START &&
+ (unsigned long)addr < VMALLOC_END)
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
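
pci_iomap() above returns a cookie usable with the ioread/iowrite accessors whether the BAR is I/O- or memory-mapped. A hedged sketch of the usual probe-time pattern (hypothetical driver fragment, not part of this patch):

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>

/* Map BAR 0 and read a 32-bit register at offset 0. */
static int demo_pci_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 id;

	regs = pci_iomap(pdev, 0, 0);	/* maxlen 0 = map the whole BAR */
	if (!regs)
		return -ENOMEM;

	id = ioread32(regs);		/* works for both I/O and MMIO BARs */
	(void)id;

	pci_iounmap(pdev, regs);
	return 0;
}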
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index c1f7180c7bed..7691cfdba567 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -176,50 +176,3 @@ void __iounmap(void __iomem *addr)
vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(__iounmap);
-
-#ifdef __io
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
- return __io(port);
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_unmap);
-#endif
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#include <linux/ioport.h>
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- unsigned long start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (!len || !start)
- return NULL;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return ioport_map(start, len);
- if (flags & IORESOURCE_MEM) {
- if (flags & IORESOURCE_CACHEABLE)
- return ioremap(start, len);
- return ioremap_nocache(start, len);
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
- if ((unsigned long)addr >= VMALLOC_START &&
- (unsigned long)addr < VMALLOC_END)
- iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iounmap);
-#endif
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
new file mode 100644
index 000000000000..1464ed817b5d
--- /dev/null
+++ b/arch/arm/mm/nommu.c
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/arm/mm/nommu.c
+ *
+ * ARM uCLinux supporting functions.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+void flush_dcache_page(struct page *page)
+{
+ __cpuc_flush_dcache_page(page_address(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void __iomem *__ioremap_pfn(unsigned long pfn, unsigned long offset,
+ size_t size, unsigned long flags)
+{
+ if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
+ return NULL;
+ return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
+}
+EXPORT_SYMBOL(__ioremap_pfn);
+
+void __iomem *__ioremap(unsigned long phys_addr, size_t size,
+ unsigned long flags)
+{
+ return (void __iomem *)phys_addr;
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iounmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(__iounmap);
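
On !MMU kernels the functions above make ioremap() an identity mapping: the cookie handed back is simply the physical address. A brief illustrative sketch (hypothetical register block; the addresses are assumptions) of why the usual ioremap()/readl() driver pattern keeps working unchanged in both builds:

#include <linux/types.h>
#include <asm/io.h>

#define DEMO_PHYS_BASE	0x80000000	/* hypothetical device registers */
#define DEMO_REG_STATUS	0x04

static u32 demo_read_status(void)
{
	/* With an MMU this sets up a real mapping; without one the
	 * cookie is just DEMO_PHYS_BASE, so the code is identical. */
	void __iomem *base = ioremap(DEMO_PHYS_BASE, 0x1000);
	u32 status;

	if (!base)
		return 0;

	status = readl(base + DEMO_REG_STATUS);
	iounmap(base);
	return status;
}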
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 959588884fa5..b9abbafca812 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -101,7 +102,9 @@ ENTRY(cpu_arm1020_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -359,6 +362,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm1020_switch_mm)
+#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
mov r1, #0xF @ 16 segments
@@ -383,6 +387,7 @@ ENTRY(cpu_arm1020_switch_mm)
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
+#endif /* CONFIG_MMU */
mov pc, lr
/*
@@ -392,6 +397,7 @@ ENTRY(cpu_arm1020_switch_mm)
*/
.align 5
ENTRY(cpu_arm1020_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -421,6 +427,7 @@ ENTRY(cpu_arm1020_set_pte)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
mov pc, lr
__INIT
@@ -430,7 +437,9 @@ __arm1020_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, arm1020_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index be6d081ff2b7..bcd5ee022e00 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -101,7 +102,9 @@ ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -344,6 +347,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm1020e_switch_mm)
+#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
mov r1, #0xF @ 16 segments
@@ -367,6 +371,7 @@ ENTRY(cpu_arm1020e_switch_mm)
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -376,6 +381,7 @@ ENTRY(cpu_arm1020e_switch_mm)
*/
.align 5
ENTRY(cpu_arm1020e_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -403,6 +409,7 @@ ENTRY(cpu_arm1020e_set_pte)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
+#endif /* CONFIG_MMU */
mov pc, lr
__INIT
@@ -412,7 +419,9 @@ __arm1020e_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, arm1020e_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index f778545d57a2..b0ccff4fadd2 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -90,7 +91,9 @@ ENTRY(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -333,6 +336,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm1022_switch_mm)
+#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -349,6 +353,7 @@ ENTRY(cpu_arm1022_switch_mm)
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -358,6 +363,7 @@ ENTRY(cpu_arm1022_switch_mm)
*/
.align 5
ENTRY(cpu_arm1022_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -385,6 +391,7 @@ ENTRY(cpu_arm1022_set_pte)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
+#endif /* CONFIG_MMU */
mov pc, lr
__INIT
@@ -394,7 +401,9 @@ __arm1022_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, arm1022_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 148c111fde73..abe850c9a641 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -90,7 +91,9 @@ ENTRY(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -327,6 +330,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm1026_switch_mm)
+#ifdef CONFIG_MMU
mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate
@@ -338,6 +342,7 @@ ENTRY(cpu_arm1026_switch_mm)
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -347,6 +352,7 @@ ENTRY(cpu_arm1026_switch_mm)
*/
.align 5
ENTRY(cpu_arm1026_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -374,6 +380,7 @@ ENTRY(cpu_arm1026_set_pte)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
+#endif /* CONFIG_MMU */
mov pc, lr
@@ -384,8 +391,10 @@ __arm1026_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
mcr p15, 0, r4, c2, c0 @ load page table pointer
+#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #4 @ explicitly disable writeback
mcr p15, 7, r0, c15, c0, 0
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 540359b475d0..7a705edfa4b2 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -2,6 +2,7 @@
* linux/arch/arm/mm/proc-arm6,7.S
*
* Copyright (C) 1997-2000 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -199,10 +200,12 @@ ENTRY(cpu_arm7_do_idle)
*/
ENTRY(cpu_arm6_switch_mm)
ENTRY(cpu_arm7_switch_mm)
+#ifdef CONFIG_MMU
mov r1, #0
mcr p15, 0, r1, c7, c0, 0 @ flush cache
mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
mcr p15, 0, r1, c5, c0, 0 @ flush TLBs
+#endif
mov pc, lr
/*
@@ -214,6 +217,7 @@ ENTRY(cpu_arm7_switch_mm)
.align 5
ENTRY(cpu_arm6_set_pte)
ENTRY(cpu_arm7_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -232,6 +236,7 @@ ENTRY(cpu_arm7_set_pte)
movne r2, #0
str r2, [r0] @ hardware version
+#endif /* CONFIG_MMU */
mov pc, lr
/*
@@ -243,7 +248,9 @@ ENTRY(cpu_arm6_reset)
ENTRY(cpu_arm7_reset)
mov r1, #0
mcr p15, 0, r1, c7, c0, 0 @ flush cache
+#ifdef CONFIG_MMU
mcr p15, 0, r1, c5, c0, 0 @ flush TLB
+#endif
mov r1, #0x30
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0
@@ -253,19 +260,27 @@ ENTRY(cpu_arm7_reset)
.type __arm6_setup, #function
__arm6_setup: mov r0, #0
mcr p15, 0, r0, c7, c0 @ flush caches on v3
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
mov r0, #0x3d @ . ..RS BLDP WCAM
orr r0, r0, #0x100 @ . ..01 0011 1101
+#else
+ mov r0, #0x3c @ . ..RS BLDP WCA.
+#endif
mov pc, lr
.size __arm6_setup, . - __arm6_setup
.type __arm7_setup, #function
__arm7_setup: mov r0, #0
mcr p15, 0, r0, c7, c0 @ flush caches on v3
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
mcr p15, 0, r0, c3, c0 @ load domain access register
mov r0, #0x7d @ . ..RS BLDP WCAM
orr r0, r0, #0x100 @ . ..01 0111 1101
+#else
+ mov r0, #0x7c @ . ..RS BLDP WCA.
+#endif
mov pc, lr
.size __arm7_setup, . - __arm7_setup
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 26f00ee2ad9a..86102467d37f 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -4,6 +4,7 @@
* Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
* Rob Scott (rscott@mtrob.fdns.net)
* Copyright (C) 2000 ARM Limited, Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2004.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,6 +30,7 @@
* out of 'proc-arm6,7.S' per RMK discussion
* 07-25-2000 SJH Added idle function.
* 08-25-2000 DBS Updated for integration of ARM Ltd version.
+ * 04-20-2004 HSC modified for non-paged memory management mode.
*/
#include <linux/linkage.h>
#include <linux/init.h>
@@ -75,10 +77,12 @@ ENTRY(cpu_arm720_do_idle)
* the new.
*/
ENTRY(cpu_arm720_switch_mm)
+#ifdef CONFIG_MMU
mov r1, #0
mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
+#endif
mov pc, lr
/*
@@ -89,6 +93,7 @@ ENTRY(cpu_arm720_switch_mm)
*/
.align 5
ENTRY(cpu_arm720_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -107,6 +112,7 @@ ENTRY(cpu_arm720_set_pte)
movne r2, #0
str r2, [r0] @ hardware version
+#endif
mov pc, lr
/*
@@ -117,7 +123,9 @@ ENTRY(cpu_arm720_set_pte)
ENTRY(cpu_arm720_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ flush TLB (v4)
+#endif
mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x2100 @ ..v....s........
@@ -130,7 +138,9 @@ ENTRY(cpu_arm720_reset)
__arm710_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4)
+#endif
mrc p15, 0, r0, c1, c0 @ get control register
ldr r5, arm710_cr1_clear
bic r0, r0, r5
@@ -156,7 +166,9 @@ arm710_cr1_set:
__arm720_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4)
+#endif
mrc p15, 0, r0, c1, c0 @ get control register
ldr r5, arm720_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index a17f79e0199c..31dc839ba07c 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -97,7 +98,9 @@ ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -317,6 +320,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm920_switch_mm)
+#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -337,6 +341,7 @@ ENTRY(cpu_arm920_switch_mm)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -346,6 +351,7 @@ ENTRY(cpu_arm920_switch_mm)
*/
.align 5
ENTRY(cpu_arm920_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -372,6 +378,7 @@ ENTRY(cpu_arm920_set_pte)
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
mov pc, lr
__INIT
@@ -381,7 +388,9 @@ __arm920_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, arm920_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index bbde4a024a48..9e57c34f5c09 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -4,6 +4,7 @@
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* Copyright (C) 2001 Altera Corporation
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -99,7 +100,9 @@ ENTRY(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -321,6 +324,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm922_switch_mm)
+#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -341,6 +345,7 @@ ENTRY(cpu_arm922_switch_mm)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -350,6 +355,7 @@ ENTRY(cpu_arm922_switch_mm)
*/
.align 5
ENTRY(cpu_arm922_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -376,6 +382,7 @@ ENTRY(cpu_arm922_set_pte)
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
mov pc, lr
__INIT
@@ -385,7 +392,9 @@ __arm922_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, arm922_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 224ce226a01b..8d47c9f3f931 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -9,6 +9,8 @@
* Update for Linux-2.6 and cache flush improvements
* Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
*
+ * hacked for non-paged-MM by Hyok S. Choi, 2004.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -122,7 +124,9 @@ ENTRY(cpu_arm925_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -369,6 +373,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm925_switch_mm)
+#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -383,6 +388,7 @@ ENTRY(cpu_arm925_switch_mm)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -392,6 +398,7 @@ ENTRY(cpu_arm925_switch_mm)
*/
.align 5
ENTRY(cpu_arm925_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -420,6 +427,7 @@ ENTRY(cpu_arm925_set_pte)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
mov pc, lr
__INIT
@@ -438,7 +446,9 @@ __arm925_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #4 @ disable write-back on caches explicitly
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 4e2a087cf388..cb4d8f33d2a3 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 1999-2001 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -85,7 +86,9 @@ ENTRY(cpu_arm926_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -329,6 +332,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
*/
.align 5
ENTRY(cpu_arm926_switch_mm)
+#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -341,6 +345,7 @@ ENTRY(cpu_arm926_switch_mm)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mov pc, lr
/*
@@ -350,6 +355,7 @@ ENTRY(cpu_arm926_switch_mm)
*/
.align 5
ENTRY(cpu_arm926_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -378,6 +384,7 @@ ENTRY(cpu_arm926_set_pte)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
mov pc, lr
__INIT
@@ -387,7 +394,9 @@ __arm926_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index a2dd5ae1077d..5a760a2c629c 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -2,6 +2,7 @@
* linux/arch/arm/mm/proc-sa110.S
*
* Copyright (C) 1997-2002 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -67,7 +68,9 @@ ENTRY(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -130,11 +133,15 @@ ENTRY(cpu_sa110_dcache_clean_area)
*/
.align 5
ENTRY(cpu_sa110_switch_mm)
+#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ldr pc, [sp], #4
+#else
+ mov pc, lr
+#endif
/*
* cpu_sa110_set_pte(ptep, pte)
@@ -143,6 +150,7 @@ ENTRY(cpu_sa110_switch_mm)
*/
.align 5
ENTRY(cpu_sa110_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -164,6 +172,7 @@ ENTRY(cpu_sa110_set_pte)
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
mov pc, lr
__INIT
@@ -173,7 +182,9 @@ __sa110_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, sa110_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 777ad99c1439..0a2107ad4c32 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -2,6 +2,7 @@
* linux/arch/arm/mm/proc-sa1100.S
*
* Copyright (C) 1997-2002 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -77,7 +78,9 @@ ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
@@ -142,12 +145,16 @@ ENTRY(cpu_sa1100_dcache_clean_area)
*/
.align 5
ENTRY(cpu_sa1100_switch_mm)
+#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ldr pc, [sp], #4
+#else
+ mov pc, lr
+#endif
/*
* cpu_sa1100_set_pte(ptep, pte)
@@ -156,6 +163,7 @@ ENTRY(cpu_sa1100_switch_mm)
*/
.align 5
ENTRY(cpu_sa1100_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -177,6 +185,7 @@ ENTRY(cpu_sa1100_set_pte)
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
mov pc, lr
__INIT
@@ -186,7 +195,9 @@ __sa1100_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, sa1100_cr1_clear
bic r0, r0, r5
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ee6f15298735..ca13d4d05f65 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -2,6 +2,7 @@
* linux/arch/arm/mm/proc-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Modified by Catalin Marinas for noMMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -29,38 +30,6 @@
#define TTB_RGN_WT (2 << 3)
#define TTB_RGN_WB (3 << 3)
- .macro cpsie, flags
- .ifc \flags, f
- .long 0xf1080040
- .exitm
- .endif
- .ifc \flags, i
- .long 0xf1080080
- .exitm
- .endif
- .ifc \flags, if
- .long 0xf10800c0
- .exitm
- .endif
- .err
- .endm
-
- .macro cpsid, flags
- .ifc \flags, f
- .long 0xf10c0040
- .exitm
- .endif
- .ifc \flags, i
- .long 0xf10c0080
- .exitm
- .endif
- .ifc \flags, if
- .long 0xf10c00c0
- .exitm
- .endif
- .err
- .endm
-
ENTRY(cpu_v6_proc_init)
mov pc, lr
@@ -120,6 +89,7 @@ ENTRY(cpu_v6_dcache_clean_area)
* - we are not using split page tables
*/
ENTRY(cpu_v6_switch_mm)
+#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
#ifdef CONFIG_SMP
@@ -129,6 +99,7 @@ ENTRY(cpu_v6_switch_mm)
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
mcr p15, 0, r1, c13, c0, 1 @ set context ID
+#endif
mov pc, lr
/*
@@ -151,6 +122,7 @@ ENTRY(cpu_v6_switch_mm)
* 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v6_set_pte)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
bic r2, r1, #0x000003f0
@@ -177,6 +149,7 @@ ENTRY(cpu_v6_set_pte)
str r2, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+#endif
mov pc, lr
@@ -226,12 +199,14 @@ __v6_setup:
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
#ifdef CONFIG_SMP
orr r4, r4, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable
#endif
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+#endif /* CONFIG_MMU */
#ifdef CONFIG_VFP
mrc p15, 0, r0, c1, c0, 2
orr r0, r0, #(0xf << 20)
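
The proc-arm925/arm926/sa110/sa1100/v6 hunks above all apply one pattern: CP15 operations that assume a page table or TLBs are compiled out when CONFIG_MMU is not set, while cache maintenance and the final return are kept. A minimal C-level sketch of that pattern; the helper names are illustrative stand-ins for the mcr sequences, not real kernel functions:

	/* Sketch only: cpu_example_switch_mm() and the cpu_*() helpers are
	 * hypothetical stand-ins for the assembly entry points patched above. */
	static inline void cpu_example_switch_mm(unsigned long pgd_phys)
	{
	#ifdef CONFIG_MMU
		cpu_flush_caches();           /* clean/invalidate D, invalidate I */
		cpu_set_page_table(pgd_phys); /* mcr p15, 0, r0, c2, c0, 0        */
		cpu_flush_tlbs();             /* mcr p15, 0, ip, c8, c7, 0        */
	#endif
		/* without an MMU there is nothing to switch: fall straight through */
	}
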
diff --git a/arch/arm/nwfpe/entry26.S b/arch/arm/nwfpe/entry26.S
index 51940a96d6a6..3e6fb5d21d64 100644
--- a/arch/arm/nwfpe/entry26.S
+++ b/arch/arm/nwfpe/entry26.S
@@ -26,7 +26,7 @@
It is called from the kernel with code similar to this:
mov fp, #0
- teqp pc, #PSR_I_BIT | MODE_SVC
+ teqp pc, #PSR_I_BIT | SVC_MODE
ldr r4, .LC2
ldr pc, [r4] @ Call FP module USR entry point
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 6d7de9c0412f..e1372a25311d 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Mon May 8 20:11:05 2006
+# Last update: Mon Jun 26 22:26:08 2006
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -566,8 +566,8 @@ switchgrass MACH_SWITCHGRASS SWITCHGRASS 549
ens_cmu MACH_ENS_CMU ENS_CMU 550
mm6_sdb MACH_MM6_SDB MM6_SDB 551
saturn MACH_SATURN SATURN 552
-i30030evb MACH_ARGONPLUSEVB ARGONPLUSEVB 553
-mxc27530evb MACH_SCMA11EVB SCMA11EVB 554
+i30030evb MACH_I30030EVB I30030EVB 553
+mxc27530evb MACH_MXC27530EVB MXC27530EVB 554
smdk2800 MACH_SMDK2800 SMDK2800 555
mtwilson MACH_MTWILSON MTWILSON 556
ziti MACH_ZITI ZITI 557
@@ -647,7 +647,7 @@ sendt MACH_SENDT SENDT 630
mx2jazz MACH_MX2JAZZ MX2JAZZ 631
multiio MACH_MULTIIO MULTIIO 632
hrdisplay MACH_HRDISPLAY HRDISPLAY 633
-mxc27530ads MACH_SCMA11BB SCMA11BB 634
+mxc27530ads MACH_MXC27530ADS MXC27530ADS 634
trizeps3 MACH_TRIZEPS3 TRIZEPS3 635
zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636
zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637
@@ -721,7 +721,7 @@ gp32 MACH_GP32 GP32 706
gem MACH_GEM GEM 707
i858 MACH_I858 I858 708
hx2750 MACH_HX2750 HX2750 709
-mxc91131evb MACH_ZEUSEVB ZEUSEVB 710
+mxc91131evb MACH_MXC91131EVB MXC91131EVB 710
p700 MACH_P700 P700 711
cpe MACH_CPE CPE 712
spitz MACH_SPITZ SPITZ 713
@@ -802,7 +802,7 @@ cpuat91 MACH_CPUAT91 CPUAT91 787
rea9200 MACH_REA9200 REA9200 788
acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789
ixp425 MACH_IXP425 IXP425 790
-i30030ads MACH_ARGONPLUSODYSSEY ARGONPLUSODYSSEY 791
+i30030ads MACH_I30030ADS I30030ADS 791
perch MACH_PERCH PERCH 792
eis05r1 MACH_EIS05R1 EIS05R1 793
pepperpad MACH_PEPPERPAD PEPPERPAD 794
@@ -930,7 +930,7 @@ netclient MACH_NETCLIENT NETCLIENT 916
xscale_palmtt5 MACH_XSCALE_PALMTT5 XSCALE_PALMTT5 917
xscale_palmtc MACH_OMAP_PALMTC OMAP_PALMTC 918
omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
-mxc30030evb MACH_ARGONLVEVB ARGONLVEVB 920
+mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
rea_2d MACH_REA_2D REA_2D 921
eti3e524 MACH_TI3E524 TI3E524 922
ateb9200 MACH_ATEB9200 ATEB9200 923
@@ -986,7 +986,7 @@ redfox MACH_REDFOX REDFOX 972
mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973
tpf106 MACH_TPF106 TPF106 974
at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975
-racemt2 MACH_SLEDB SLEDB 976
+rcmt2 MACH_SLEDB SLEDB 976
ontrack MACH_ONTRACK ONTRACK 977
pm1200 MACH_PM1200 PM1200 978
ess24562 MACH_ESS24XXX ESS24XXX 979
@@ -1022,7 +1022,7 @@ smdk2440 MACH_SMDK2440 SMDK2440 1008
smdk2412 MACH_SMDK2412 SMDK2412 1009
webbox MACH_WEBBOX WEBBOX 1010
cwwndp MACH_CWWNDP CWWNDP 1011
-dragon MACH_DRAGON DRAGON 1012
+i839 MACH_DRAGON DRAGON 1012
opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013
ccm2200 MACH_CCM2200 CCM2200 1014
etwarm MACH_ETWARM ETWARM 1015
@@ -1040,3 +1040,56 @@ edg79524 MACH_EDG79524 EDG79524 1026
ai2410 MACH_AI2410 AI2410 1027
ixp465 MACH_IXP465 IXP465 1028
balloon3 MACH_BALLOON3 BALLOON3 1029
+heins MACH_HEINS HEINS 1030
+mpluseva MACH_MPLUSEVA MPLUSEVA 1031
+rt042 MACH_RT042 RT042 1032
+cwiem MACH_CWIEM CWIEM 1033
+cm_x270 MACH_CM_X270 CM_X270 1034
+cm_x255 MACH_CM_X255 CM_X255 1035
+esh_at91 MACH_ESH_AT91 ESH_AT91 1036
+sandgate3 MACH_SANDGATE3 SANDGATE3 1037
+primo MACH_PRIMO PRIMO 1038
+gemstone MACH_GEMSTONE GEMSTONE 1039
+pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040
+sidewinder MACH_SIDEWINDER SIDEWINDER 1041
+picomod1 MACH_PICOMOD1 PICOMOD1 1042
+sg590 MACH_SG590 SG590 1043
+akai9307 MACH_AKAI9307 AKAI9307 1044
+fontaine MACH_FONTAINE FONTAINE 1045
+wombat MACH_WOMBAT WOMBAT 1046
+acq300 MACH_ACQ300 ACQ300 1047
+mod_270 MACH_MOD_270 MOD_270 1048
+vmc_vc0820 MACH_VC0820 VC0820 1049
+ani_aim MACH_ANI_AIM ANI_AIM 1050
+jellyfish MACH_JELLYFISH JELLYFISH 1051
+amanita MACH_AMANITA AMANITA 1052
+vlink MACH_VLINK VLINK 1053
+dexflex MACH_DEXFLEX DEXFLEX 1054
+eigen_ttq MACH_EIGEN_TTQ EIGEN_TTQ 1055
+arcom_titan MACH_ARCOM_TITAN ARCOM_TITAN 1056
+tabla MACH_TABLA TABLA 1057
+mdirac3 MACH_MDIRAC3 MDIRAC3 1058
+mrhfbp2 MACH_MRHFBP2 MRHFBP2 1059
+at91rm9200rb MACH_AT91RM9200RB AT91RM9200RB 1060
+ani_apm MACH_ANI_APM ANI_APM 1061
+ella1 MACH_ELLA1 ELLA1 1062
+inhand_pxa27x MACH_INHAND_PXA27X INHAND_PXA27X 1063
+inhand_pxa25x MACH_INHAND_PXA25X INHAND_PXA25X 1064
+empos_xm MACH_EMPOS_XM EMPOS_XM 1065
+empos MACH_EMPOS EMPOS 1066
+empos_tiny MACH_EMPOS_TINY EMPOS_TINY 1067
+empos_sm MACH_EMPOS_SM EMPOS_SM 1068
+egret MACH_EGRET EGRET 1069
+ostrich MACH_OSTRICH OSTRICH 1070
+n50 MACH_N50 N50 1071
+ecbat91 MACH_ECBAT91 ECBAT91 1072
+stareast MACH_STAREAST STAREAST 1073
+dspg_dw MACH_DSPG_DW DSPG_DW 1074
+onearm MACH_ONEARM ONEARM 1075
+mrg110_6 MACH_MRG110_6 MRG110_6 1076
+wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077
+xm_bulverde MACH_XM_BULVERDE XM_BULVERDE 1078
+msm6100 MACH_MSM6100 MSM6100 1079
+eti_b1 MACH_ETI_B1 ETI_B1 1080
+za9l_series MACH_ZILOG_ZA9L ZILOG_ZA9L 1081
+bit2440 MACH_BIT2440 BIT2440 1082
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 856b665020e7..6a1238a29d6c 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -28,6 +28,10 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
+config IRQ_PER_CPU
+ bool
+ default y
+
config CRIS
bool
default y
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index 4b368a122015..2d5be93b5197 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -172,7 +172,7 @@ init_IRQ(void)
/* Initialize IRQ handler descriptiors. */
for(i = 2; i < NR_IRQS; i++) {
- irq_desc[i].handler = &crisv10_irq_type;
+ irq_desc[i].chip = &crisv10_irq_type;
set_int_vector(i, interrupt[i]);
}
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index 1e9d062103ae..a2b9c60c2777 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -43,10 +43,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
void
pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index c78cc2685133..06260874f018 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -369,7 +369,7 @@ init_IRQ(void)
/* Point all IRQ's to bad handlers. */
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
- irq_desc[j].handler = &crisv32_irq_type;
+ irq_desc[j].chip = &crisv32_irq_type;
set_exception_vector(i, interrupt[j]);
}
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index b504def3e346..6547bb646364 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 0a26bf6f1cd4..4f165c93be42 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -64,10 +64,10 @@ pcibios_update_resource(struct pci_dev *dev, struct resource *root,
*/
void
pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 1596101cfaf8..0463f6335905 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -14,6 +14,10 @@ config X86_32
486, 586, Pentiums, and various instruction-set-compatible chips by
AMD, Cyrix, and others.
+config GENERIC_TIME
+ bool
+ default y
+
config SEMAPHORE_SLEEPERS
bool
default y
@@ -229,7 +233,7 @@ config NR_CPUS
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
- depends on SMP
+ depends on X86_HT
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with Intel Pentium 4 chips with HyperThreading at a
@@ -238,7 +242,7 @@ config SCHED_SMT
config SCHED_MC
bool "Multi-core scheduler support"
- depends on SMP
+ depends on X86_HT
default y
help
Multi-core scheduler support improves the CPU scheduler's decision
@@ -324,6 +328,15 @@ config X86_MCE_P4THERMAL
Enabling this feature will cause a message to be printed when the P4
enters thermal throttling.
+config VM86
+ default y
+ bool "Enable VM86 support" if EMBEDDED
+ help
+ This option is required by programs like DOSEMU to run 16-bit legacy
+ code on X86 processors. It also may be needed by software like
+ XFree86 to initialize some video cards via BIOS. Disabling this
+ option saves about 6k.
+
config TOSHIBA
tristate "Toshiba Laptop support"
---help---
@@ -516,6 +529,7 @@ config X86_PAE
bool
depends on HIGHMEM64G
default y
+ select RESOURCES_64BIT
# Common NUMA Features
config NUMA
@@ -721,7 +735,7 @@ config KEXEC
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
- but it is indepedent of the system firmware. And like a reboot
+ but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similiarity to the exec system call.
@@ -767,9 +781,23 @@ config HOTPLUG_CPU
enable suspend on SMP systems. CPUs can be controlled through
/sys/devices/system/cpu.
+config COMPAT_VDSO
+ bool "Compat VDSO support"
+ default y
+ help
+ Map the VDSO to the predictable old-style address too.
+ ---help---
+ Say N here if you are running a sufficiently recent glibc
+ version (2.3.3 or later), to remove the high-mapped
+ VDSO mapping and to exclusively use the randomized VDSO.
+
+ If unsure, say Y.
endmenu
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
+ depends on HIGHMEM
menu "Power management options (ACPI, APM)"
depends on !X86_VOYAGER
@@ -1046,13 +1074,27 @@ config SCx200
tristate "NatSemi SCx200 support"
depends on !X86_VOYAGER
help
- This provides basic support for the National Semiconductor SCx200
- processor. Right now this is just a driver for the GPIO pins.
+ This provides basic support for National Semiconductor's
+ (now AMD's) Geode processors. The driver probes for the
+ PCI-IDs of several on-chip devices, so it's a good dependency
+ for other scx200_* drivers.
- If you don't know what to do here, say N.
+ If compiled as a module, the driver is named scx200.
- This support is also available as a module. If compiled as a
- module, it will be called scx200.
+config SCx200HR_TIMER
+ tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
+ depends on SCx200 && GENERIC_TIME
+ default y
+ help
+ This driver provides a clocksource built upon the on-chip
+ 27MHz high-resolution timer. It's also a workaround for
+ NSC Geode SC-1100's buggy TSC, which loses time when the
+ processor goes idle (as is done by the scheduler). The
+ other workaround is the idle=poll boot option.
+
+config K8_NB
+ def_bool y
+ depends on AGP_AMD64
source "drivers/pcmcia/Kconfig"
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index eb130482ba18..21c9a4e71104 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -41,7 +41,7 @@ config M386
- "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
- "Geode GX/LX" For AMD Geode GX and LX processors.
- "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
- - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
+ - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
If you don't know what to do, choose "386".
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 33e55476381b..e97946626064 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
isoimage: $(BOOTIMAGE)
-rm -rf $(obj)/isoimage
mkdir $(obj)/isoimage
- cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
- $(obj)/isoimage
+ for i in lib lib64 share end ; do \
+ if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ break ; \
+ fi ; \
+ if [ $$i = end ] ; then exit 1 ; fi ; \
+ done
cp $(BOOTIMAGE) $(obj)/isoimage/linux
echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index f19f3a7492a5..b2ccd543410d 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -24,14 +24,6 @@
#undef memset
#undef memcpy
-
-/*
- * Why do we do this? Don't ask me..
- *
- * Incomprehensible are the ways of bootloaders.
- */
-static void* memset(void *, int, size_t);
-static void* memcpy(void *, __const void *, size_t);
#define memzero(s, n) memset ((s), 0, (n))
typedef unsigned char uch;
@@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */
#endif
#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
-extern char input_data[];
+extern unsigned char input_data[];
extern int input_len;
static long bytes_out = 0;
@@ -103,6 +95,9 @@ static unsigned long output_ptr = 0;
static void *malloc(int size);
static void free(void *where);
+static void *memset(void *s, int c, unsigned n);
+static void *memcpy(void *dest, const void *src, unsigned n);
+
static void putstr(const char *);
extern int end;
@@ -205,7 +200,7 @@ static void putstr(const char *s)
outb_p(0xff & (pos >> 1), vidport+1);
}
-static void* memset(void* s, int c, size_t n)
+static void* memset(void* s, int c, unsigned n)
{
int i;
char *ss = (char*)s;
@@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n)
return s;
}
-static void* memcpy(void* __dest, __const void* __src,
- size_t __n)
+static void* memcpy(void* dest, const void* src, unsigned n)
{
int i;
- char *d = (char *)__dest, *s = (char *)__src;
+ char *d = (char *)dest, *s = (char *)src;
- for (i=0;i<__n;i++) d[i] = s[i];
- return __dest;
+ for (i=0;i<n;i++) d[i] = s[i];
+ return dest;
}
/* ===========================================================================
@@ -309,7 +303,7 @@ static void setup_normal_output_buffer(void)
#else
if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif
- output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */
+ output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
free_mem_end_ptr = (long)real_mode;
}
@@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
#ifdef STANDARD_MEMORY_BIOS_CALL
if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
#else
- if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) <
- (3*1024))
- error("Less than 4MB of memory");
+ if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
#endif
- mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START;
+ mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
low_buffer_size = low_buffer_end - LOW_BUFFER_START;
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index c9343c3a8082..8c2a6faeeae5 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -1929,7 +1929,7 @@ skip10: movb %ah, %al
ret
store_edid:
-#ifdef CONFIG_FB_FIRMWARE_EDID
+#ifdef CONFIG_FIRMWARE_EDID
pushw %es # just save all registers
pushw %ax
pushw %bx
@@ -1947,6 +1947,22 @@ store_edid:
rep
stosl
+ pushw %es # save ES
+ xorw %di, %di # Report Capability
+ pushw %di
+ popw %es # ES:DI must be 0:0
+ movw $0x4f15, %ax
+ xorw %bx, %bx
+ xorw %cx, %cx
+ int $0x10
+ popw %es # restore ES
+
+ cmpb $0x00, %ah # call successful
+ jne no_edid
+
+ cmpb $0x4f, %al # function supported
+ jne no_edid
+
movw $0x4f15, %ax # do VBE/DDC
movw $0x01, %bx
movw $0x00, %cx
@@ -1954,6 +1970,7 @@ store_edid:
movw $0x140, %di
int $0x10
+no_edid:
popw %di # restore all registers
popw %dx
popw %cx
diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S
index 911b15377f2e..f942f0c8f630 100644
--- a/arch/i386/crypto/aes-i586-asm.S
+++ b/arch/i386/crypto/aes-i586-asm.S
@@ -36,22 +36,19 @@
.file "aes-i586-asm.S"
.text
-// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
-// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
-
-#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
+#include <asm/asm-offsets.h>
-// offsets to parameters with one register pushed onto stack
-
-#define in_blk 8 // input byte array address parameter
-#define out_blk 12 // output byte array address parameter
-#define ctx 16 // AES context structure
+#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
-// offsets in context structure
+/* offsets to parameters with one register pushed onto stack */
+#define tfm 8
+#define out_blk 12
+#define in_blk 16
-#define ekey 0 // encryption key schedule base address
-#define nrnd 256 // number of rounds
-#define dkey 260 // decryption key schedule base address
+/* offsets in crypto_tfm structure */
+#define ekey (crypto_tfm_ctx_offset + 0)
+#define nrnd (crypto_tfm_ctx_offset + 256)
+#define dkey (crypto_tfm_ctx_offset + 260)
// register mapping for encrypt and decrypt subroutines
@@ -220,6 +217,7 @@
do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */
// AES (Rijndael) Encryption Subroutine
+/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
.global aes_enc_blk
@@ -230,7 +228,7 @@
aes_enc_blk:
push %ebp
- mov ctx(%esp),%ebp // pointer to context
+ mov tfm(%esp),%ebp
// CAUTION: the order and the values used in these assigns
// rely on the register mappings
@@ -295,6 +293,7 @@ aes_enc_blk:
ret
// AES (Rijndael) Decryption Subroutine
+/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
.global aes_dec_blk
@@ -305,7 +304,7 @@ aes_enc_blk:
aes_dec_blk:
push %ebp
- mov ctx(%esp),%ebp // pointer to context
+ mov tfm(%esp),%ebp
// CAUTION: the order and the values used in these assigns
// rely on the register mappings
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index a50397b1d5c7..d3806daa3de3 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -45,8 +45,8 @@
#include <linux/crypto.h>
#include <linux/linkage.h>
-asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx);
-asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx);
+asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
@@ -378,12 +378,12 @@ static void gen_tabs(void)
k[8*(i)+11] = ss[3]; \
}
-static int
-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
int i;
u32 ss[8];
- struct aes_ctx *ctx = ctx_arg;
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
/* encryption schedule */
@@ -464,16 +464,16 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
return 0;
}
-static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- aes_enc_blk(src, dst, ctx);
+ aes_enc_blk(tfm, dst, src);
}
-static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src)
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- aes_dec_blk(src, dst, ctx);
+ aes_dec_blk(tfm, dst, src);
}
-
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-i586",
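
The assembly above now reaches the AES key schedule through the tfm pointer plus the crypto_tfm_ctx_offset constant (generated in the asm-offsets.c hunk further down). On the C side that is simply crypto_tfm_ctx(); a minimal sketch of the correspondence, with the offsets taken from the aes_ctx layout in aes.c:

	/* Sketch only: what the ekey/nrnd/dkey offsets in aes-i586-asm.S
	 * resolve to.  crypto_tfm_ctx_offset is offsetof(struct crypto_tfm,
	 * __crt_ctx), so the per-transform context sits directly behind tfm. */
	#include <linux/crypto.h>

	static void *example_aes_context(struct crypto_tfm *tfm)
	{
		void *ctx = crypto_tfm_ctx(tfm); /* == (u8 *)tfm + crypto_tfm_ctx_offset */
		return ctx;                      /* ekey at +0, nrnd at +256, dkey at +260 */
	}
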
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 96fb8a020af2..5e70c2fb273a 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,10 +7,9 @@ extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
pci-dma.o i386_ksyms.o i387.o bootflag.o \
- quirks.o i8237.o topology.o alternative.o
+ quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
obj-y += cpu/
-obj-y += timers/
obj-y += acpi/
obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
obj-$(CONFIG_MCA) += mca.o
@@ -37,6 +36,8 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o
obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
obj-$(CONFIG_VM86) += vm86.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_HPET_TIMER) += hpet.o
+obj-$(CONFIG_K8_NB) += k8.o
EXTRA_AFLAGS := -traditional
@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r
$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
$(call if_changed,syscall)
+
+k8-y += ../../x86_64/kernel/k8.o
+
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 5cbd6f99fb2a..50eb0e03777e 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -4,27 +4,41 @@
#include <asm/alternative.h>
#include <asm/sections.h>
-#define DEBUG 0
-#if DEBUG
-# define DPRINTK(fmt, args...) printk(fmt, args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
+static int no_replacement = 0;
+static int smp_alt_once = 0;
+static int debug_alternative = 0;
+
+static int __init noreplacement_setup(char *s)
+{
+ no_replacement = 1;
+ return 1;
+}
+static int __init bootonly(char *str)
+{
+ smp_alt_once = 1;
+ return 1;
+}
+static int __init debug_alt(char *str)
+{
+ debug_alternative = 1;
+ return 1;
+}
+__setup("noreplacement", noreplacement_setup);
+__setup("smp-alt-boot", bootonly);
+__setup("debug-alternative", debug_alt);
+
+#define DPRINTK(fmt, args...) if (debug_alternative) \
+ printk(KERN_DEBUG fmt, args)
+
+#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
as inline assembly strings in the include files and we cannot
get them easily into strings. */
asm("\t.data\nintelnops: "
GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
GENERIC_NOP7 GENERIC_NOP8);
-asm("\t.data\nk8nops: "
- K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
- K8_NOP7 K8_NOP8);
-asm("\t.data\nk7nops: "
- K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
- K7_NOP7 K7_NOP8);
-
-extern unsigned char intelnops[], k8nops[], k7nops[];
+extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
NULL,
intelnops,
@@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
intelnops + 1 + 2 + 3 + 4 + 5 + 6,
intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
+#endif
+
+#ifdef K8_NOP1
+asm("\t.data\nk8nops: "
+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+ K8_NOP7 K8_NOP8);
+extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
NULL,
k8nops,
@@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
k8nops + 1 + 2 + 3 + 4 + 5 + 6,
k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
+#endif
+
+#ifdef K7_NOP1
+asm("\t.data\nk7nops: "
+ K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+ K7_NOP7 K7_NOP8);
+extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
NULL,
k7nops,
@@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
k7nops + 1 + 2 + 3 + 4 + 5 + 6,
k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
+#endif
+
+#ifdef CONFIG_X86_64
+
+extern char __vsyscall_0;
+static inline unsigned char** find_nop_table(void)
+{
+ return k8_nops;
+}
+
+#else /* CONFIG_X86_64 */
+
static struct nop {
int cpuid;
unsigned char **noptable;
@@ -67,14 +107,6 @@ static struct nop {
{ -1, NULL }
};
-
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
-extern u8 *__smp_locks[], *__smp_locks_end[];
-
-extern u8 __smp_alt_begin[], __smp_alt_end[];
-
-
static unsigned char** find_nop_table(void)
{
unsigned char **noptable = intel_nops;
@@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void)
return noptable;
}
+#endif /* CONFIG_X86_64 */
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
+extern u8 *__smp_locks[], *__smp_locks_end[];
+
+extern u8 __smp_alt_begin[], __smp_alt_end[];
+
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
self modifying code. This implies that assymetric systems where
@@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
unsigned char **noptable = find_nop_table();
struct alt_instr *a;
+ u8 *instr;
int diff, i, k;
DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
@@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
BUG_ON(a->replacementlen > a->instrlen);
if (!boot_cpu_has(a->cpuid))
continue;
- memcpy(a->instr, a->replacement, a->replacementlen);
+ instr = a->instr;
+#ifdef CONFIG_X86_64
+ /* vsyscall code is not mapped yet. resolve it manually. */
+ if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
+ instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
+ DPRINTK("%s: vsyscall fixup: %p => %p\n",
+ __FUNCTION__, a->instr, instr);
+ }
+#endif
+ memcpy(instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen;
/* Pad the rest with nops */
for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
@@ -186,14 +236,6 @@ struct smp_alt_module {
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
-static int smp_alt_once = 0;
-static int __init bootonly(char *str)
-{
- smp_alt_once = 1;
- return 1;
-}
-__setup("smp-alt-boot", bootonly);
-
void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end)
@@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name,
struct smp_alt_module *smp;
unsigned long flags;
+ if (no_replacement)
+ return;
+
if (smp_alt_once) {
if (boot_cpu_has(X86_FEATURE_UP))
alternatives_smp_unlock(locks, locks_end,
@@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod)
struct smp_alt_module *item;
unsigned long flags;
- if (smp_alt_once)
+ if (no_replacement || smp_alt_once)
return;
spin_lock_irqsave(&smp_alt, flags);
@@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp)
struct smp_alt_module *mod;
unsigned long flags;
- if (smp_alt_once)
+ if (no_replacement || smp_alt_once)
return;
BUG_ON(!smp && (num_online_cpus() > 1));
@@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp)
void __init alternative_instructions(void)
{
+ if (no_replacement) {
+ printk(KERN_INFO "(SMP-)alternatives turned off\n");
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_alt_begin,
+ (unsigned long)__smp_alt_end);
+ return;
+ }
apply_alternatives(__alt_instructions, __alt_instructions_end);
/* switch to patch-once-at-boottime-only mode and free the
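
For reference, the NOP padding that apply_alternatives() performs after copying a shorter replacement can be sketched in isolation like this (function name illustrative; noptable is the per-vendor table selected by find_nop_table(), each noptable[k] being a k-byte NOP sequence with k <= ASM_NOP_MAX):

	static void example_pad_with_nops(u8 *instr, int instrlen, int replacementlen,
					  unsigned char **noptable)
	{
		int i, k, diff = instrlen - replacementlen;

		/* fill the leftover bytes of the original slot with the
		 * longest NOPs available, exactly as the loop above does */
		for (i = replacementlen; diff > 0; diff -= k, i += k) {
			k = diff > ASM_NOP_MAX ? ASM_NOP_MAX : diff;
			memcpy(instr + i, noptable[k], k);
		}
	}
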
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5ab59c12335b..7ce09492fc0c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -36,6 +36,7 @@
#include <asm/arch_hooks.h>
#include <asm/hpet.h>
#include <asm/i8253.h>
+#include <asm/nmi.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
@@ -156,7 +157,7 @@ void clear_local_APIC(void)
maxlvt = get_maxlvt();
/*
- * Masking an LVT entry on a P6 can trigger a local APIC error
+ * Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
if (maxlvt >= 3) {
@@ -1117,7 +1118,18 @@ void disable_APIC_timer(void)
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
+ /*
+ * When an illegal vector value (0-15) is written to an LVT
+ * entry and delivery mode is Fixed, the APIC may signal an
+ * illegal vector error, without regard to whether the mask
+ * bit is set or whether an interrupt is actually seen on input.
+ *
+ * Boot sequence might call this function when the LVTT has
+ * '0' vector value. So make sure vector field is set to
+ * valid value.
+ */
+ v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write_around(APIC_LVTT, v);
}
}
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 9e819eb68229..7c5729d1fd06 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -764,9 +764,9 @@ static int apm_do_idle(void)
int idled = 0;
int polling;
- polling = test_thread_flag(TIF_POLLING_NRFLAG);
+ polling = !!(current_thread_info()->status & TS_POLLING);
if (polling) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
}
if (!need_resched()) {
@@ -774,7 +774,7 @@ static int apm_do_idle(void)
ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
}
if (polling)
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
if (!idled)
return 0;
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 36d66e2077d0..c80271f8f084 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
* to extract and format the required data.
*/
+#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/personality.h>
@@ -13,6 +14,7 @@
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
+#include <asm/elf.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -53,6 +55,7 @@ void foo(void)
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_addr_limit, thread_info, addr_limit);
OFFSET(TI_restart_block, thread_info, restart_block);
+ OFFSET(TI_sysenter_return, thread_info, sysenter_return);
BLANK();
OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
@@ -68,5 +71,7 @@ void foo(void)
sizeof(struct tss_struct));
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
- DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL));
+ DEFINE(VDSO_PRELINK, VDSO_PRELINK);
+
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
}
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 786d1a57048b..e6a2d6b80cda 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -224,22 +224,26 @@ static void __init init_amd(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_HT
/*
- * On a AMD dual core setup the lower bits of the APIC id
- * distingush the cores. Assumes number of cores is a power
- * of two.
+ * On an AMD multi-core setup the lower bits of the APIC id
+ * distinguish the cores.
*/
if (c->x86_max_cores > 1) {
int cpu = smp_processor_id();
- unsigned bits = 0;
- while ((1 << bits) < c->x86_max_cores)
- bits++;
- cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
- phys_proc_id[cpu] >>= bits;
+ unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
+ if (bits == 0) {
+ while ((1 << bits) < c->x86_max_cores)
+ bits++;
+ }
+ c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
+ c->phys_proc_id >>= bits;
printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
- cpu, c->x86_max_cores, cpu_core_id[cpu]);
+ cpu, c->x86_max_cores, c->cpu_core_id);
}
#endif
+ if (cpuid_eax(0x80000000) >= 0x80000006)
+ num_cache_leaves = 3;
}
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
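
The replacement code reads the core-id width from CPUID 0x80000008 (ECX bits 15:12, ApicIdCoreIdSize) and only falls back to counting up from x86_max_cores when that field is zero. A sketch of the resulting split, with an illustrative helper name and a worked value:

	/* Sketch: deriving core and package ids from the initial APIC id.
	 * 'core_bits' plays the role of the 'bits' variable above. */
	static void example_split_apicid(unsigned int apicid, unsigned int core_bits,
					 unsigned int *core_id, unsigned int *pkg_id)
	{
		*core_id = apicid & ((1u << core_bits) - 1);
		*pkg_id  = apicid >> core_bits;
	}
	/* e.g. a dual-core part reporting ApicIdCoreIdSize == 1:
	 * apicid 5 -> core 1 of package 2 */
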
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 44f2c5f2dda1..70c87de582c7 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
c->x86_mask = tfms & 15;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
c->apicid = (ebx >> 24) & 0xFF;
@@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
early_intel_workaround(c);
#ifdef CONFIG_X86_HT
- phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
@@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
- int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
-
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
@@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
} else if (smp_num_siblings > 1 ) {
if (smp_num_siblings > NR_CPUS) {
- printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+ printk(KERN_WARNING "CPU: Unsupported number of the "
+ "siblings %d", smp_num_siblings);
smp_num_siblings = 1;
return;
}
index_msb = get_count_order(smp_num_siblings);
- phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ c->phys_proc_id);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
@@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
core_bits = get_count_order(c->x86_max_cores);
- cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- cpu_core_id[cpu]);
+ c->cpu_core_id);
}
}
#endif
@@ -613,6 +612,12 @@ void __cpuinit cpu_init(void)
set_in_cr4(X86_CR4_TSD);
}
+ /* The CPU hotplug case */
+ if (cpu_gdt_descr->address) {
+ gdt = (struct desc_struct *)cpu_gdt_descr->address;
+ memset(gdt, 0, PAGE_SIZE);
+ goto old_gdt;
+ }
/*
* This is a horrible hack to allocate the GDT. The problem
* is that cpu_init() is called really early for the boot CPU
@@ -631,7 +636,7 @@ void __cpuinit cpu_init(void)
local_irq_enable();
}
}
-
+old_gdt:
/*
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
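
detect_ht() keeps the same arithmetic; it just stores the results in struct cpuinfo_x86 instead of the global arrays. A compact sketch of that arithmetic, with illustrative function and parameter names (phys_pkg_id() reduces to a right shift on the default subarchitecture):

	/* Sketch: carving package and core ids out of the APIC id when HT
	 * is present.  get_count_order(n) == ceil(log2(n)). */
	static void example_detect_ht(unsigned int apicid,
				      unsigned int siblings,   /* threads per package */
				      unsigned int max_cores,  /* cores per package   */
				      unsigned int *phys_proc_id,
				      unsigned int *cpu_core_id)
	{
		unsigned int index_msb, core_bits;

		index_msb = get_count_order(siblings);
		*phys_proc_id = apicid >> index_msb;          /* phys_pkg_id(apicid, index_msb) */

		siblings = siblings / max_cores;              /* threads per core */
		index_msb = get_count_order(siblings);
		core_bits = get_count_order(max_cores);

		*cpu_core_id = (apicid >> index_msb) & ((1u << core_bits) - 1);
	}
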
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index fc32c8028e24..f03b7f94c304 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -354,7 +354,7 @@ static void __init init_nsc(struct cpuinfo_x86 *c)
* This function only handles the GX processor, and kicks every
* thing else to the Cyrix init function above - that should
* cover any processors that might have been branded differently
- * after NSC aquired Cyrix.
+ * after NSC acquired Cyrix.
*
* If this breaks your GX1 horribly, please e-mail
* info-linux@ldcmail.amd.com to tell us.
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5386b29bb5a5..10afc645c540 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
select_idle_routine(c);
l2 = init_intel_cacheinfo(c);
+ if (c->cpuid_level > 9 ) {
+ unsigned eax = cpuid_eax(10);
+ /* Check for version and the number of counters */
+ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+ set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+ }
/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index c8547a6fa7e6..e9f0b928b0a9 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,6 +4,7 @@
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
* Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ * Andi Kleen : CPUID4 emulation on AMD.
*/
#include <linux/init.h>
@@ -130,25 +131,111 @@ struct _cpuid4_info {
cpumask_t shared_cpu_map;
};
-static unsigned short num_cache_leaves;
+unsigned short num_cache_leaves;
+
+/* AMD doesn't have CPUID4. Emulate it here to report the same
+ information to the user. This makes some assumptions about the machine:
+ No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs.
+
+ In theory the TLBs could be reported as fake type (they are in "dummy").
+ Maybe later */
+union l1_cache {
+ struct {
+ unsigned line_size : 8;
+ unsigned lines_per_tag : 8;
+ unsigned assoc : 8;
+ unsigned size_in_kb : 8;
+ };
+ unsigned val;
+};
+
+union l2_cache {
+ struct {
+ unsigned line_size : 8;
+ unsigned lines_per_tag : 4;
+ unsigned assoc : 4;
+ unsigned size_in_kb : 16;
+ };
+ unsigned val;
+};
+
+static const unsigned short assocs[] = {
+ [1] = 1, [2] = 2, [4] = 4, [6] = 8,
+ [8] = 16,
+ [0xf] = 0xffff // ??
+ };
+static const unsigned char levels[] = { 1, 1, 2 };
+static const unsigned char types[] = { 1, 2, 3 };
+
+static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+ union _cpuid4_leaf_ebx *ebx,
+ union _cpuid4_leaf_ecx *ecx)
+{
+ unsigned dummy;
+ unsigned line_size, lines_per_tag, assoc, size_in_kb;
+ union l1_cache l1i, l1d;
+ union l2_cache l2;
+
+ eax->full = 0;
+ ebx->full = 0;
+ ecx->full = 0;
+
+ cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
+ cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
+
+ if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
+ return;
+
+ eax->split.is_self_initializing = 1;
+ eax->split.type = types[leaf];
+ eax->split.level = levels[leaf];
+ eax->split.num_threads_sharing = 0;
+ eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+
+ if (leaf <= 1) {
+ union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
+ assoc = l1->assoc;
+ line_size = l1->line_size;
+ lines_per_tag = l1->lines_per_tag;
+ size_in_kb = l1->size_in_kb;
+ } else {
+ assoc = l2.assoc;
+ line_size = l2.line_size;
+ lines_per_tag = l2.lines_per_tag;
+ /* cpu_data has errata corrections for K7 applied */
+ size_in_kb = current_cpu_data.x86_cache_size;
+ }
+
+ if (assoc == 0xf)
+ eax->split.is_fully_associative = 1;
+ ebx->split.coherency_line_size = line_size - 1;
+ ebx->split.ways_of_associativity = assocs[assoc] - 1;
+ ebx->split.physical_line_partition = lines_per_tag - 1;
+ ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
+ (ebx->split.ways_of_associativity + 1) - 1;
+}
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
- unsigned int eax, ebx, ecx, edx;
- union _cpuid4_leaf_eax cache_eax;
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned edx;
- cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
- cache_eax.full = eax;
- if (cache_eax.split.type == CACHE_TYPE_NULL)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ amd_cpuid4(index, &eax, &ebx, &ecx);
+ else
+ cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+ if (eax.split.type == CACHE_TYPE_NULL)
return -EIO; /* better error ? */
- this_leaf->eax.full = eax;
- this_leaf->ebx.full = ebx;
- this_leaf->ecx.full = ecx;
- this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
- (this_leaf->ebx.split.coherency_line_size + 1) *
- (this_leaf->ebx.split.physical_line_partition + 1) *
- (this_leaf->ebx.split.ways_of_associativity + 1);
+ this_leaf->eax = eax;
+ this_leaf->ebx = ebx;
+ this_leaf->ecx = ecx;
+ this_leaf->size = (ecx.split.number_of_sets + 1) *
+ (ebx.split.coherency_line_size + 1) *
+ (ebx.split.physical_line_partition + 1) *
+ (ebx.split.ways_of_associativity + 1);
return 0;
}
@@ -174,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif
@@ -296,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
if (new_l2) {
l2 = new_l2;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l2_id;
#endif
}
if (new_l3) {
l3 = new_l3;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l3_id;
#endif
}
@@ -642,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
return;
}
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -660,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cacheinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
.notifier_call = cacheinfo_cpu_callback,
};
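
Because amd_cpuid4() fills in the same minus-one encoded fields that the native CPUID-4 path produces, cpuid4_cache_lookup() can keep a single size formula: size = sets × line size × partitions × ways. A small worked check of that encoding, with values picked purely for illustration:

	/* Sketch only: round-trips the "value minus one" encodings for a
	 * 64 KB, 64-byte-line, 2-way cache. */
	static unsigned int example_cache_size_bytes(void)
	{
		unsigned int size_in_kb = 64, line_size = 64, ways = 2, partitions = 1;
		unsigned int sets = size_in_kb * 1024 / line_size / ways / partitions; /* 512 */

		unsigned int number_of_sets          = sets - 1;       /* 511 */
		unsigned int ways_of_associativity   = ways - 1;       /*   1 */
		unsigned int coherency_line_size     = line_size - 1;  /*  63 */
		unsigned int physical_line_partition = partitions - 1; /*   0 */

		/* same product the lookup routine computes: 65536 bytes = 64 KB */
		return (number_of_sets + 1) * (coherency_line_size + 1) *
		       (physical_line_partition + 1) * (ways_of_associativity + 1);
	}
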
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index a19fcb262dbb..f54a15268ed7 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
* applications want to get the raw CPUID data, they should access
* /dev/cpu/<cpu_nr>/cpuid instead.
*/
- static char *x86_cap_flags[] = {
+ static const char * const x86_cap_flags[] = {
/* Intel-defined */
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
- static char *x86_power_flags[] = {
+ static const char * const x86_power_flags[] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */
@@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_X86_HT
if (c->x86_max_cores * smp_num_siblings > 1) {
- seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
+ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
- seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
+ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 1d9a4abcdfc7..f6dfa9fb675c 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
return NOTIFY_OK;
}
-static struct notifier_block cpuid_class_cpu_notifier =
+static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
{
.notifier_call = cpuid_class_cpu_callback,
};
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 21dc1bbb8067..48f0f62f781c 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
return 1;
}
-/*
- * By using the NMI code instead of a vector we just sneak thru the
- * word generator coming out with just what we want. AND it does
- * not matter if clustered_apic_mode is set or not.
- */
static void smp_send_nmi_allbutself(void)
{
- send_IPI_allbutself(APIC_DM_NMI);
+ send_IPI_allbutself(NMI_VECTOR);
}
static void nmi_shootdown_cpus(void)
@@ -163,7 +158,7 @@ static void nmi_shootdown_cpus(void)
void machine_crash_shutdown(struct pt_regs *regs)
{
/* This function is only called after the system
- * has paniced or is otherwise in a critical state.
+ * has panicked or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
*
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 9202b67c4b2e..8beb0f07d999 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -601,8 +601,10 @@ efi_initialize_iomem_resources(struct resource *code_resource,
res->end = res->start + ((md->num_pages << EFI_PAGE_SHIFT) - 1);
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res) < 0)
- printk(KERN_ERR PFX "Failed to allocate res %s : 0x%lx-0x%lx\n",
- res->name, res->start, res->end);
+ printk(KERN_ERR PFX "Failed to allocate res %s : "
+ "0x%llx-0x%llx\n", res->name,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
/*
* We don't know which region contains kernel data so we try
* it repeatedly and let the resource manager test it.
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index cfc683f153b9..fbdb933251b6 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
+#include <asm/dwarf2.h>
#include "irq_vectors.h"
#define nr_syscalls ((syscall_table_size)/4)
@@ -82,34 +83,76 @@ VM_MASK = 0x00020000
#define resume_kernel restore_nocheck
#endif
+#ifdef CONFIG_VM86
+#define resume_userspace_sig check_userspace
+#else
+#define resume_userspace_sig resume_userspace
+#endif
+
#define SAVE_ALL \
cld; \
pushl %es; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ /*CFI_REL_OFFSET es, 0;*/\
pushl %ds; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ /*CFI_REL_OFFSET ds, 0;*/\
pushl %eax; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET eax, 0;\
pushl %ebp; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET ebp, 0;\
pushl %edi; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET edi, 0;\
pushl %esi; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET esi, 0;\
pushl %edx; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET edx, 0;\
pushl %ecx; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET ecx, 0;\
pushl %ebx; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ CFI_REL_OFFSET ebx, 0;\
movl $(__USER_DS), %edx; \
movl %edx, %ds; \
movl %edx, %es;
#define RESTORE_INT_REGS \
popl %ebx; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE ebx;\
popl %ecx; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE ecx;\
popl %edx; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE edx;\
popl %esi; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE esi;\
popl %edi; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE edi;\
popl %ebp; \
- popl %eax
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE ebp;\
+ popl %eax; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ CFI_RESTORE eax
#define RESTORE_REGS \
RESTORE_INT_REGS; \
1: popl %ds; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ /*CFI_RESTORE ds;*/\
2: popl %es; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ /*CFI_RESTORE es;*/\
.section .fixup,"ax"; \
3: movl $0,(%esp); \
jmp 1b; \
@@ -122,13 +165,43 @@ VM_MASK = 0x00020000
.long 2b,4b; \
.previous
+#define RING0_INT_FRAME \
+ CFI_STARTPROC simple;\
+ CFI_DEF_CFA esp, 3*4;\
+ /*CFI_OFFSET cs, -2*4;*/\
+ CFI_OFFSET eip, -3*4
+
+#define RING0_EC_FRAME \
+ CFI_STARTPROC simple;\
+ CFI_DEF_CFA esp, 4*4;\
+ /*CFI_OFFSET cs, -2*4;*/\
+ CFI_OFFSET eip, -3*4
+
+#define RING0_PTREGS_FRAME \
+ CFI_STARTPROC simple;\
+ CFI_DEF_CFA esp, OLDESP-EBX;\
+ /*CFI_OFFSET cs, CS-OLDESP;*/\
+ CFI_OFFSET eip, EIP-OLDESP;\
+ /*CFI_OFFSET es, ES-OLDESP;*/\
+ /*CFI_OFFSET ds, DS-OLDESP;*/\
+ CFI_OFFSET eax, EAX-OLDESP;\
+ CFI_OFFSET ebp, EBP-OLDESP;\
+ CFI_OFFSET edi, EDI-OLDESP;\
+ CFI_OFFSET esi, ESI-OLDESP;\
+ CFI_OFFSET edx, EDX-OLDESP;\
+ CFI_OFFSET ecx, ECX-OLDESP;\
+ CFI_OFFSET ebx, EBX-OLDESP
ENTRY(ret_from_fork)
+ CFI_STARTPROC
pushl %eax
+ CFI_ADJUST_CFA_OFFSET -4
call schedule_tail
GET_THREAD_INFO(%ebp)
popl %eax
+ CFI_ADJUST_CFA_OFFSET -4
jmp syscall_exit
+ CFI_ENDPROC
/*
* Return to user mode is not as complex as all this looks,
@@ -139,10 +212,12 @@ ENTRY(ret_from_fork)
# userspace resumption stub bypassing syscall exit tracing
ALIGN
+ RING0_PTREGS_FRAME
ret_from_exception:
preempt_stop
ret_from_intr:
GET_THREAD_INFO(%ebp)
+check_userspace:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
testl $(VM_MASK | 3), %eax
@@ -171,20 +246,38 @@ need_resched:
call preempt_schedule_irq
jmp need_resched
#endif
+ CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
# sysenter call handler stub
ENTRY(sysenter_entry)
+ CFI_STARTPROC simple
+ CFI_DEF_CFA esp, 0
+ CFI_REGISTER esp, ebp
movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
sti
pushl $(__USER_DS)
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET ss, 0*/
pushl %ebp
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET esp, 0
pushfl
+ CFI_ADJUST_CFA_OFFSET 4
pushl $(__USER_CS)
- pushl $SYSENTER_RETURN
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+ * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
/*
* Load the potential sixth argument from user stack.
@@ -199,6 +292,7 @@ sysenter_past_esp:
.previous
pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
@@ -219,11 +313,14 @@ sysenter_past_esp:
xorl %ebp,%ebp
sti
sysexit
+ CFI_ENDPROC
# system call handler stub
ENTRY(system_call)
+ RING0_INT_FRAME # can't unwind into user space anyway
pushl %eax # save orig_eax
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
testl $TF_MASK,EFLAGS(%esp)
@@ -256,10 +353,12 @@ restore_all:
movb CS(%esp), %al
andl $(VM_MASK | (4 << 8) | 3), %eax
cmpl $((4 << 8) | 3), %eax
+ CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
RESTORE_REGS
addl $4, %esp
+ CFI_ADJUST_CFA_OFFSET -4
1: iret
.section .fixup,"ax"
iret_exc:
@@ -273,6 +372,7 @@ iret_exc:
.long 1b,iret_exc
.previous
+ CFI_RESTORE_STATE
ldt_ss:
larl OLDSS(%esp), %eax
jnz restore_nocheck
@@ -285,11 +385,13 @@ ldt_ss:
* CPUs, which we can try to work around to make
* dosemu and wine happy. */
subl $8, %esp # reserve space for switch16 pointer
+ CFI_ADJUST_CFA_OFFSET 8
cli
movl %esp, %eax
/* Set up the 16bit stack frame with switch32 pointer on top,
* and a switch16 pointer on top of the current frame. */
call setup_x86_bogus_stack
+ CFI_ADJUST_CFA_OFFSET -8 # frame has moved
RESTORE_REGS
lss 20+4(%esp), %esp # switch to 16bit stack
1: iret
@@ -297,9 +399,11 @@ ldt_ss:
.align 4
.long 1b,iret_exc
.previous
+ CFI_ENDPROC
# perform work that needs to be done immediately before resumption
ALIGN
+ RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending:
testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
@@ -323,18 +427,20 @@ work_notifysig: # deal with pending signals and
# vm86-space
xorl %edx, %edx
call do_notify_resume
- jmp resume_userspace
+ jmp resume_userspace_sig
ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
pushl %ecx # save ti_flags for do_notify_resume
+ CFI_ADJUST_CFA_OFFSET 4
call save_v86_state # %eax contains pt_regs pointer
popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
movl %eax, %esp
xorl %edx, %edx
call do_notify_resume
- jmp resume_userspace
+ jmp resume_userspace_sig
#endif
# perform syscall exit tracing
@@ -363,19 +469,21 @@ syscall_exit_work:
movl $1, %edx
call do_syscall_trace
jmp resume_userspace
+ CFI_ENDPROC
- ALIGN
+ RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault:
pushl %eax # save orig_eax
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
movl $-EFAULT,EAX(%esp)
jmp resume_userspace
- ALIGN
syscall_badsys:
movl $-ENOSYS,EAX(%esp)
jmp resume_userspace
+ CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \
movl %esp, %eax; \
@@ -387,16 +495,21 @@ syscall_badsys:
movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
pushl %eax; \
+ CFI_ADJUST_CFA_OFFSET 4; \
movl %ss, %eax; \
/* see if on 16bit stack */ \
cmpw $__ESPFIX_SS, %ax; \
- jne 28f; \
- movl $__KERNEL_DS, %edx; \
- movl %edx, %ds; \
- movl %edx, %es; \
+ je 28f; \
+27: popl %eax; \
+ CFI_ADJUST_CFA_OFFSET -4; \
+.section .fixup,"ax"; \
+28: movl $__KERNEL_DS, %eax; \
+ movl %eax, %ds; \
+ movl %eax, %es; \
/* switch to 32bit stack */ \
- FIXUP_ESPFIX_STACK \
-28: popl %eax;
+ FIXUP_ESPFIX_STACK; \
+ jmp 27b; \
+.previous
/*
* Build the entry stubs and pointer table with
@@ -408,9 +521,14 @@ ENTRY(interrupt)
vector=0
ENTRY(irq_entries_start)
+ RING0_INT_FRAME
.rept NR_IRQS
ALIGN
-1: pushl $vector-256
+ .if vector
+ CFI_ADJUST_CFA_OFFSET -4
+ .endif
+1: pushl $~(vector)
+ CFI_ADJUST_CFA_OFFSET 4
jmp common_interrupt
.data
.long 1b
@@ -424,60 +542,99 @@ common_interrupt:
movl %esp,%eax
call do_IRQ
jmp ret_from_intr
+ CFI_ENDPROC
#define BUILD_INTERRUPT(name, nr) \
ENTRY(name) \
- pushl $nr-256; \
- SAVE_ALL \
+ RING0_INT_FRAME; \
+ pushl $~(nr); \
+ CFI_ADJUST_CFA_OFFSET 4; \
+ SAVE_ALL; \
movl %esp,%eax; \
call smp_/**/name; \
- jmp ret_from_intr;
+ jmp ret_from_intr; \
+ CFI_ENDPROC
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
ENTRY(divide_error)
+ RING0_INT_FRAME
pushl $0 # no error code
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_divide_error
+ CFI_ADJUST_CFA_OFFSET 4
ALIGN
error_code:
pushl %ds
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET ds, 0*/
pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eax, 0
xorl %eax, %eax
pushl %ebp
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebp, 0
pushl %edi
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edi, 0
pushl %esi
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET esi, 0
pushl %edx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edx, 0
decl %eax # eax = -1
pushl %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx, 0
pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
cld
pushl %es
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET es, 0*/
UNWIND_ESPFIX_STACK
popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ /*CFI_REGISTER es, ecx*/
movl ES(%esp), %edi # get the function address
movl ORIG_EAX(%esp), %edx # get the error code
movl %eax, ORIG_EAX(%esp)
movl %ecx, ES(%esp)
+ /*CFI_REL_OFFSET es, ES*/
movl $(__USER_DS), %ecx
movl %ecx, %ds
movl %ecx, %es
movl %esp,%eax # pt_regs pointer
call *%edi
jmp ret_from_exception
+ CFI_ENDPROC
ENTRY(coprocessor_error)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_coprocessor_error
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(simd_coprocessor_error)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_simd_coprocessor_error
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(device_not_available)
+ RING0_INT_FRAME
pushl $-1 # mark this as an int
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
movl %cr0, %eax
testl $0x4, %eax # EM (math emulation bit)
@@ -487,9 +644,12 @@ ENTRY(device_not_available)
jmp ret_from_exception
device_not_available_emulate:
pushl $0 # temporary storage for ORIG_EIP
+ CFI_ADJUST_CFA_OFFSET 4
call math_emulate
addl $4, %esp
+ CFI_ADJUST_CFA_OFFSET -4
jmp ret_from_exception
+ CFI_ENDPROC
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
@@ -514,16 +674,19 @@ label: \
pushl $sysenter_past_esp
KPROBE_ENTRY(debug)
+ RING0_INT_FRAME
cmpl $sysenter_entry,(%esp)
jne debug_stack_correct
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
pushl $-1 # mark this as an int
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
xorl %edx,%edx # error code 0
movl %esp,%eax # pt_regs pointer
call do_debug
jmp ret_from_exception
+ CFI_ENDPROC
.previous .text
/*
* NMI is doubly nasty. It can happen _while_ we're handling
@@ -534,14 +697,18 @@ debug_stack_correct:
* fault happened on the sysenter path.
*/
ENTRY(nmi)
+ RING0_INT_FRAME
pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl %eax
+ CFI_ADJUST_CFA_OFFSET -4
je nmi_16bit_stack
cmpl $sysenter_entry,(%esp)
je nmi_stack_fixup
pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
movl %esp,%eax
/* Do not access memory above the end of our stack page,
* it might not exist.
@@ -549,16 +716,19 @@ ENTRY(nmi)
andl $(THREAD_SIZE-1),%eax
cmpl $(THREAD_SIZE-20),%eax
popl %eax
+ CFI_ADJUST_CFA_OFFSET -4
jae nmi_stack_correct
cmpl $sysenter_entry,12(%esp)
je nmi_debug_stack_check
nmi_stack_correct:
pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
jmp restore_all
+ CFI_ENDPROC
nmi_stack_fixup:
FIX_STACK(12,nmi_stack_correct, 1)
@@ -574,94 +744,177 @@ nmi_debug_stack_check:
jmp nmi_stack_correct
nmi_16bit_stack:
+ RING0_INT_FRAME
/* create the pointer to lss back */
pushl %ss
+ CFI_ADJUST_CFA_OFFSET 4
pushl %esp
+ CFI_ADJUST_CFA_OFFSET 4
movzwl %sp, %esp
addw $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
pushl 16(%esp)
+ CFI_ADJUST_CFA_OFFSET 4
.endr
pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
+ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
xorl %edx,%edx # zero error code
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to 16bit stack
1: iret
+ CFI_ENDPROC
.section __ex_table,"a"
.align 4
.long 1b,iret_exc
.previous
KPROBE_ENTRY(int3)
+ RING0_INT_FRAME
pushl $-1 # mark this as an int
+ CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_int3
jmp ret_from_exception
+ CFI_ENDPROC
.previous .text
ENTRY(overflow)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_overflow
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(bounds)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_bounds
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(invalid_op)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_invalid_op
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(coprocessor_segment_overrun)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_coprocessor_segment_overrun
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(invalid_TSS)
+ RING0_EC_FRAME
pushl $do_invalid_TSS
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(segment_not_present)
+ RING0_EC_FRAME
pushl $do_segment_not_present
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
ENTRY(stack_segment)
+ RING0_EC_FRAME
pushl $do_stack_segment
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
KPROBE_ENTRY(general_protection)
+ RING0_EC_FRAME
pushl $do_general_protection
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
.previous .text
ENTRY(alignment_check)
+ RING0_EC_FRAME
pushl $do_alignment_check
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
KPROBE_ENTRY(page_fault)
+ RING0_EC_FRAME
pushl $do_page_fault
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
.previous .text
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl machine_check_vector
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
#endif
ENTRY(spurious_interrupt_bug)
+ RING0_INT_FRAME
pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
pushl $do_spurious_interrupt_bug
+ CFI_ADJUST_CFA_OFFSET 4
jmp error_code
+ CFI_ENDPROC
+
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+ CFI_STARTPROC
+ movl 4(%esp), %edx
+ movl (%esp), %ecx
+ leal 4(%esp), %eax
+ movl %ebx, EBX(%edx)
+ xorl %ebx, %ebx
+ movl %ebx, ECX(%edx)
+ movl %ebx, EDX(%edx)
+ movl %esi, ESI(%edx)
+ movl %edi, EDI(%edx)
+ movl %ebp, EBP(%edx)
+ movl %ebx, EAX(%edx)
+ movl $__USER_DS, DS(%edx)
+ movl $__USER_DS, ES(%edx)
+ movl %ebx, ORIG_EAX(%edx)
+ movl %ecx, EIP(%edx)
+ movl 12(%esp), %ecx
+ movl $__KERNEL_CS, CS(%edx)
+ movl %ebx, EFLAGS(%edx)
+ movl %eax, OLDESP(%edx)
+ movl 8(%esp), %eax
+ movl %ecx, 8(%esp)
+ movl EBX(%edx), %ebx
+ movl $__KERNEL_DS, OLDSS(%edx)
+ jmpl *%eax
+ CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
.section .rodata,"a"
#include "syscall_table.S"
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
new file mode 100644
index 000000000000..c6737c35815d
--- /dev/null
+++ b/arch/i386/kernel/hpet.c
@@ -0,0 +1,67 @@
+#include <linux/clocksource.h>
+#include <linux/errno.h>
+#include <linux/hpet.h>
+#include <linux/init.h>
+
+#include <asm/hpet.h>
+#include <asm/io.h>
+
+#define HPET_MASK CLOCKSOURCE_MASK(32)
+#define HPET_SHIFT 22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC 1000000
+
+static void *hpet_ptr;
+
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)readl(hpet_ptr);
+}
+
+static struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = HPET_MASK,
+ .mult = 0, /* set below */
+ .shift = HPET_SHIFT,
+ .is_continuous = 1,
+};
+
+static int __init init_hpet_clocksource(void)
+{
+ unsigned long hpet_period;
+ void __iomem* hpet_base;
+ u64 tmp;
+
+ if (!hpet_address)
+ return -ENODEV;
+
+ /* calculate the hpet address: */
+ hpet_base =
+ (void __iomem*)ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+ hpet_ptr = hpet_base + HPET_COUNTER;
+
+ /* calculate the frequency: */
+ hpet_period = readl(hpet_base + HPET_PERIOD);
+
+ /*
+ * hpet period is in femtoseconds per cycle,
+ * so we need to convert this to ns/cyc units
+ * approximated by mult/2^shift
+ *
+ * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+ * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+ * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+ * (fsec/cyc << shift)/1000000 = mult
+ * (hpet_period << shift)/FSEC_PER_NSEC = mult
+ */
+ tmp = (u64)hpet_period << HPET_SHIFT;
+ do_div(tmp, FSEC_PER_NSEC);
+ clocksource_hpet.mult = (u32)tmp;
+
+ return clocksource_register(&clocksource_hpet);
+}
+
+module_init(init_hpet_clocksource);
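For readers following the mult/shift comment in init_hpet_clocksource() above,
here is a throwaway userspace check of the conversion; the 69841279 fs period
(roughly a 14.318 MHz HPET) is an assumed example value, not taken from the
patch.

#include <stdio.h>
#include <stdint.h>

#define HPET_SHIFT    22
#define FSEC_PER_NSEC 1000000ULL

int main(void)
{
	uint64_t hpet_period = 69841279;  /* fs per cycle, assumed (~14.318 MHz) */
	uint64_t mult = (hpet_period << HPET_SHIFT) / FSEC_PER_NSEC;
	uint64_t cycles = 14318180;       /* one second worth of cycles */

	/* the clocksource core computes ns as (cycles * mult) >> shift */
	uint64_t ns = (cycles * mult) >> HPET_SHIFT;

	printf("mult=%llu, 1s of cycles -> %llu ns\n",
	       (unsigned long long)mult, (unsigned long long)ns);
	return 0;
}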
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
new file mode 100644
index 000000000000..477b24daff53
--- /dev/null
+++ b/arch/i386/kernel/i8253.c
@@ -0,0 +1,118 @@
+/*
+ * i8253.c 8253/PIT functions
+ *
+ */
+#include <linux/clocksource.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/sysdev.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/smp.h>
+#include <asm/delay.h>
+#include <asm/i8253.h>
+#include <asm/io.h>
+
+#include "io_ports.h"
+
+DEFINE_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
+
+void setup_pit_timer(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+ outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
+ udelay(10);
+ outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
+ udelay(10);
+ outb(LATCH >> 8 , PIT_CH0); /* MSB */
+ spin_unlock_irqrestore(&i8253_lock, flags);
+}
+
+/*
+ * Since the PIT overflows every tick, it's not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t pit_read(void)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ outb_p(0x00, PIT_MODE); /* latch the count ASAP */
+ count = inb_p(PIT_CH0); /* read the latched count */
+ count |= inb_p(PIT_CH0) << 8;
+
+ /* VIA686a test code... reset the latch if count > max + 1 */
+ if (count > LATCH) {
+ outb_p(0x34, PIT_MODE);
+ outb_p(LATCH & 0xff, PIT_CH0);
+ outb(LATCH >> 8, PIT_CH0);
+ count = LATCH - 1;
+ }
+
+ /*
+ * It's possible for count to appear to go the wrong way for a
+ * couple of reasons:
+ *
+ * 1. The timer counter underflows, but we haven't handled the
+ * resulting interrupt and incremented jiffies yet.
+ * 2. Hardware problem with the timer, not giving us continuous time,
+ * the counter does small "jumps" upwards on some Pentium systems,
+ * (see c't 95/10 page 335 for Neptun bug.)
+ *
+ * Previous attempts to handle these cases intelligently were
+ * buggy, so we just do the simple thing now.
+ */
+ if (count > old_count && jifs == old_jifs) {
+ count = old_count;
+ }
+ old_count = count;
+ old_jifs = jifs;
+
+ spin_unlock_irqrestore(&i8253_lock, flags);
+
+ count = (LATCH - 1) - count;
+
+ return (cycle_t)(jifs * LATCH) + count;
+}
+
+static struct clocksource clocksource_pit = {
+ .name = "pit",
+ .rating = 110,
+ .read = pit_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .mult = 0,
+ .shift = 20,
+};
+
+static int __init init_pit_clocksource(void)
+{
+ if (num_possible_cpus() > 4) /* PIT does not scale! */
+ return 0;
+
+ clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+ return clocksource_register(&clocksource_pit);
+}
+module_init(init_pit_clocksource);
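The "emulate a free running counter" trick in pit_read() above boils down to
jiffies supplying the coarse part and the latched, down-counting channel 0
value supplying the fine part. A hypothetical sketch of that composition (no
real port I/O; the LATCH value is an assumed HZ=100 figure):

#include <stdio.h>
#include <stdint.h>

#define LATCH 11932 /* assumed PIT reload value (1.19318 MHz / 100 Hz) */

static uint64_t pit_cycles(uint32_t jifs, int latched_count)
{
	int elapsed = (LATCH - 1) - latched_count; /* channel 0 counts down */
	return (uint64_t)jifs * LATCH + elapsed;
}

int main(void)
{
	/* two reads within the same jiffy must not go backwards */
	printf("%llu\n", (unsigned long long)pit_cycles(1000, 9000));
	printf("%llu\n", (unsigned long long)pit_cycles(1000, 3000));
	return 0;
}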
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index b7636b96e104..3c6063671a9f 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -132,7 +132,7 @@ void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
- irq_desc[irq].handler = &i8259A_irq_type;
+ irq_desc[irq].chip = &i8259A_irq_type;
enable_irq(irq);
}
@@ -175,7 +175,7 @@ static void mask_and_ack_8259A(unsigned int irq)
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
* of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs
* usually resulting from the 8259A-1|2 PICs) occur
@@ -386,12 +386,12 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- irq_desc[i].handler = &i8259A_irq_type;
+ irq_desc[i].chip = &i8259A_irq_type;
} else {
/*
* 'high' PCI IRQs filled in on demand
*/
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
}
}
}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index a62df3e764c5..ec9ea0269d36 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -38,6 +38,7 @@
#include <asm/desc.h>
#include <asm/timer.h>
#include <asm/i8259.h>
+#include <asm/nmi.h>
#include <mach_apic.h>
@@ -50,6 +51,7 @@ atomic_t irq_mis_count;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
+static DEFINE_SPINLOCK(vector_lock);
int timer_over_8254 __initdata = 1;
@@ -579,7 +581,7 @@ static int balanced_irq(void *unused)
/* push everything to CPU 0 to give us a starting point. */
for (i = 0 ; i < NR_IRQS ; i++) {
- pending_irq_cpumask[i] = cpumask_of_cpu(0);
+ irq_desc[i].pending_mask = cpumask_of_cpu(0);
set_pending_irq(i, cpumask_of_cpu(0));
}
@@ -1161,10 +1163,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
+ unsigned long flags;
+ int vector;
+
+ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
- BUG_ON(irq >= NR_IRQ_VECTORS);
- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
+ spin_lock_irqsave(&vector_lock, flags);
+
+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
+ spin_unlock_irqrestore(&vector_lock, flags);
return IO_APIC_VECTOR(irq);
+ }
next:
current_vector += 8;
if (current_vector == SYSCALL_VECTOR)
@@ -1172,16 +1181,21 @@ next:
if (current_vector >= FIRST_SYSTEM_VECTOR) {
offset++;
- if (!(offset%8))
+ if (!(offset%8)) {
+ spin_unlock_irqrestore(&vector_lock, flags);
return -ENOSPC;
+ }
current_vector = FIRST_DEVICE_VECTOR + offset;
}
- vector_irq[current_vector] = irq;
+ vector = current_vector;
+ vector_irq[vector] = irq;
if (irq != AUTO_ASSIGN)
- IO_APIC_VECTOR(irq) = current_vector;
+ IO_APIC_VECTOR(irq) = vector;
+
+ spin_unlock_irqrestore(&vector_lock, flags);
- return current_vector;
+ return vector;
}
static struct hw_interrupt_type ioapic_level_type;
@@ -1191,23 +1205,18 @@ static struct hw_interrupt_type ioapic_edge_type;
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
-static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
- if (use_pci_vector() && !platform_legacy_irq(irq)) {
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
- irq_desc[vector].handler = &ioapic_level_type;
- else
- irq_desc[vector].handler = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[vector]);
- } else {
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
- irq_desc[irq].handler = &ioapic_level_type;
- else
- irq_desc[irq].handler = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[irq]);
- }
+ unsigned idx;
+
+ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+ irq_desc[idx].chip = &ioapic_level_type;
+ else
+ irq_desc[idx].chip = &ioapic_edge_type;
+ set_intr_gate(vector, interrupt[idx]);
}
static void __init setup_IO_APIC_irqs(void)
@@ -1318,7 +1327,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- irq_desc[0].handler = &ioapic_edge_type;
+ irq_desc[0].chip = &ioapic_edge_type;
/*
* Add it to the IO-APIC irq-routing table:
@@ -2062,6 +2071,13 @@ static void set_ioapic_affinity_vector (unsigned int vector,
#endif
#endif
+static int ioapic_retrigger(unsigned int irq)
+{
+ send_IPI_self(IO_APIC_VECTOR(irq));
+
+ return 1;
+}
+
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2081,6 +2097,7 @@ static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
#endif
+ .retrigger = ioapic_retrigger,
};
static struct hw_interrupt_type ioapic_level_type __read_mostly = {
@@ -2094,6 +2111,7 @@ static struct hw_interrupt_type ioapic_level_type __read_mostly = {
#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
#endif
+ .retrigger = ioapic_retrigger,
};
static inline void init_IO_APIC_traps(void)
@@ -2128,7 +2146,7 @@ static inline void init_IO_APIC_traps(void)
make_8259A_irq(irq);
else
/* Strange. Oh, well.. */
- irq_desc[irq].handler = &no_irq_type;
+ irq_desc[irq].chip = &no_irq_type;
}
}
}
@@ -2344,7 +2362,7 @@ static inline void check_timer(void)
printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
disable_8259A_irq(0);
- irq_desc[0].handler = &lapic_irq_type;
+ irq_desc[0].chip = &lapic_irq_type;
apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
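The vector_lock introduced above turns assign_irq_vector() into a classic
"return the existing assignment or hand out a fresh slot, atomically" routine.
A much simplified userspace sketch of that pattern (a pthread mutex stands in
for the spinlock; the vector numbers are arbitrary assumptions):

#include <stdio.h>
#include <pthread.h>

#define NR_IRQS 16

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_vector[NR_IRQS];   /* 0 means "not assigned yet" */
static int next_vector = 0x31;    /* assumed first free vector */

static int assign_vector(int irq)
{
	int vector;

	pthread_mutex_lock(&vector_lock);
	if (irq_vector[irq] > 0) {        /* already assigned: reuse it */
		vector = irq_vector[irq];
	} else {
		vector = next_vector++;   /* hand out a fresh slot */
		irq_vector[irq] = vector;
	}
	pthread_mutex_unlock(&vector_lock);
	return vector;
}

int main(void)
{
	printf("%#x %#x %#x\n", assign_vector(3), assign_vector(4), assign_vector(3));
	return 0;
}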
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 49ce4c31b713..16b491703967 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -53,13 +53,19 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
*/
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
- /* high bits used in ret_from_ code */
- int irq = regs->orig_eax & 0xff;
+ /* high bit used in ret_from_ code */
+ int irq = ~regs->orig_eax;
#ifdef CONFIG_4KSTACKS
union irq_ctx *curctx, *irqctx;
u32 *isp;
#endif
+ if (unlikely((unsigned)irq >= NR_IRQS)) {
+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+ __FUNCTION__, irq);
+ BUG();
+ }
+
irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
@@ -76,6 +82,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
}
#endif
+ if (!irq_desc[irq].handle_irq) {
+ __do_IRQ(irq, regs);
+ goto out_exit;
+ }
#ifdef CONFIG_4KSTACKS
curctx = (union irq_ctx *) current_thread_info();
@@ -100,8 +110,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
* softirq checks work in the hardirq context.
*/
irqctx->tinfo.preempt_count =
- irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK |
- curctx->tinfo.preempt_count & SOFTIRQ_MASK;
+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
asm volatile(
" xchgl %%ebx,%%esp \n"
@@ -115,6 +125,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
#endif
__do_IRQ(irq, regs);
+out_exit:
irq_exit();
return 1;
@@ -227,7 +238,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n');
}
@@ -243,7 +254,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -285,13 +296,13 @@ void fixup_irqs(cpumask_t map)
if (irq == 2)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].handler->set_affinity)
- irq_desc[irq].handler->set_affinity(irq, mask);
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq, mask);
else if (irq_desc[irq].action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
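The do_IRQ() change above pairs with the "pushl $~(vector)" stubs in entry.S:
storing the bitwise complement keeps orig_eax negative (which is what the
ret_from_ code checks to recognise interrupt frames) while letting do_IRQ()
recover the number with a second complement. A tiny illustration, with an
arbitrary example number:

#include <stdio.h>

int main(void)
{
	int irq = 0x31;                 /* example interrupt number */
	long orig_eax = ~(long)irq;     /* what "pushl $~(vector)" leaves in the frame */

	printf("negative marker: %d\n", orig_eax < 0);
	printf("recovered: %#x\n", (unsigned)~orig_eax);
	return 0;
}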
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 395a9a6dff88..727e419ad78a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -57,34 +57,85 @@ static __always_inline void set_jmp_op(void *from, void *to)
/*
* returns non-zero if opcodes can be boosted.
*/
-static __always_inline int can_boost(kprobe_opcode_t opcode)
+static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
- switch (opcode & 0xf0 ) {
+#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
+ (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
+ (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
+ (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
+ (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
+ << (row % 32))
+ /*
+ * Undefined/reserved opcodes, conditional jumps, Opcode Extension
+ * Groups, and some special opcodes cannot be boosted.
+ */
+ static const unsigned long twobyte_is_boostable[256 / 32] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ------------------------------- */
+ W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
+ W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
+ W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
+ W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
+ W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
+ W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
+ W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
+ W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
+ W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
+ W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
+ W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
+ W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
+ W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
+ W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
+ W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
+ W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0) /* f0 */
+ /* ------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ };
+#undef W
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+retry:
+ if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
+ return 0;
+ opcode = *(opcodes++);
+
+ /* 2nd-byte opcode */
+ if (opcode == 0x0f) {
+ if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
+ return 0;
+ return test_bit(*opcodes, twobyte_is_boostable);
+ }
+
+ switch (opcode & 0xf0) {
+ case 0x60:
+ if (0x63 < opcode && opcode < 0x67)
+ goto retry; /* prefixes */
+ /* can't boost Address-size override and bound */
+ return (opcode != 0x62 && opcode != 0x67);
case 0x70:
return 0; /* can't boost conditional jump */
- case 0x90:
- /* can't boost call and pushf */
- return opcode != 0x9a && opcode != 0x9c;
case 0xc0:
- /* can't boost undefined opcodes and soft-interruptions */
- return (0xc1 < opcode && opcode < 0xc6) ||
- (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
+ /* can't boost software interrupts */
+ return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
case 0xd0:
/* can boost AA* and XLAT */
return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
case 0xe0:
- /* can boost in/out and (may be) jmps */
- return (0xe3 < opcode && opcode != 0xe8);
+ /* can boost in/out and absolute jmps */
+ return ((opcode & 0x04) || opcode == 0xea);
case 0xf0:
+ if ((opcode & 0x0c) == 0 && opcode != 0xf1)
+ goto retry; /* lock/rep(ne) prefix */
 /* clear and set flags can be boosted */
return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
default:
- /* currently, can't boost 2 bytes opcodes */
- return opcode != 0x0f;
+ if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
+ goto retry; /* prefixes */
+ /* can't boost CS override and call */
+ return (opcode != 0x2e && opcode != 0x9a);
}
}
-
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
@@ -109,7 +160,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
- if (can_boost(p->opcode)) {
+ if (can_boost(p->addr)) {
p->ainsn.boostable = 0;
} else {
p->ainsn.boostable = -1;
@@ -208,7 +259,9 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
unsigned pre_preempt_count = preempt_count();
-#endif /* CONFIG_PREEMPT */
+#else
+ unsigned pre_preempt_count = 1;
+#endif
addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
@@ -285,22 +338,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
/* handler has already set things up, so skip ss setup */
return 1;
- if (p->ainsn.boostable == 1 &&
-#ifdef CONFIG_PREEMPT
- !(pre_preempt_count) && /*
- * This enables booster when the direct
- * execution path aren't preempted.
- */
-#endif /* CONFIG_PREEMPT */
- !p->post_handler && !p->break_handler ) {
+ss_probe:
+ if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
regs->eip = (unsigned long)p->ainsn.insn;
preempt_enable_no_resched();
return 1;
}
-
-ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
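The W() table in can_boost() above packs one flag per two-byte opcode into an
array of longs and consults it with test_bit(). A hypothetical standalone
sketch of that lookup follows; the single bit set here is an arbitrary example,
not the real table contents.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static int my_test_bit(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long boostable[256 / BITS_PER_LONG] = { 0 };

	/* pretend the 0x0f 0x90 opcode were marked boostable */
	boostable[0x90 / BITS_PER_LONG] |= 1UL << (0x90 % BITS_PER_LONG);

	printf("0x0f 0x90 -> %d\n", my_test_bit(0x90, boostable));
	printf("0x0f 0x80 -> %d\n", my_test_bit(0x80, boostable));
	return 0;
}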
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index f73d7374a2ba..511abe52a94e 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -133,9 +133,9 @@ typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
unsigned long start_address,
unsigned int has_pae) ATTRIB_NORET;
-const extern unsigned char relocate_new_kernel[];
+extern const unsigned char relocate_new_kernel[];
extern void relocate_new_kernel_end(void);
-const extern unsigned int relocate_new_kernel_size;
+extern const unsigned int relocate_new_kernel_size;
/*
 * An architecture hook called to validate the
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 7a328230e540..d022cb8fd725 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti
return NOTIFY_OK;
}
-static struct notifier_block msr_class_cpu_notifier =
+static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
{
.notifier_call = msr_class_cpu_callback,
};
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d43b498ec745..a76e93146585 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -14,21 +14,17 @@
*/
#include <linux/config.h>
-#include <linux/mm.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
+#include <linux/percpu.h>
#include <asm/smp.h>
-#include <asm/div64.h>
#include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
#include "mach_traps.h"
@@ -100,6 +96,9 @@ int nmi_active;
(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
+#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
* the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -212,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog);
+static void disable_intel_arch_watchdog(void);
+
static void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
@@ -221,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
wrmsr(MSR_K7_EVNTSEL0, 0, 0);
break;
case X86_VENDOR_INTEL:
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ disable_intel_arch_watchdog();
+ break;
+ }
switch (boot_cpu_data.x86) {
case 6:
if (boot_cpu_data.x86_model > 0xd)
@@ -449,6 +454,53 @@ static int setup_p4_watchdog(void)
return 1;
}
+static void disable_intel_arch_watchdog(void)
+{
+ unsigned ebx;
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Unhalted Core Cycles Event or not.
+ * NOTE: Corresponding bit = 0 in ebx indicates event present.
+ */
+ ebx = cpuid_ebx(10);
+ if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+ unsigned int evntsel;
+ unsigned ebx;
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Unhalted Core Cycles Event or not.
+ * NOTE: Corresponding bit = 0 in ebx indicates event present.
+ */
+ ebx = cpuid_ebx(10);
+ if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+ return 0;
+
+ nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+ clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+ clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+ evntsel = ARCH_PERFMON_EVENTSEL_INT
+ | ARCH_PERFMON_EVENTSEL_OS
+ | ARCH_PERFMON_EVENTSEL_USR
+ | ARCH_PERFMON_NMI_EVENT_SEL
+ | ARCH_PERFMON_NMI_EVENT_UMASK;
+
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+ write_watchdog_counter("INTEL_ARCH_PERFCTR0");
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+ return 1;
+}
+
void setup_apic_nmi_watchdog (void)
{
switch (boot_cpu_data.x86_vendor) {
@@ -458,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
setup_k7_watchdog();
break;
case X86_VENDOR_INTEL:
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ if (!setup_intel_arch_watchdog())
+ return;
+ break;
+ }
switch (boot_cpu_data.x86) {
case 6:
if (boot_cpu_data.x86_model > 0xd)
@@ -561,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
- else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
+ else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
+ nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
/* Only P6 based Pentium M need to re-unmask
* the apic vector but it doesn't hurt
* other P6 variant */
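One detail worth calling out in the new setup_intel_arch_watchdog() /
disable_intel_arch_watchdog() pair above: CPUID leaf 0xA reports event
availability in EBX with inverted polarity, so a cleared bit means the event
exists, hence the "!(ebx & ...)" tests. A minimal sketch of that check; the
bit position for unhalted core cycles is assumed here:

#include <stdio.h>

#define UNHALTED_CORE_CYCLES_PRESENT (1u << 0) /* assumed bit position */

static int unhalted_core_cycles_available(unsigned int ebx)
{
	/* bit clear => event is implemented */
	return !(ebx & UNHALTED_CORE_CYCLES_PRESENT);
}

int main(void)
{
	printf("%d\n", unhalted_core_cycles_available(0x0)); /* 1: available */
	printf("%d\n", unhalted_core_cycles_available(0x1)); /* 0: not available */
	return 0;
}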
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index 5f5b075f860a..0caf14652bad 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -79,10 +79,12 @@ int __init get_memcfg_numaq(void)
return 1;
}
-static int __init numaq_dsc_disable(void)
+static int __init numaq_tsc_disable(void)
{
- printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
- tsc_disable = 1;
+ if (num_online_nodes() > 1) {
+ printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+ tsc_disable = 1;
+ }
return 0;
}
-core_initcall(numaq_dsc_disable);
+arch_initcall(numaq_tsc_disable);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 6259afea46d1..6946b06e2784 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -102,7 +102,7 @@ void default_idle(void)
local_irq_enable();
if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
while (!need_resched()) {
local_irq_disable();
@@ -111,7 +111,7 @@ void default_idle(void)
else
local_irq_enable();
}
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
} else {
while (!need_resched())
cpu_relax();
@@ -174,7 +174,7 @@ void cpu_idle(void)
{
int cpu = smp_processor_id();
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
/* endless idle loop with no priority at all */
while (1) {
@@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs)
cr3 = read_cr3();
cr4 = read_cr4_safe();
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
- show_trace(NULL, &regs->esp);
+ show_trace(NULL, regs, &regs->esp);
}
/*
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 321f5fd26e75..9bf590cefc7d 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scx200.h>
@@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = {
.probe = scx200_probe,
};
-static DEFINE_SPINLOCK(scx200_gpio_config_lock);
+static DEFINE_MUTEX(scx200_gpio_config_lock);
-static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static void __devinit scx200_init_shadow(void)
{
int bank;
+
+ /* read the current values driven on the GPIO signals */
+ for (bank = 0; bank < 2; ++bank)
+ scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+}
+
+static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
unsigned base;
if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
@@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
}
scx200_gpio_base = base;
-
- /* read the current values driven on the GPIO signals */
- for (bank = 0; bank < 2; ++bank)
- scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+ scx200_init_shadow();
} else {
/* find the base of the Configuration Block */
@@ -87,12 +93,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
return 0;
}
-u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
+u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
{
u32 config, new_config;
- unsigned long flags;
- spin_lock_irqsave(&scx200_gpio_config_lock, flags);
+ mutex_lock(&scx200_gpio_config_lock);
outl(index, scx200_gpio_base + 0x20);
config = inl(scx200_gpio_base + 0x24);
@@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
new_config = (config & mask) | bits;
outl(new_config, scx200_gpio_base + 0x24);
- spin_unlock_irqrestore(&scx200_gpio_config_lock, flags);
+ mutex_unlock(&scx200_gpio_config_lock);
return config;
}
-#if 0
-void scx200_gpio_dump(unsigned index)
-{
- u32 config = scx200_gpio_configure(index, ~0, 0);
- printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config);
-
- if (config & 1)
- printk(" OE"); /* output enabled */
- else
- printk(" TS"); /* tristate */
- if (config & 2)
- printk(" PP"); /* push pull */
- else
- printk(" OD"); /* open drain */
- if (config & 4)
- printk(" PUE"); /* pull up enabled */
- else
- printk(" PUD"); /* pull up disabled */
- if (config & 8)
- printk(" LOCKED"); /* locked */
- if (config & 16)
- printk(" LEVEL"); /* level input */
- else
- printk(" EDGE"); /* edge input */
- if (config & 32)
- printk(" HI"); /* trigger on rising edge */
- else
- printk(" LO"); /* trigger on falling edge */
- if (config & 64)
- printk(" DEBOUNCE"); /* debounce */
- printk("\n");
-}
-#endif /* 0 */
-
static int __init scx200_init(void)
{
printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
@@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base);
EXPORT_SYMBOL(scx200_gpio_shadow);
EXPORT_SYMBOL(scx200_gpio_configure);
EXPORT_SYMBOL(scx200_cb_base);
-
-/*
- Local variables:
- compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 6c1639836e06..6712f0d2eb37 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -48,7 +48,6 @@
#include <linux/crash_dump.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
-#include <linux/suspend.h>
#include <video/edid.h>
@@ -1315,8 +1314,10 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
probe_roms();
for (i = 0; i < e820.nr_map; i++) {
struct resource *res;
+#ifndef CONFIG_RESOURCES_64BIT
if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
continue;
+#endif
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
switch (e820.map[i].type) {
case E820_RAM: res->name = "System RAM"; break;
@@ -1427,111 +1428,6 @@ static void set_mca_bus(int x)
static void set_mca_bus(int x) { }
#endif
-#ifdef CONFIG_SOFTWARE_SUSPEND
-static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
-{
- struct page *page;
- while (start <= end) {
- page = pfn_to_page(start);
- SetPageNosave(page);
- start++;
- }
-}
-
-static void __init e820_nosave_reserved_pages(void)
-{
- int i;
- unsigned long r_start = 0, r_end = 0;
-
- /* Assume e820 map is sorted */
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- unsigned long start, end;
-
- start = PFN_DOWN(ei->addr);
- end = PFN_UP(ei->addr + ei->size);
- if (start >= end)
- continue;
- if (ei->type == E820_RESERVED)
- continue;
- r_end = start;
- /*
- * Highmem 'Reserved' pages are marked as reserved, swsusp
- * will not save/restore them, so we ignore these pages here.
- */
- if (r_end > max_low_pfn)
- r_end = max_low_pfn;
- if (r_end > r_start)
- mark_nosave_page_range(r_start, r_end-1);
- if (r_end >= max_low_pfn)
- break;
- r_start = end;
- }
-}
-
-static void __init e820_save_acpi_pages(void)
-{
- int i;
-
- /* Assume e820 map is sorted */
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- unsigned long start, end;
-
- start = ei->addr;
- end = ei->addr + ei->size;
- if (start >= end)
- continue;
- if (ei->type != E820_ACPI && ei->type != E820_NVS)
- continue;
- /*
- * If the region is below max_low_pfn, it will be
- * saved/restored by swsusp follow 'RAM' type.
- */
- if (start < (max_low_pfn << PAGE_SHIFT))
- start = max_low_pfn << PAGE_SHIFT;
- /*
- * Highmem pages (ACPI NVS/Data) are reserved, but swsusp
- * highmem save/restore will not save/restore them. We marked
- * them as arch saveable pages here
- */
- if (end > start)
- swsusp_add_arch_pages(start, end);
- }
-}
-
-extern char __start_rodata, __end_rodata;
-/*
- * BIOS reserved region/hole - no save/restore
- * ACPI NVS - save/restore
- * ACPI Data - this is a little tricky, the mem could be used by OS after OS
- * reads tables from the region, but anyway save/restore the memory hasn't any
- * side effect and Linux runtime module load/unload might use it.
- * kernel rodata - no save/restore (kernel rodata isn't changed)
- */
-static int __init mark_nosave_pages(void)
-{
- unsigned long pfn_start, pfn_end;
-
- /* FIXME: provide a version for efi BIOS */
- if (efi_enabled)
- return 0;
- /* BIOS reserved regions & holes */
- e820_nosave_reserved_pages();
-
- /* kernel rodata */
- pfn_start = PFN_UP(virt_to_phys(&__start_rodata));
- pfn_end = PFN_DOWN(virt_to_phys(&__end_rodata));
- mark_nosave_page_range(pfn_start, pfn_end-1);
-
- /* record ACPI Data/NVS as saveable */
- e820_save_acpi_pages();
-
- return 0;
-}
-core_initcall(mark_nosave_pages);
-#endif
-
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -1681,6 +1577,7 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
+ tsc_init();
}
static __init int add_pcspkr(void)
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 5c352c3a9e7f..43002cfb40c4 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
goto give_sigsegv;
}
- restorer = &__kernel_sigreturn;
+ restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
goto give_sigsegv;
/* Set up to return from userspace. */
- restorer = &__kernel_rt_sigreturn;
+ restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index d134e9643a58..c10789d7a9d3 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
- return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
+ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
+
+ switch (vector) {
+ default:
+ icr |= APIC_DM_FIXED | vector;
+ break;
+ case NMI_VECTOR:
+ icr |= APIC_DM_NMI;
+ break;
+ }
+ return icr;
}
static inline int __prepare_ICR2 (unsigned int mask)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bd0ca5c9f053..89e7315e539c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -52,6 +52,7 @@
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
+#include <asm/nmi.h>
#include <mach_apic.h>
#include <mach_wakecpu.h>
@@ -66,12 +67,6 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
#endif
-/* Package ID of each logical CPU */
-int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-
-/* Core ID of each logical CPU */
-int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-
/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
@@ -453,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu)
struct cpuinfo_x86 *c = cpu_data + cpu;
/*
* For perf, we return last level cache shared map.
- * TBD: when power saving sched policy is added, we will return
- * cpu_core_map when power saving policy is enabled
+ * And for power savings, we return cpu_core_map
*/
- return c->llc_shared_map;
+ if (sched_mc_power_savings || sched_smt_power_savings)
+ return cpu_core_map[cpu];
+ else
+ return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
@@ -472,8 +469,8 @@ set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (phys_proc_id[cpu] == phys_proc_id[i] &&
- cpu_core_id[cpu] == cpu_core_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
+ c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +497,7 @@ set_cpu_sibling_map(int cpu)
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
@@ -1055,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
struct warm_boot_cpu_info info;
struct work_struct task;
int apicid, ret;
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
apicid = x86_cpu_to_apicid[cpu];
if (apicid == BAD_APICID) {
@@ -1062,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
goto exit;
}
+ /*
+ * the CPU wasn't initialized at boot time, so allocate its GDT table here;
+ * cpu_init() will initialize it
+ */
+ if (!cpu_gdt_descr->address) {
+ cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
+ if (!cpu_gdt_descr->address) {
+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
info.complete = &done;
info.apicid = apicid;
info.cpu = cpu;
@@ -1339,8 +1349,8 @@ remove_siblinginfo(int cpu)
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
- phys_proc_id[cpu] = BAD_APICID;
- cpu_core_id[cpu] = BAD_APICID;
+ c[cpu].phys_proc_id = 0;
+ c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
}
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0bada1870bdf..713ba39d32c6 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -2,6 +2,8 @@
* linux/arch/i386/kernel/sysenter.c
*
* (C) Copyright 2002 Linus Torvalds
+ * Portions based on the vdso-randomization code from exec-shield:
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
*
* This file contains the needed initializations to support sysenter.
*/
@@ -13,12 +15,31 @@
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/module.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+ vdso_enabled = simple_strtoul(s, NULL, 0);
+
+ return 1;
+}
+
+__setup("vdso=", vdso_setup);
+
extern asmlinkage void sysenter_entry(void);
void enable_sep_cpu(void)
@@ -45,23 +66,120 @@ void enable_sep_cpu(void)
*/
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+static void *syscall_page;
int __init sysenter_setup(void)
{
- void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+ syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
- __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+#ifdef CONFIG_COMPAT_VDSO
+ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
+ printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+#else
+ /*
+ * In the non-compat case the ELF coredumping code needs the fixmap:
+ */
+ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+#endif
if (!boot_cpu_has(X86_FEATURE_SEP)) {
- memcpy(page,
+ memcpy(syscall_page,
&vsyscall_int80_start,
&vsyscall_int80_end - &vsyscall_int80_start);
return 0;
}
- memcpy(page,
+ memcpy(syscall_page,
&vsyscall_sysenter_start,
&vsyscall_sysenter_end - &vsyscall_sysenter_start);
return 0;
}
+
+static struct page *syscall_nopage(struct vm_area_struct *vma,
+ unsigned long adr, int *type)
+{
+ struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
+ get_page(p);
+ return p;
+}
+
+/* Prevent VMA merging */
+static void syscall_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall_vm_ops = {
+ .close = syscall_vma_close,
+ .nopage = syscall_nopage,
+};
+
+/* Defined in vsyscall-sysenter.S */
+extern void SYSENTER_RETURN;
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto up_fail;
+ }
+
+ vma->vm_start = addr;
+ vma->vm_end = addr + PAGE_SIZE;
+ /* MAYWRITE to allow gdb to COW and set breakpoints */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_ops = &syscall_vm_ops;
+ vma->vm_mm = mm;
+
+ ret = insert_vm_struct(mm, vma);
+ if (unlikely(ret)) {
+ kmem_cache_free(vm_area_cachep, vma);
+ goto up_fail;
+ }
+
+ current->mm->context.vdso = (void *)addr;
+ current_thread_info()->sysenter_return =
+ (void *)VDSO_SYM(&SYSENTER_RETURN);
+ mm->total_vm++;
+up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ return "[vdso]";
+ return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+ return NULL;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+ return 0;
+}
+
+int in_gate_area_no_task(unsigned long addr)
+{
+ return 0;
+}
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 9d3074759856..5f43d0410122 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -82,13 +82,6 @@ extern unsigned long wall_jiffies;
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-#include <asm/i8253.h>
-
-DEFINE_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
-
-struct timer_opts *cur_timer __read_mostly = &timer_none;
-
/*
* This is a special lock that is owned by the CPU and holds the index
* register we are working with. It is required for NMI access to the
@@ -118,99 +111,19 @@ void rtc_cmos_write(unsigned char val, unsigned char addr)
}
EXPORT_SYMBOL(rtc_cmos_write);
-/*
- * This version of gettimeofday has microsecond resolution
- * and better than microsecond precision on fast x86 machines with TSC.
- */
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long seq;
- unsigned long usec, sec;
- unsigned long max_ntp_tick;
-
- do {
- unsigned long lost;
-
- seq = read_seqbegin(&xtime_lock);
-
- usec = cur_timer->get_offset();
- lost = jiffies - wall_jiffies;
-
- /*
- * If time_adjust is negative then NTP is slowing the clock
- * so make sure not to go into next possible interval.
- * Better to lose some accuracy than have time go backwards..
- */
- if (unlikely(time_adjust < 0)) {
- max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
- usec = min(usec, max_ntp_tick);
-
- if (lost)
- usec += lost * max_ntp_tick;
- }
- else if (unlikely(lost))
- usec += lost * (USEC_PER_SEC / HZ);
-
- sec = xtime.tv_sec;
- usec += (xtime.tv_nsec / 1000);
- } while (read_seqretry(&xtime_lock, seq));
-
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
- nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
static int set_rtc_mmss(unsigned long nowtime)
{
int retval;
-
- WARN_ON(irqs_disabled());
+ unsigned long flags;
/* gets recalled with irq locally disabled */
- spin_lock_irq(&rtc_lock);
+ /* XXX - does irqsave resolve this? -johnstul */
+ spin_lock_irqsave(&rtc_lock, flags);
if (efi_enabled)
retval = efi_set_rtc_mmss(nowtime);
else
retval = mach_set_rtc_mmss(nowtime);
- spin_unlock_irq(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
return retval;
}
@@ -218,16 +131,6 @@ static int set_rtc_mmss(unsigned long nowtime)
int timer_ack;
-/* monotonic_clock(): returns # of nanoseconds passed since time_init()
- * Note: This function is required to return accurate
- * time even in the absence of multiple timer ticks.
- */
-unsigned long long monotonic_clock(void)
-{
- return cur_timer->monotonic_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -242,11 +145,21 @@ EXPORT_SYMBOL(profile_pc);
#endif
/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
*/
-static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because, as just said, we have irqs
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
+
#ifdef CONFIG_X86_IO_APIC
if (timer_ack) {
/*
@@ -279,27 +192,6 @@ static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
irq = inb_p( 0x61 ); /* read the current state */
outb_p( irq|0x80, 0x61 ); /* reset the IRQ */
}
-}
-
-/*
- * This is the same as the above, except we _also_ save the current
- * Time Stamp Counter value at the time of the timer interrupt, so that
- * we later on can estimate the time of day more exactly.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_seqlock(&xtime_lock);
-
- cur_timer->mark_offset();
-
- do_timer_interrupt(irq, regs);
write_sequnlock(&xtime_lock);
@@ -380,7 +272,6 @@ void notify_arch_cmos_timer(void)
static long clock_cmos_diff, sleep_start;
-static struct timer_opts *last_timer;
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
/*
@@ -389,10 +280,6 @@ static int timer_suspend(struct sys_device *dev, pm_message_t state)
clock_cmos_diff = -get_cmos_time();
clock_cmos_diff += get_seconds();
sleep_start = get_cmos_time();
- last_timer = cur_timer;
- cur_timer = &timer_none;
- if (last_timer->suspend)
- last_timer->suspend(state);
return 0;
}
@@ -415,10 +302,6 @@ static int timer_resume(struct sys_device *dev)
jiffies_64 += sleep_length;
wall_jiffies += sleep_length;
write_sequnlock_irqrestore(&xtime_lock, flags);
- if (last_timer->resume)
- last_timer->resume();
- cur_timer = last_timer;
- last_timer = NULL;
touch_softlockup_watchdog();
return 0;
}
@@ -460,9 +343,6 @@ static void __init hpet_time_init(void)
printk("Using HPET for base-timer\n");
}
- cur_timer = select_timer();
- printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
time_init_hook();
}
#endif
@@ -484,8 +364,5 @@ void __init time_init(void)
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
- cur_timer = select_timer();
- printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
time_init_hook();
}
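With select_timer() dropped from both time_init() and hpet_time_init(), the per-arch timer_opts selection is being retired; the new arch/i386/kernel/tsc.c added further down pulls in <linux/clocksource.h>, so timekeeping sources are expected to register with the generic clocksource framework instead. A hedged sketch of what such a registration looks like; the name, rating and shift below are illustrative values, not what this patch actually registers:

#include <linux/clocksource.h>
#include <asm/msr.h>			/* rdtscll() */

extern unsigned int tsc_khz;		/* declared in the new tsc.c */

static cycle_t example_read(void)
{
	unsigned long long now;

	rdtscll(now);			/* any free-running, monotonic counter works */
	return (cycle_t)now;
}

static struct clocksource example_clocksource = {
	.name	= "example",
	.rating	= 300,			/* higher rating wins the selection */
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.shift	= 22,
};

static int __init example_clocksource_init(void)
{
	/* mult/shift convert raw counter deltas to nanoseconds */
	example_clocksource.mult = clocksource_khz2mult(tsc_khz,
						example_clocksource.shift);
	return clocksource_register(&example_clocksource);
}

In the real conversion this would be wired up through an initcall once the counter frequency has been calibrated.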
diff --git a/arch/i386/kernel/timers/Makefile b/arch/i386/kernel/timers/Makefile
deleted file mode 100644
index 8fa12be658dd..000000000000
--- a/arch/i386/kernel/timers/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for x86 timers
-#
-
-obj-y := timer.o timer_none.o timer_tsc.o timer_pit.o common.o
-
-obj-$(CONFIG_X86_CYCLONE_TIMER) += timer_cyclone.o
-obj-$(CONFIG_HPET_TIMER) += timer_hpet.o
-obj-$(CONFIG_X86_PM_TIMER) += timer_pm.o
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
deleted file mode 100644
index 8163fe0cf1f0..000000000000
--- a/arch/i386/kernel/timers/common.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Common functions used across the timers go here
- */
-
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/timer.h>
-#include <asm/hpet.h>
-
-#include "mach_timer.h"
-
-/* ------ Calibrate the TSC -------
- * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
- * Too much 64-bit arithmetic here to do this cleanly in C, and for
- * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
- * output busy loop as low as possible. We avoid reading the CTC registers
- * directly because of the awkward 8-bit access mechanism of the 82C54
- * device.
- */
-
-#define CALIBRATE_TIME (5 * 1000020/HZ)
-
-unsigned long calibrate_tsc(void)
-{
- mach_prepare_counter();
-
- {
- unsigned long startlow, starthigh;
- unsigned long endlow, endhigh;
- unsigned long count;
-
- rdtsc(startlow,starthigh);
- mach_countup(&count);
- rdtsc(endlow,endhigh);
-
-
- /* Error: ECTCNEVERSET */
- if (count <= 1)
- goto bad_ctc;
-
- /* 64-bit subtract - gcc just messes up with long longs */
- __asm__("subl %2,%0\n\t"
- "sbbl %3,%1"
- :"=a" (endlow), "=d" (endhigh)
- :"g" (startlow), "g" (starthigh),
- "0" (endlow), "1" (endhigh));
-
- /* Error: ECPUTOOFAST */
- if (endhigh)
- goto bad_ctc;
-
- /* Error: ECPUTOOSLOW */
- if (endlow <= CALIBRATE_TIME)
- goto bad_ctc;
-
- __asm__("divl %2"
- :"=a" (endlow), "=d" (endhigh)
- :"r" (endlow), "0" (0), "1" (CALIBRATE_TIME));
-
- return endlow;
- }
-
- /*
- * The CTC wasn't reliable: we got a hit on the very first read,
- * or the CPU was so fast/slow that the quotient wouldn't fit in
- * 32 bits..
- */
-bad_ctc:
- return 0;
-}
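The quotient returned above is 2^32 * (1 / (TSC clocks per usec)), so a caller can turn a 32-bit TSC delta into microseconds with a single 32x32->64 multiply and keep only the high word; this mirrors the mull in get_offset_tsc() in timer_tsc.c below. A small illustration assuming a hypothetical 2 GHz part (2000 TSC clocks per usec); the helper name is invented for the example:

/*
 * quotient = 2^32 / 2000 = 2147483 (truncated)
 *
 * For a delta of 1,000,000 cycles (0.5 ms):
 *   (1000000 * 2147483) >> 32 = 499 usec, i.e. roughly delta / 2000
 */
static inline unsigned long tsc_delta_to_usec(unsigned long delta,
					      unsigned long quotient)
{
	return (unsigned long)(((unsigned long long)delta * quotient) >> 32);
}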
-
-#ifdef CONFIG_HPET_TIMER
-/* ------ Calibrate the TSC using HPET -------
- * Return 2^32 * (1 / (TSC clocks per usec)) for getting the CPU freq.
- * Second output is parameter 1 (when non NULL)
- * Set 2^32 * (1 / (tsc per HPET clk)) for delay_hpet().
- * calibrate_tsc() calibrates the processor TSC by comparing
- * it to the HPET timer of known frequency.
- * Too much 64-bit arithmetic here to do this cleanly in C
- */
-#define CALIBRATE_CNT_HPET (5 * hpet_tick)
-#define CALIBRATE_TIME_HPET (5 * KERNEL_TICK_USEC)
-
-unsigned long __devinit calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr)
-{
- unsigned long tsc_startlow, tsc_starthigh;
- unsigned long tsc_endlow, tsc_endhigh;
- unsigned long hpet_start, hpet_end;
- unsigned long result, remain;
-
- hpet_start = hpet_readl(HPET_COUNTER);
- rdtsc(tsc_startlow, tsc_starthigh);
- do {
- hpet_end = hpet_readl(HPET_COUNTER);
- } while ((hpet_end - hpet_start) < CALIBRATE_CNT_HPET);
- rdtsc(tsc_endlow, tsc_endhigh);
-
- /* 64-bit subtract - gcc just messes up with long longs */
- __asm__("subl %2,%0\n\t"
- "sbbl %3,%1"
- :"=a" (tsc_endlow), "=d" (tsc_endhigh)
- :"g" (tsc_startlow), "g" (tsc_starthigh),
- "0" (tsc_endlow), "1" (tsc_endhigh));
-
- /* Error: ECPUTOOFAST */
- if (tsc_endhigh)
- goto bad_calibration;
-
- /* Error: ECPUTOOSLOW */
- if (tsc_endlow <= CALIBRATE_TIME_HPET)
- goto bad_calibration;
-
- ASM_DIV64_REG(result, remain, tsc_endlow, 0, CALIBRATE_TIME_HPET);
- if (remain > (tsc_endlow >> 1))
- result++; /* rounding the result */
-
- if (tsc_hpet_quotient_ptr) {
- unsigned long tsc_hpet_quotient;
-
- ASM_DIV64_REG(tsc_hpet_quotient, remain, tsc_endlow, 0,
- CALIBRATE_CNT_HPET);
- if (remain > (tsc_endlow >> 1))
- tsc_hpet_quotient++; /* rounding the result */
- *tsc_hpet_quotient_ptr = tsc_hpet_quotient;
- }
-
- return result;
-bad_calibration:
- /*
- * the CPU was so fast/slow that the quotient wouldn't fit in
- * 32 bits..
- */
- return 0;
-}
-#endif
-
-
-unsigned long read_timer_tsc(void)
-{
- unsigned long retval;
- rdtscl(retval);
- return retval;
-}
-
-
-/* calculate cpu_khz */
-void init_cpu_khz(void)
-{
- if (cpu_has_tsc) {
- unsigned long tsc_quotient = calibrate_tsc();
- if (tsc_quotient) {
- /* report CPU clock rate in Hz.
- * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
- * clock/second. Our precision is about 100 ppm.
- */
- { unsigned long eax=0, edx=1000;
- __asm__("divl %2"
- :"=a" (cpu_khz), "=d" (edx)
- :"r" (tsc_quotient),
- "0" (eax), "1" (edx));
- printk("Detected %u.%03u MHz processor.\n",
- cpu_khz / 1000, cpu_khz % 1000);
- }
- }
- }
-}
-
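The divl in init_cpu_khz() above divides the 64-bit value 1000 * 2^32 (edx:eax preloaded with 1000 and 0) by that same quotient, which collapses to 1000 * (TSC clocks per usec), i.e. the CPU clock in kHz. With the same illustrative 2 GHz numbers:

    quotient = 2^32 / 2000              = 2147483
    cpu_khz  = (1000 * 2^32) / 2147483  ≈ 2000000    (printed as "Detected 2000.000 MHz processor.")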
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
deleted file mode 100644
index 7e39ed8e33f8..000000000000
--- a/arch/i386/kernel/timers/timer.c
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <asm/timer.h>
-
-#ifdef CONFIG_HPET_TIMER
-/*
- * HPET memory read is slower than tsc reads, but is more dependable as it
- * always runs at constant frequency and reduces complexity due to
- * cpufreq. So, we prefer HPET timer to tsc based one. Also, we cannot use
- * timer_pit when HPET is active. So, we default to timer_tsc.
- */
-#endif
-/* list of timers, ordered by preference, NULL terminated */
-static struct init_timer_opts* __initdata timers[] = {
-#ifdef CONFIG_X86_CYCLONE_TIMER
- &timer_cyclone_init,
-#endif
-#ifdef CONFIG_HPET_TIMER
- &timer_hpet_init,
-#endif
-#ifdef CONFIG_X86_PM_TIMER
- &timer_pmtmr_init,
-#endif
- &timer_tsc_init,
- &timer_pit_init,
- NULL,
-};
-
-static char clock_override[10] __initdata;
-
-static int __init clock_setup(char* str)
-{
- if (str)
- strlcpy(clock_override, str, sizeof(clock_override));
- return 1;
-}
-__setup("clock=", clock_setup);
-
-
-/* The chosen timesource has been found to be bad.
- * Fall back to a known good timesource (the PIT)
- */
-void clock_fallback(void)
-{
- cur_timer = &timer_pit;
-}
-
-/* iterates through the list of timers, returning the first
- * one that initializes successfully.
- */
-struct timer_opts* __init select_timer(void)
-{
- int i = 0;
-
- /* find most preferred working timer */
- while (timers[i]) {
- if (timers[i]->init)
- if (timers[i]->init(clock_override) == 0)
- return timers[i]->opts;
- ++i;
- }
-
- panic("select_timer: Cannot find a suitable timer\n");
- return NULL;
-}
-
-int read_current_timer(unsigned long *timer_val)
-{
- if (cur_timer->read_timer) {
- *timer_val = cur_timer->read_timer();
- return 0;
- }
- return -1;
-}
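The override string parsed by clock_setup() comes straight from the kernel command line; the values recognized by the init routines removed in this patch are pit, tsc, hpet, cyclone and pmtmr. Forcing the ACPI PM timer, for example, would look roughly like this in a GRUB entry (boot-loader syntax varies and the paths are placeholders):

    kernel /boot/vmlinuz ro root=/dev/hda1 clock=pmtmr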
diff --git a/arch/i386/kernel/timers/timer_cyclone.c b/arch/i386/kernel/timers/timer_cyclone.c
deleted file mode 100644
index 13892a65c941..000000000000
--- a/arch/i386/kernel/timers/timer_cyclone.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/* Cyclone-timer:
- * This code implements timer_ops for the cyclone counter found
- * on IBM x440, x360, and other Summit based systems.
- *
- * Copyright (C) 2002 IBM, John Stultz (johnstul@us.ibm.com)
- */
-
-
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-
-#include <asm/timer.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/fixmap.h>
-#include <asm/i8253.h>
-
-#include "io_ports.h"
-
-/* Number of usecs that the last interrupt was delayed */
-static int delay_at_last_interrupt;
-
-#define CYCLONE_CBAR_ADDR 0xFEB00CD0
-#define CYCLONE_PMCC_OFFSET 0x51A0
-#define CYCLONE_MPMC_OFFSET 0x51D0
-#define CYCLONE_MPCS_OFFSET 0x51A8
-#define CYCLONE_TIMER_FREQ 100000000
-#define CYCLONE_TIMER_MASK (((u64)1<<40)-1) /* 40 bit mask */
-int use_cyclone = 0;
-
-static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
-static u32 last_cyclone_low;
-static u32 last_cyclone_high;
-static unsigned long long monotonic_base;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-/* helper macro to atomically read both cyclone counter registers */
-#define read_cyclone_counter(low,high) \
- do{ \
- high = cyclone_timer[1]; low = cyclone_timer[0]; \
- } while (high != cyclone_timer[1]);
-
-
-static void mark_offset_cyclone(void)
-{
- unsigned long lost, delay;
- unsigned long delta = last_cyclone_low;
- int count;
- unsigned long long this_offset, last_offset;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
-
- spin_lock(&i8253_lock);
- read_cyclone_counter(last_cyclone_low,last_cyclone_high);
-
- /* read values for delay_at_last_interrupt */
- outb_p(0x00, 0x43); /* latch the count ASAP */
-
- count = inb_p(0x40); /* read the latched count */
- count |= inb(0x40) << 8;
-
- /*
- * VIA686a test code... reset the latch if count > max + 1
- * from timer_pit.c - cjb
- */
- if (count > LATCH) {
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff, PIT_CH0);
- outb(LATCH >> 8, PIT_CH0);
- count = LATCH - 1;
- }
- spin_unlock(&i8253_lock);
-
- /* lost tick compensation */
- delta = last_cyclone_low - delta;
- delta /= (CYCLONE_TIMER_FREQ/1000000);
- delta += delay_at_last_interrupt;
- lost = delta/(1000000/HZ);
- delay = delta%(1000000/HZ);
- if (lost >= 2)
- jiffies_64 += lost-1;
-
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
- monotonic_base += (this_offset - last_offset) & CYCLONE_TIMER_MASK;
- write_sequnlock(&monotonic_lock);
-
- /* calculate delay_at_last_interrupt */
- count = ((LATCH-1) - count) * TICK_SIZE;
- delay_at_last_interrupt = (count + LATCH/2) / LATCH;
-
-
- /* catch corner case where tick rollover occurred
- * between cyclone and pit reads (as noted when
- * usec delta is > 90% # of usecs/tick)
- */
- if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
- jiffies_64++;
-}
-
-static unsigned long get_offset_cyclone(void)
-{
- u32 offset;
-
- if(!cyclone_timer)
- return delay_at_last_interrupt;
-
- /* Read the cyclone timer */
- offset = cyclone_timer[0];
-
- /* .. relative to previous jiffy */
- offset = offset - last_cyclone_low;
-
- /* convert cyclone ticks to microseconds */
- /* XXX slow, can we speed this up? */
- offset = offset/(CYCLONE_TIMER_FREQ/1000000);
-
- /* our adjusted time offset in microseconds */
- return delay_at_last_interrupt + offset;
-}
-
-static unsigned long long monotonic_clock_cyclone(void)
-{
- u32 now_low, now_high;
- unsigned long long last_offset, this_offset, base;
- unsigned long long ret;
- unsigned seq;
-
- /* atomically read monotonic base & last_offset */
- do {
- seq = read_seqbegin(&monotonic_lock);
- last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
- base = monotonic_base;
- } while (read_seqretry(&monotonic_lock, seq));
-
-
- /* Read the cyclone counter */
- read_cyclone_counter(now_low,now_high);
- this_offset = ((unsigned long long)now_high<<32)|now_low;
-
- /* convert to nanoseconds */
- ret = base + ((this_offset - last_offset)&CYCLONE_TIMER_MASK);
- return ret * (1000000000 / CYCLONE_TIMER_FREQ);
-}
-
-static int __init init_cyclone(char* override)
-{
- u32* reg;
- u32 base; /* saved cyclone base address */
- u32 pageaddr; /* page that contains cyclone_timer register */
- u32 offset; /* offset from pageaddr to cyclone_timer register */
- int i;
-
- /* check clock override */
- if (override[0] && strncmp(override,"cyclone",7))
- return -ENODEV;
-
- /*make sure we're on a summit box*/
- if(!use_cyclone) return -ENODEV;
-
- printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
-
- /* find base address */
- pageaddr = (CYCLONE_CBAR_ADDR)&PAGE_MASK;
- offset = (CYCLONE_CBAR_ADDR)&(~PAGE_MASK);
- set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
- reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
- if(!reg){
- printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
- return -ENODEV;
- }
- base = *reg;
- if(!base){
- printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
- return -ENODEV;
- }
-
- /* setup PMCC */
- pageaddr = (base + CYCLONE_PMCC_OFFSET)&PAGE_MASK;
- offset = (base + CYCLONE_PMCC_OFFSET)&(~PAGE_MASK);
- set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
- reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
- if(!reg){
- printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
- return -ENODEV;
- }
- reg[0] = 0x00000001;
-
- /* setup MPCS */
- pageaddr = (base + CYCLONE_MPCS_OFFSET)&PAGE_MASK;
- offset = (base + CYCLONE_MPCS_OFFSET)&(~PAGE_MASK);
- set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
- reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
- if(!reg){
- printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
- return -ENODEV;
- }
- reg[0] = 0x00000001;
-
- /* map in cyclone_timer */
- pageaddr = (base + CYCLONE_MPMC_OFFSET)&PAGE_MASK;
- offset = (base + CYCLONE_MPMC_OFFSET)&(~PAGE_MASK);
- set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
- cyclone_timer = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
- if(!cyclone_timer){
- printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
- return -ENODEV;
- }
-
- /*quick test to make sure its ticking*/
- for(i=0; i<3; i++){
- u32 old = cyclone_timer[0];
- int stall = 100;
- while(stall--) barrier();
- if(cyclone_timer[0] == old){
- printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
- cyclone_timer = 0;
- return -ENODEV;
- }
- }
-
- init_cpu_khz();
-
- /* Everything looks good! */
- return 0;
-}
-
-
-static void delay_cyclone(unsigned long loops)
-{
- unsigned long bclock, now;
- if(!cyclone_timer)
- return;
- bclock = cyclone_timer[0];
- do {
- rep_nop();
- now = cyclone_timer[0];
- } while ((now-bclock) < loops);
-}
-/************************************************************/
-
-/* cyclone timer_opts struct */
-static struct timer_opts timer_cyclone = {
- .name = "cyclone",
- .mark_offset = mark_offset_cyclone,
- .get_offset = get_offset_cyclone,
- .monotonic_clock = monotonic_clock_cyclone,
- .delay = delay_cyclone,
-};
-
-struct init_timer_opts __initdata timer_cyclone_init = {
- .init = init_cyclone,
- .opts = &timer_cyclone,
-};
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
deleted file mode 100644
index 17a6fe7166e7..000000000000
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * This code largely moved from arch/i386/kernel/time.c.
- * See comments there for proper credits.
- */
-
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-
-#include <asm/timer.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-
-#include "io_ports.h"
-#include "mach_timer.h"
-#include <asm/hpet.h>
-
-static unsigned long hpet_usec_quotient __read_mostly; /* convert hpet clks to usec */
-static unsigned long tsc_hpet_quotient __read_mostly; /* convert tsc to hpet clks */
-static unsigned long hpet_last; /* hpet counter value at last tick*/
-static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
-static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
-static unsigned long long monotonic_base;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-/* convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use a khz divisor instead of mhz to keep better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-static unsigned long cyc2ns_scale __read_mostly;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline void set_cyc2ns_scale(unsigned long cpu_khz)
-{
- cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
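A quick numeric check of the scaling above, using an illustrative cpu_khz of 2,000,000 (a 2 GHz CPU):

    cyc2ns_scale      = (1000000 << 10) / 2000000  = 512
    cycles_2_ns(1000) = (1000 * 512) >> 10         = 500 ns    (0.5 ns per cycle, as expected)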
-
-static unsigned long long monotonic_clock_hpet(void)
-{
- unsigned long long last_offset, this_offset, base;
- unsigned seq;
-
- /* atomically read monotonic base & last_offset */
- do {
- seq = read_seqbegin(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- base = monotonic_base;
- } while (read_seqretry(&monotonic_lock, seq));
-
- /* Read the Time Stamp Counter */
- rdtscll(this_offset);
-
- /* return the value in ns */
- return base + cycles_2_ns(this_offset - last_offset);
-}
-
-static unsigned long get_offset_hpet(void)
-{
- register unsigned long eax, edx;
-
- eax = hpet_readl(HPET_COUNTER);
- eax -= hpet_last; /* hpet delta */
- eax = min(hpet_tick, eax);
- /*
- * Time offset = (hpet delta) * ( usecs per HPET clock )
- * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
- * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
- *
- * Where,
- * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
- *
- * Using a mull instead of a divl saves some cycles in critical path.
- */
- ASM_MUL64_REG(eax, edx, hpet_usec_quotient, eax);
-
- /* our adjusted time offset in microseconds */
- return edx;
-}
-
-static void mark_offset_hpet(void)
-{
- unsigned long long this_offset, last_offset;
- unsigned long offset;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- rdtsc(last_tsc_low, last_tsc_high);
-
- if (hpet_use_timer)
- offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
- else
- offset = hpet_readl(HPET_COUNTER);
- if (unlikely(((offset - hpet_last) >= (2*hpet_tick)) && (hpet_last != 0))) {
- int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1;
- jiffies_64 += lost_ticks;
- }
- hpet_last = offset;
-
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- monotonic_base += cycles_2_ns(this_offset - last_offset);
- write_sequnlock(&monotonic_lock);
-}
-
-static void delay_hpet(unsigned long loops)
-{
- unsigned long hpet_start, hpet_end;
- unsigned long eax;
-
- /* loops is the number of cpu cycles. Convert it to hpet clocks */
- ASM_MUL64_REG(eax, loops, tsc_hpet_quotient, loops);
-
- hpet_start = hpet_readl(HPET_COUNTER);
- do {
- rep_nop();
- hpet_end = hpet_readl(HPET_COUNTER);
- } while ((hpet_end - hpet_start) < (loops));
-}
-
-static struct timer_opts timer_hpet;
-
-static int __init init_hpet(char* override)
-{
- unsigned long result, remain;
-
- /* check clock override */
- if (override[0] && strncmp(override,"hpet",4))
- return -ENODEV;
-
- if (!is_hpet_enabled())
- return -ENODEV;
-
- printk("Using HPET for gettimeofday\n");
- if (cpu_has_tsc) {
- unsigned long tsc_quotient = calibrate_tsc_hpet(&tsc_hpet_quotient);
- if (tsc_quotient) {
- /* report CPU clock rate in Hz.
- * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
- * clock/second. Our precision is about 100 ppm.
- */
- { unsigned long eax=0, edx=1000;
- ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
- eax, edx);
- printk("Detected %u.%03u MHz processor.\n",
- cpu_khz / 1000, cpu_khz % 1000);
- }
- set_cyc2ns_scale(cpu_khz);
- }
- /* set this only when cpu_has_tsc */
- timer_hpet.read_timer = read_timer_tsc;
- }
-
- /*
- * Math to calculate hpet to usec multiplier
- * Look for the comments at get_offset_hpet()
- */
- ASM_DIV64_REG(result, remain, hpet_tick, 0, KERNEL_TICK_USEC);
- if (remain > (hpet_tick >> 1))
- result++; /* rounding the result */
- hpet_usec_quotient = result;
-
- return 0;
-}
-
-static int hpet_resume(void)
-{
- write_seqlock(&monotonic_lock);
- /* Assume this is the last mark offset time */
- rdtsc(last_tsc_low, last_tsc_high);
-
- if (hpet_use_timer)
- hpet_last = hpet_readl(HPET_T0_CMP) - hpet_tick;
- else
- hpet_last = hpet_readl(HPET_COUNTER);
- write_sequnlock(&monotonic_lock);
- return 0;
-}
-/************************************************************/
-
-/* tsc timer_opts struct */
-static struct timer_opts timer_hpet __read_mostly = {
- .name = "hpet",
- .mark_offset = mark_offset_hpet,
- .get_offset = get_offset_hpet,
- .monotonic_clock = monotonic_clock_hpet,
- .delay = delay_hpet,
- .resume = hpet_resume,
-};
-
-struct init_timer_opts __initdata timer_hpet_init = {
- .init = init_hpet,
- .opts = &timer_hpet,
-};
diff --git a/arch/i386/kernel/timers/timer_none.c b/arch/i386/kernel/timers/timer_none.c
deleted file mode 100644
index 4ea2f414dbbd..000000000000
--- a/arch/i386/kernel/timers/timer_none.c
+++ /dev/null
@@ -1,39 +0,0 @@
-#include <linux/init.h>
-#include <asm/timer.h>
-
-static void mark_offset_none(void)
-{
- /* nothing needed */
-}
-
-static unsigned long get_offset_none(void)
-{
- return 0;
-}
-
-static unsigned long long monotonic_clock_none(void)
-{
- return 0;
-}
-
-static void delay_none(unsigned long loops)
-{
- int d0;
- __asm__ __volatile__(
- "\tjmp 1f\n"
- ".align 16\n"
- "1:\tjmp 2f\n"
- ".align 16\n"
- "2:\tdecl %0\n\tjns 2b"
- :"=&a" (d0)
- :"0" (loops));
-}
-
-/* none timer_opts struct */
-struct timer_opts timer_none = {
- .name = "none",
- .mark_offset = mark_offset_none,
- .get_offset = get_offset_none,
- .monotonic_clock = monotonic_clock_none,
- .delay = delay_none,
-};
diff --git a/arch/i386/kernel/timers/timer_pit.c b/arch/i386/kernel/timers/timer_pit.c
deleted file mode 100644
index b9b6bd56b9ba..000000000000
--- a/arch/i386/kernel/timers/timer_pit.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * This code largely moved from arch/i386/kernel/time.c.
- * See comments there for proper credits.
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/sysdev.h>
-#include <linux/timex.h>
-#include <asm/delay.h>
-#include <asm/mpspec.h>
-#include <asm/timer.h>
-#include <asm/smp.h>
-#include <asm/io.h>
-#include <asm/arch_hooks.h>
-#include <asm/i8253.h>
-
-#include "do_timer.h"
-#include "io_ports.h"
-
-static int count_p; /* counter in get_offset_pit() */
-
-static int __init init_pit(char* override)
-{
- /* check clock override */
- if (override[0] && strncmp(override,"pit",3))
- printk(KERN_ERR "Warning: clock= override failed. Defaulting "
- "to PIT\n");
- init_cpu_khz();
- count_p = LATCH;
- return 0;
-}
-
-static void mark_offset_pit(void)
-{
- /* nothing needed */
-}
-
-static unsigned long long monotonic_clock_pit(void)
-{
- return 0;
-}
-
-static void delay_pit(unsigned long loops)
-{
- int d0;
- __asm__ __volatile__(
- "\tjmp 1f\n"
- ".align 16\n"
- "1:\tjmp 2f\n"
- ".align 16\n"
- "2:\tdecl %0\n\tjns 2b"
- :"=&a" (d0)
- :"0" (loops));
-}
-
-
-/* This function must be called with xtime_lock held.
- * It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
- *
- * However, the pc-audio speaker driver changes the divisor so that
- * it gets interrupted rather more often - it loads 64 into the
- * counter rather than 11932! This has an adverse impact on
- * do_gettimeoffset() -- it stops working! What is also not
- * good is that the interval that our timer function gets called
- * is no longer 10.0002 ms, but 9.9767 ms. To get around this
- * would require using a different timing source. Maybe someone
- * could use the RTC - I know that this can interrupt at frequencies
- * ranging from 8192Hz to 2Hz. If I had the energy, I'd somehow fix
- * it so that at startup, the timer code in sched.c would select
- * using either the RTC or the 8253 timer. The decision would be
- * based on whether there was any other device around that needed
- * to trample on the 8253. I'd set up the RTC to interrupt at 1024 Hz,
- * and then do some jiggery to have a version of do_timer that
- * advanced the clock by 1/1024 s. Every time that reached over 1/100
- * of a second, then do all the old code. If the time was kept correct
- * then do_gettimeoffset could just return 0 - there is no low order
- * divider that can be accessed.
- *
- * Ideally, you would be able to use the RTC for the speaker driver,
- * but it appears that the speaker driver really needs interrupt more
- * often than every 120 us or so.
- *
- * Anyway, this needs more thought.... pjsg (1993-08-28)
- *
- * If you are really that interested, you should be reading
- * comp.protocols.time.ntp!
- */
-
-static unsigned long get_offset_pit(void)
-{
- int count;
- unsigned long flags;
- static unsigned long jiffies_p = 0;
-
- /*
- * cache volatile jiffies temporarily; we have xtime_lock.
- */
- unsigned long jiffies_t;
-
- spin_lock_irqsave(&i8253_lock, flags);
- /* timer count may underflow right here */
- outb_p(0x00, PIT_MODE); /* latch the count ASAP */
-
- count = inb_p(PIT_CH0); /* read the latched count */
-
- /*
- * We do this guaranteed double memory access instead of a _p
- * postfix in the previous port access. Wheee, hackady hack
- */
- jiffies_t = jiffies;
-
- count |= inb_p(PIT_CH0) << 8;
-
- /* VIA686a test code... reset the latch if count > max + 1 */
- if (count > LATCH) {
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff, PIT_CH0);
- outb(LATCH >> 8, PIT_CH0);
- count = LATCH - 1;
- }
-
- /*
- * avoiding timer inconsistencies (they are rare, but they happen)...
- * there are two kinds of problems that must be avoided here:
- * 1. the timer counter underflows
- * 2. hardware problem with the timer, not giving us continuous time,
- * the counter does small "jumps" upwards on some Pentium systems,
- * (see c't 95/10 page 335 for Neptun bug.)
- */
-
- if( jiffies_t == jiffies_p ) {
- if( count > count_p ) {
- /* the nutcase */
- count = do_timer_overflow(count);
- }
- } else
- jiffies_p = jiffies_t;
-
- count_p = count;
-
- spin_unlock_irqrestore(&i8253_lock, flags);
-
- count = ((LATCH-1) - count) * TICK_SIZE;
- count = (count + LATCH/2) / LATCH;
-
- return count;
-}
-
-
-/* tsc timer_opts struct */
-struct timer_opts timer_pit = {
- .name = "pit",
- .mark_offset = mark_offset_pit,
- .get_offset = get_offset_pit,
- .monotonic_clock = monotonic_clock_pit,
- .delay = delay_pit,
-};
-
-struct init_timer_opts __initdata timer_pit_init = {
- .init = init_pit,
- .opts = &timer_pit,
-};
-
-void setup_pit_timer(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
- udelay(10);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- udelay(10);
- outb(LATCH >> 8 , PIT_CH0); /* MSB */
- spin_unlock_irqrestore(&i8253_lock, flags);
-}
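LATCH, programmed into channel 0 above, is defined elsewhere as (CLOCK_TICK_RATE + HZ/2) / HZ; with the i386 PIT input clock of 1,193,182 Hz and HZ=100 that is exactly the 11932 the long comment earlier in this file refers to:

    LATCH = (1193182 + 100/2) / 100 = 11932
    tick  = 11932 / 1193182 Hz      ≈ 10.0002 ms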
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
deleted file mode 100644
index 144e94a04933..000000000000
--- a/arch/i386/kernel/timers/timer_pm.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * (C) Dominik Brodowski <linux@brodo.de> 2003
- *
- * Driver to use the Power Management Timer (PMTMR) available in some
- * southbridges as primary timing source for the Linux kernel.
- *
- * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
- * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
- *
- * This file is licensed under the GPL v2.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <asm/types.h>
-#include <asm/timer.h>
-#include <asm/smp.h>
-#include <asm/io.h>
-#include <asm/arch_hooks.h>
-
-#include <linux/timex.h>
-#include "mach_timer.h"
-
-/* Number of PMTMR ticks expected during calibration run */
-#define PMTMR_TICKS_PER_SEC 3579545
-#define PMTMR_EXPECTED_RATE \
- ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
-
-
-/* The I/O port the PMTMR resides at.
- * The location is detected during setup_arch(),
- * in arch/i386/acpi/boot.c */
-u32 pmtmr_ioport = 0;
-
-
-/* value of the Power timer at last timer interrupt */
-static u32 offset_tick;
-static u32 offset_delay;
-
-static unsigned long long monotonic_base;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
-
-static int pmtmr_need_workaround __read_mostly = 1;
-
-/*helper function to safely read acpi pm timesource*/
-static inline u32 read_pmtmr(void)
-{
- if (pmtmr_need_workaround) {
- u32 v1, v2, v3;
-
- /* It has been reported that on various broken
- * chipsets (ICH4, PIIX4 and PIIX4E) the ACPI PM time
- * source is not latched, so you must read it multiple
- * times to ensure a safe value is read.
- */
- do {
- v1 = inl(pmtmr_ioport);
- v2 = inl(pmtmr_ioport);
- v3 = inl(pmtmr_ioport);
- } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
- || (v3 > v1 && v3 < v2));
-
- /* mask the output to 24 bits */
- return v2 & ACPI_PM_MASK;
- }
-
- return inl(pmtmr_ioport) & ACPI_PM_MASK;
-}
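A concrete case of what the retry loop above rejects: with a glitched middle read, say v1 = 0x10000, v2 = 0x18000 (spurious high bit) and v3 = 0x10001, the third condition (v3 > v1 && v3 < v2) fires, all three values are discarded and the reads repeat; a clean pass, where v2 lies between v1 and v3 (allowing for a single wrap), satisfies none of the conditions and v2 is returned.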
-
-
-/*
- * Some boards have the PMTMR running way too fast. We check
- * the PMTMR rate against PIT channel 2 to catch these cases.
- */
-static int verify_pmtmr_rate(void)
-{
- u32 value1, value2;
- unsigned long count, delta;
-
- mach_prepare_counter();
- value1 = read_pmtmr();
- mach_countup(&count);
- value2 = read_pmtmr();
- delta = (value2 - value1) & ACPI_PM_MASK;
-
- /* Check that the PMTMR delta is within 5% of what we expect */
- if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 ||
- delta > (PMTMR_EXPECTED_RATE * 21) / 20) {
- printk(KERN_INFO "PM-Timer running at invalid rate: %lu%% of normal - aborting.\n", 100UL * delta / PMTMR_EXPECTED_RATE);
- return -1;
- }
-
- return 0;
-}
-
-
-static int init_pmtmr(char* override)
-{
- u32 value1, value2;
- unsigned int i;
-
- if (override[0] && strncmp(override,"pmtmr",5))
- return -ENODEV;
-
- if (!pmtmr_ioport)
- return -ENODEV;
-
- /* we use the TSC for delay_pmtmr, so make sure it exists */
- if (!cpu_has_tsc)
- return -ENODEV;
-
- /* "verify" this timing source */
- value1 = read_pmtmr();
- for (i = 0; i < 10000; i++) {
- value2 = read_pmtmr();
- if (value2 == value1)
- continue;
- if (value2 > value1)
- goto pm_good;
- if ((value2 < value1) && ((value2) < 0xFFF))
- goto pm_good;
- printk(KERN_INFO "PM-Timer had inconsistent results: %#x, %#x - aborting.\n", value1, value2);
- return -EINVAL;
- }
- printk(KERN_INFO "PM-Timer had no reasonable result: %#x - aborting.\n", value1);
- return -ENODEV;
-
-pm_good:
- if (verify_pmtmr_rate() != 0)
- return -ENODEV;
-
- init_cpu_khz();
- return 0;
-}
-
-static inline u32 cyc2us(u32 cycles)
-{
- /* The Power Management Timer ticks at 3.579545 ticks per microsecond.
- * 1 / PM_TIMER_FREQUENCY == 0.27936511 =~ 286/1024 [error: 0.024%]
- *
- * Even with HZ = 100, delta is at maximum 35796 ticks, so it can
- * easily be multiplied with 286 (=0x11E) without having to fear
- * u32 overflows.
- */
- cycles *= 286;
- return (cycles >> 10);
-}
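The overflow and accuracy claims in the comment check out numerically:

    max delta at HZ=100:  3579545 / 100 ≈ 35796 PM-timer ticks per system tick
    35796 * 286  = 10,237,656                               (comfortably below 2^32)
    286 / 1024   = 0.279297  vs  1 / 3.579545 = 0.279365    (≈ 0.024% error)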
-
-/*
- * this gets called during each timer interrupt
- * - Called while holding the writer xtime_lock
- */
-static void mark_offset_pmtmr(void)
-{
- u32 lost, delta, last_offset;
- static int first_run = 1;
- last_offset = offset_tick;
-
- write_seqlock(&monotonic_lock);
-
- offset_tick = read_pmtmr();
-
- /* calculate tick interval */
- delta = (offset_tick - last_offset) & ACPI_PM_MASK;
-
- /* convert to usecs */
- delta = cyc2us(delta);
-
- /* update the monotonic base value */
- monotonic_base += delta * NSEC_PER_USEC;
- write_sequnlock(&monotonic_lock);
-
- /* convert to ticks */
- delta += offset_delay;
- lost = delta / (USEC_PER_SEC / HZ);
- offset_delay = delta % (USEC_PER_SEC / HZ);
-
-
- /* compensate for lost ticks */
- if (lost >= 2)
- jiffies_64 += lost - 1;
-
- /* don't calculate delay for first run,
- or if we've got less than a tick */
- if (first_run || (lost < 1)) {
- first_run = 0;
- offset_delay = 0;
- }
-}
-
-static int pmtmr_resume(void)
-{
- write_seqlock(&monotonic_lock);
- /* Assume this is the last mark offset time */
- offset_tick = read_pmtmr();
- write_sequnlock(&monotonic_lock);
- return 0;
-}
-
-static unsigned long long monotonic_clock_pmtmr(void)
-{
- u32 last_offset, this_offset;
- unsigned long long base, ret;
- unsigned seq;
-
-
- /* atomically read monotonic base & last_offset */
- do {
- seq = read_seqbegin(&monotonic_lock);
- last_offset = offset_tick;
- base = monotonic_base;
- } while (read_seqretry(&monotonic_lock, seq));
-
- /* Read the pmtmr */
- this_offset = read_pmtmr();
-
- /* convert to nanoseconds */
- ret = (this_offset - last_offset) & ACPI_PM_MASK;
- ret = base + (cyc2us(ret) * NSEC_PER_USEC);
- return ret;
-}
-
-static void delay_pmtmr(unsigned long loops)
-{
- unsigned long bclock, now;
-
- rdtscl(bclock);
- do
- {
- rep_nop();
- rdtscl(now);
- } while ((now-bclock) < loops);
-}
-
-
-/*
- * get the offset (in microseconds) from the last call to mark_offset()
- * - Called holding a reader xtime_lock
- */
-static unsigned long get_offset_pmtmr(void)
-{
- u32 now, offset, delta = 0;
-
- offset = offset_tick;
- now = read_pmtmr();
- delta = (now - offset)&ACPI_PM_MASK;
-
- return (unsigned long) offset_delay + cyc2us(delta);
-}
-
-
-/* acpi timer_opts struct */
-static struct timer_opts timer_pmtmr = {
- .name = "pmtmr",
- .mark_offset = mark_offset_pmtmr,
- .get_offset = get_offset_pmtmr,
- .monotonic_clock = monotonic_clock_pmtmr,
- .delay = delay_pmtmr,
- .read_timer = read_timer_tsc,
- .resume = pmtmr_resume,
-};
-
-struct init_timer_opts __initdata timer_pmtmr_init = {
- .init = init_pmtmr,
- .opts = &timer_pmtmr,
-};
-
-#ifdef CONFIG_PCI
-/*
- * PIIX4 Errata:
- *
- * The power management timer may return improper results when read.
- * Although the timer value settles properly after incrementing,
- * while incrementing there is a 3 ns window every 69.8 ns where the
- * timer value is indeterminate (a 4.2% chance that the data will be
- * incorrect when read). As a result, the ACPI free running count up
- * timer specification is violated due to erroneous reads.
- */
-static int __init pmtmr_bug_check(void)
-{
- static struct pci_device_id gray_list[] __initdata = {
- /* these chipsets may have bug. */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82801DB_0) },
- { },
- };
- struct pci_dev *dev;
- int pmtmr_has_bug = 0;
- u8 rev;
-
- if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround)
- return 0;
-
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
- if (dev) {
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
- /* the bug has been fixed in PIIX4M */
- if (rev < 3) {
- printk(KERN_WARNING "* Found PM-Timer Bug on this "
- "chipset. Due to workarounds for a bug,\n"
- "* this time source is slow. Consider trying "
- "other time sources (clock=)\n");
- pmtmr_has_bug = 1;
- }
- pci_dev_put(dev);
- }
-
- if (pci_dev_present(gray_list)) {
- printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due"
- " to workarounds for a bug,\n"
- "* this time source is slow. If you are sure your timer"
- " does not have\n"
- "* this bug, please use \"pmtmr_good\" to disable the "
- "workaround\n");
- pmtmr_has_bug = 1;
- }
-
- if (!pmtmr_has_bug)
- pmtmr_need_workaround = 0;
-
- return 0;
-}
-device_initcall(pmtmr_bug_check);
-#endif
-
-static int __init pmtr_good_setup(char *__str)
-{
- pmtmr_need_workaround = 0;
- return 1;
-}
-__setup("pmtmr_good", pmtr_good_setup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
-MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86");
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
deleted file mode 100644
index f1187ddb0d0f..000000000000
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ /dev/null
@@ -1,617 +0,0 @@
-/*
- * This code largely moved from arch/i386/kernel/time.c.
- * See comments there for proper credits.
- *
- * 2004-06-25 Jesper Juhl
- * moved mark_offset_tsc below cpufreq_delayed_get to avoid gcc 3.4
- * failing to inline.
- */
-
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/cpufreq.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-
-#include <asm/timer.h>
-#include <asm/io.h>
-/* processor.h for the tsc_disable flag */
-#include <asm/processor.h>
-
-#include "io_ports.h"
-#include "mach_timer.h"
-
-#include <asm/hpet.h>
-#include <asm/i8253.h>
-
-#ifdef CONFIG_HPET_TIMER
-static unsigned long hpet_usec_quotient;
-static unsigned long hpet_last;
-static struct timer_opts timer_tsc;
-#endif
-
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __devinitdata = 0;
-
-static int use_tsc;
-/* Number of usecs that the last interrupt was delayed */
-static int delay_at_last_interrupt;
-
-static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
-static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
-static unsigned long long monotonic_base;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-/* Avoid compensating for lost ticks before TSCs are synched */
-static int detect_lost_ticks;
-static int __init start_lost_tick_compensation(void)
-{
- detect_lost_ticks = 1;
- return 0;
-}
-late_initcall(start_lost_tick_compensation);
-
-/* convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use a khz divisor instead of mhz to keep better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-static unsigned long cyc2ns_scale __read_mostly;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline void set_cyc2ns_scale(unsigned long cpu_khz)
-{
- cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
-
-static int count2; /* counter for mark_offset_tsc() */
-
-/* Cached *multiplier* to convert TSC counts to microseconds.
- * (see the equation below).
- * Equal to 2^32 * (1 / (clocks per usec) ).
- * Initialized in time_init.
- */
-static unsigned long fast_gettimeoffset_quotient;
-
-static unsigned long get_offset_tsc(void)
-{
- register unsigned long eax, edx;
-
- /* Read the Time Stamp Counter */
-
- rdtsc(eax,edx);
-
- /* .. relative to previous jiffy (32 bits is enough) */
- eax -= last_tsc_low; /* tsc_low delta */
-
- /*
- * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
- * = (tsc_low delta) * (usecs_per_clock)
- * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
- *
- * Using a mull instead of a divl saves up to 31 clock cycles
- * in the critical path.
- */
-
- __asm__("mull %2"
- :"=a" (eax), "=d" (edx)
- :"rm" (fast_gettimeoffset_quotient),
- "0" (eax));
-
- /* our adjusted time offset in microseconds */
- return delay_at_last_interrupt + edx;
-}
-
-static unsigned long long monotonic_clock_tsc(void)
-{
- unsigned long long last_offset, this_offset, base;
- unsigned seq;
-
- /* atomically read monotonic base & last_offset */
- do {
- seq = read_seqbegin(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- base = monotonic_base;
- } while (read_seqretry(&monotonic_lock, seq));
-
- /* Read the Time Stamp Counter */
- rdtscll(this_offset);
-
- /* return the value in ns */
- return base + cycles_2_ns(this_offset - last_offset);
-}
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
- unsigned long long this_offset;
-
- /*
- * In the NUMA case we don't use the TSC as they are not
- * synchronized across all CPUs.
- */
-#ifndef CONFIG_NUMA
- if (!use_tsc)
-#endif
- /* no locking but a rare wrong value is not a big deal */
- return jiffies_64 * (1000000000 / HZ);
-
- /* Read the Time Stamp Counter */
- rdtscll(this_offset);
-
- /* return the value in ns */
- return cycles_2_ns(this_offset);
-}
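When the fallback above is taken, sched_clock() only advances in whole-jiffy steps of 10^9 / HZ nanoseconds, which is why the TSC path is preferred whenever the counter is usable:

    HZ = 100   ->  10,000,000 ns  (10 ms) per step
    HZ = 250   ->   4,000,000 ns   (4 ms) per step
    HZ = 1000  ->   1,000,000 ns   (1 ms) per step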
-
-static void delay_tsc(unsigned long loops)
-{
- unsigned long bclock, now;
-
- rdtscl(bclock);
- do
- {
- rep_nop();
- rdtscl(now);
- } while ((now-bclock) < loops);
-}
-
-#ifdef CONFIG_HPET_TIMER
-static void mark_offset_tsc_hpet(void)
-{
- unsigned long long this_offset, last_offset;
- unsigned long offset, temp, hpet_current;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- /*
- * It is important that these two operations happen almost at
- * the same time. We do the RDTSC stuff first, since it's
- * faster. To avoid any inconsistencies, we need interrupts
- * disabled locally.
- */
- /*
- * Interrupts are just disabled locally since the timer irq
- * has the SA_INTERRUPT flag set. -arca
- */
- /* read Pentium cycle counter */
-
- hpet_current = hpet_readl(HPET_COUNTER);
- rdtsc(last_tsc_low, last_tsc_high);
-
- /* lost tick compensation */
- offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
- if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
- && detect_lost_ticks) {
- int lost_ticks = (offset - hpet_last) / hpet_tick;
- jiffies_64 += lost_ticks;
- }
- hpet_last = hpet_current;
-
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- monotonic_base += cycles_2_ns(this_offset - last_offset);
- write_sequnlock(&monotonic_lock);
-
- /* calculate delay_at_last_interrupt */
- /*
- * Time offset = (hpet delta) * ( usecs per HPET clock )
- * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
- * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
- * Where,
- * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
- */
- delay_at_last_interrupt = hpet_current - offset;
- ASM_MUL64_REG(temp, delay_at_last_interrupt,
- hpet_usec_quotient, delay_at_last_interrupt);
-}
-#endif
-
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
- unsigned int cpu;
- for_each_online_cpu(cpu) {
- cpufreq_get(cpu);
- }
- cpufreq_delayed_issched = 0;
-}
-
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void)
-{
- if (cpufreq_init && !cpufreq_delayed_issched) {
- cpufreq_delayed_issched = 1;
- printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
- schedule_work(&cpufreq_delayed_get_work);
- }
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned int cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
-
- if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
- write_seqlock_irq(&xtime_lock);
- if (!ref_freq) {
- if (!freq->old){
- ref_freq = freq->new;
- goto end;
- }
- ref_freq = freq->old;
- loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
- fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
- cpu_khz_ref = cpu_khz;
-#endif
- }
-
- if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
- (val == CPUFREQ_RESUMECHANGE)) {
- if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
- if (cpu_khz)
- cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
- if (use_tsc) {
- if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
- fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
- set_cyc2ns_scale(cpu_khz);
- }
- }
-#endif
- }
-
-end:
- if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
- write_sequnlock_irq(&xtime_lock);
-
- return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
- .notifier_call = time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
- int ret;
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
- ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (!ret)
- cpufreq_init = 1;
- return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif
-
-int recalibrate_cpu_khz(void)
-{
-#ifndef CONFIG_SMP
- unsigned int cpu_khz_old = cpu_khz;
-
- if (cpu_has_tsc) {
- local_irq_disable();
- init_cpu_khz();
- local_irq_enable();
- cpu_data[0].loops_per_jiffy =
- cpufreq_scale(cpu_data[0].loops_per_jiffy,
- cpu_khz_old,
- cpu_khz);
- return 0;
- } else
- return -ENODEV;
-#else
- return -ENODEV;
-#endif
-}
-EXPORT_SYMBOL(recalibrate_cpu_khz);
-
-static void mark_offset_tsc(void)
-{
- unsigned long lost,delay;
- unsigned long delta = last_tsc_low;
- int count;
- int countmp;
- static int count1 = 0;
- unsigned long long this_offset, last_offset;
- static int lost_count = 0;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- /*
- * It is important that these two operations happen almost at
- * the same time. We do the RDTSC stuff first, since it's
- * faster. To avoid any inconsistencies, we need interrupts
- * disabled locally.
- */
-
- /*
- * Interrupts are just disabled locally since the timer irq
- * has the SA_INTERRUPT flag set. -arca
- */
-
- /* read Pentium cycle counter */
-
- rdtsc(last_tsc_low, last_tsc_high);
-
- spin_lock(&i8253_lock);
- outb_p(0x00, PIT_MODE); /* latch the count ASAP */
-
- count = inb_p(PIT_CH0); /* read the latched count */
- count |= inb(PIT_CH0) << 8;
-
- /*
- * VIA686a test code... reset the latch if count > max + 1
- * from timer_pit.c - cjb
- */
- if (count > LATCH) {
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff, PIT_CH0);
- outb(LATCH >> 8, PIT_CH0);
- count = LATCH - 1;
- }
-
- spin_unlock(&i8253_lock);
-
- if (pit_latch_buggy) {
- /* get the center value of the last 3 timer latch reads */
- if ((count2 >= count && count >= count1)
- || (count1 >= count && count >= count2)) {
- count2 = count1; count1 = count;
- } else if ((count1 >= count2 && count2 >= count)
- || (count >= count2 && count2 >= count1)) {
- countmp = count;count = count2;
- count2 = count1;count1 = countmp;
- } else {
- count2 = count1; count1 = count; count = count1;
- }
- }
-
- /* lost tick compensation */
- delta = last_tsc_low - delta;
- {
- register unsigned long eax, edx;
- eax = delta;
- __asm__("mull %2"
- :"=a" (eax), "=d" (edx)
- :"rm" (fast_gettimeoffset_quotient),
- "0" (eax));
- delta = edx;
- }
- delta += delay_at_last_interrupt;
- lost = delta/(1000000/HZ);
- delay = delta%(1000000/HZ);
- if (lost >= 2 && detect_lost_ticks) {
- jiffies_64 += lost-1;
-
- /* sanity check to ensure we're not always losing ticks */
- if (lost_count++ > 100) {
- printk(KERN_WARNING "Losing too many ticks!\n");
- printk(KERN_WARNING "TSC cannot be used as a timesource. \n");
- printk(KERN_WARNING "Possible reasons for this are:\n");
- printk(KERN_WARNING " You're running with Speedstep,\n");
- printk(KERN_WARNING " You don't have DMA enabled for your hard disk (see hdparm),\n");
- printk(KERN_WARNING " Incorrect TSC synchronization on an SMP system (see dmesg).\n");
- printk(KERN_WARNING "Falling back to a sane timesource now.\n");
-
- clock_fallback();
- }
- /* ... but give the TSC a fair chance */
- if (lost_count > 25)
- cpufreq_delayed_get();
- } else
- lost_count = 0;
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- monotonic_base += cycles_2_ns(this_offset - last_offset);
- write_sequnlock(&monotonic_lock);
-
- /* calculate delay_at_last_interrupt */
- count = ((LATCH-1) - count) * TICK_SIZE;
- delay_at_last_interrupt = (count + LATCH/2) / LATCH;
-
- /* catch corner case where tick rollover occurred
- * between tsc and pit reads (as noted when
- * usec delta is > 90% # of usecs/tick)
- */
- if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
- jiffies_64++;
-}
-
-static int __init init_tsc(char* override)
-{
-
- /* check clock override */
- if (override[0] && strncmp(override,"tsc",3)) {
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled()) {
- printk(KERN_ERR "Warning: clock= override failed. Defaulting to tsc\n");
- } else
-#endif
- {
- return -ENODEV;
- }
- }
-
- /*
- * If we have APM enabled or the CPU clock speed is variable
- * (CPU stops clock on HLT or slows clock to save power)
- * then the TSC timestamps may diverge by up to 1 jiffy from
- * 'real time' but nothing will break.
- * The most frequent case is that the CPU is "woken" from a halt
- * state by the timer interrupt itself, so we get 0 error. In the
- * rare cases where a driver would "wake" the CPU and request a
- * timestamp, the maximum error is < 1 jiffy. But timestamps are
- * still perfectly ordered.
- * Note that the TSC counter will be reset if APM suspends
- * to disk; this won't break the kernel, though, 'cuz we're
- * smart. See arch/i386/kernel/apm.c.
- */
- /*
- * Firstly we have to do a CPU check for chips with
- * a potentially buggy TSC. At this point we haven't run
- * the ident/bugs checks so we must run this hook as it
- * may turn off the TSC flag.
- *
- * NOTE: this doesn't yet handle SMP 486 machines where only
- * some CPUs have a TSC. That's never worked and nobody has
- * moaned if you have the only one in the world - you fix it!
- */
-
- count2 = LATCH; /* initialize counter for mark_offset_tsc() */
-
- if (cpu_has_tsc) {
- unsigned long tsc_quotient;
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled() && hpet_use_timer) {
- unsigned long result, remain;
- printk("Using TSC for gettimeofday\n");
- tsc_quotient = calibrate_tsc_hpet(NULL);
- timer_tsc.mark_offset = &mark_offset_tsc_hpet;
- /*
- * Math to calculate hpet to usec multiplier
- * Look for the comments at get_offset_tsc_hpet()
- */
- ASM_DIV64_REG(result, remain, hpet_tick,
- 0, KERNEL_TICK_USEC);
- if (remain > (hpet_tick >> 1))
- result++; /* rounding the result */
-
- hpet_usec_quotient = result;
- } else
-#endif
- {
- tsc_quotient = calibrate_tsc();
- }
-
- if (tsc_quotient) {
- fast_gettimeoffset_quotient = tsc_quotient;
- use_tsc = 1;
- /*
- * We could be more selective here I suspect
- * and just enable this for the next intel chips ?
- */
- /* report CPU clock rate in Hz.
- * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
- * clock/second. Our precision is about 100 ppm.
- */
- { unsigned long eax=0, edx=1000;
- __asm__("divl %2"
- :"=a" (cpu_khz), "=d" (edx)
- :"r" (tsc_quotient),
- "0" (eax), "1" (edx));
- printk("Detected %u.%03u MHz processor.\n",
- cpu_khz / 1000, cpu_khz % 1000);
- }
- set_cyc2ns_scale(cpu_khz);
- return 0;
- }
- }
- return -ENODEV;
-}
-
-static int tsc_resume(void)
-{
- write_seqlock(&monotonic_lock);
- /* Assume this is the last mark offset time */
- rdtsc(last_tsc_low, last_tsc_high);
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled() && hpet_use_timer)
- hpet_last = hpet_readl(HPET_COUNTER);
-#endif
- write_sequnlock(&monotonic_lock);
- return 0;
-}
-
-#ifndef CONFIG_X86_TSC
-/* disable flag for tsc. Takes effect by clearing the TSC cpu flag
- * in cpu/common.c */
-static int __init tsc_setup(char *str)
-{
- tsc_disable = 1;
- return 1;
-}
-#else
-static int __init tsc_setup(char *str)
-{
- printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
- "cannot disable TSC.\n");
- return 1;
-}
-#endif
-__setup("notsc", tsc_setup);
-
-
-
-/************************************************************/
-
-/* tsc timer_opts struct */
-static struct timer_opts timer_tsc = {
- .name = "tsc",
- .mark_offset = mark_offset_tsc,
- .get_offset = get_offset_tsc,
- .monotonic_clock = monotonic_clock_tsc,
- .delay = delay_tsc,
- .read_timer = read_timer_tsc,
- .resume = tsc_resume,
-};
-
-struct init_timer_opts __initdata timer_tsc_init = {
- .init = init_tsc,
- .opts = &timer_tsc,
-};
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 296355292c7c..e2e281d4bcc8 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -32,15 +32,8 @@
static struct i386_cpu cpu_devices[NR_CPUS];
-int arch_register_cpu(int num){
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- if (node_online(node))
- parent = &node_devices[node].node;
-#endif /* CONFIG_NUMA */
-
+int arch_register_cpu(int num)
+{
/*
* CPU0 cannot be offlined due to several
* restrictions and assumptions in kernel. This basically
@@ -50,21 +43,13 @@ int arch_register_cpu(int num){
if (!num)
cpu_devices[num].cpu.no_control = 1;
- return register_cpu(&cpu_devices[num].cpu, num, parent);
+ return register_cpu(&cpu_devices[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_unregister_cpu(int num) {
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- if (node_online(node))
- parent = &node_devices[node].node;
-#endif /* CONFIG_NUMA */
-
- return unregister_cpu(&cpu_devices[num].cpu, parent);
+ return unregister_cpu(&cpu_devices[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu);
#ifdef CONFIG_NUMA
#include <linux/mmzone.h>
-#include <asm/node.h>
-
-struct i386_node node_devices[MAX_NUMNODES];
static int __init topology_init(void)
{
int i;
for_each_online_node(i)
- arch_register_node(i);
+ register_one_node(i);
for_each_present_cpu(i)
arch_register_cpu(i);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index dcc14477af1f..78464097470a 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -28,6 +28,7 @@
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
+#include <linux/unwind.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
@@ -47,7 +48,7 @@
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
-
+#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>
@@ -92,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
+static int call_trace = 1;
ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb)
@@ -170,7 +172,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
return ebp;
}
-static void show_trace_log_lvl(struct task_struct *task,
+static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
+{
+ int n = 0;
+ int printed = 0; /* nr of entries already printed on current line */
+
+ while (unwind(info) == 0 && UNW_PC(info)) {
+ ++n;
+ printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed);
+ if (arch_unw_user_mode(info))
+ break;
+ }
+ if (printed)
+ printk("\n");
+ return n;
+}
+
+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl)
{
unsigned long ebp;
@@ -178,6 +196,26 @@ static void show_trace_log_lvl(struct task_struct *task,
if (!task)
task = current;
+ if (call_trace >= 0) {
+ int unw_ret = 0;
+ struct unwind_frame_info info;
+
+ if (regs) {
+ if (unwind_init_frame_info(&info, task, regs) == 0)
+ unw_ret = show_trace_unwind(&info, log_lvl);
+ } else if (task == current)
+ unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
+ else {
+ if (unwind_init_blocked(&info, task) == 0)
+ unw_ret = show_trace_unwind(&info, log_lvl);
+ }
+ if (unw_ret > 0) {
+ if (call_trace > 0)
+ return;
+ printk("%sLegacy call trace:\n", log_lvl);
+ }
+ }
+
if (task == current) {
/* Grab ebp right from our regs */
asm ("movl %%ebp, %0" : "=r" (ebp) : );
@@ -198,13 +236,13 @@ static void show_trace_log_lvl(struct task_struct *task,
}
}
-void show_trace(struct task_struct *task, unsigned long * stack)
+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
{
- show_trace_log_lvl(task, stack, "");
+ show_trace_log_lvl(task, regs, stack, "");
}
-static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
- char *log_lvl)
+static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *esp, char *log_lvl)
{
unsigned long *stack;
int i;
@@ -225,13 +263,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
printk("%08lx ", *stack++);
}
printk("\n%sCall Trace:\n", log_lvl);
- show_trace_log_lvl(task, esp, log_lvl);
+ show_trace_log_lvl(task, regs, esp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
printk(" ");
- show_stack_log_lvl(task, esp, "");
+ show_stack_log_lvl(task, NULL, esp, "");
}
/*
@@ -241,7 +279,7 @@ void dump_stack(void)
{
unsigned long stack;
- show_trace(current, &stack);
+ show_trace(current, NULL, &stack);
}
EXPORT_SYMBOL(dump_stack);
@@ -285,7 +323,7 @@ void show_registers(struct pt_regs *regs)
u8 __user *eip;
printk("\n" KERN_EMERG "Stack: ");
- show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);
+ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
printk(KERN_EMERG "Code: ");
@@ -1215,3 +1253,15 @@ static int __init kstack_setup(char *s)
return 1;
}
__setup("kstack=", kstack_setup);
+
+static int __init call_trace_setup(char *s)
+{
+ if (strcmp(s, "old") == 0)
+ call_trace = -1;
+ else if (strcmp(s, "both") == 0)
+ call_trace = 0;
+ else if (strcmp(s, "new") == 0)
+ call_trace = 1;
+ return 1;
+}
+__setup("call_trace=", call_trace_setup);
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
new file mode 100644
index 000000000000..7e0d8dab2075
--- /dev/null
+++ b/arch/i386/kernel/tsc.c
@@ -0,0 +1,478 @@
+/*
+ * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
+ * which was originally moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/workqueue.h>
+#include <linux/cpufreq.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+
+#include <asm/delay.h>
+#include <asm/tsc.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+
+#include "mach_timer.h"
+
+/*
+ * On some systems the TSC frequency does not
+ * change with the cpu frequency. So we need
+ * an extra value to store the TSC freq
+ */
+unsigned int tsc_khz;
+
+int tsc_disable __cpuinitdata = 0;
+
+#ifdef CONFIG_X86_TSC
+static int __init tsc_setup(char *str)
+{
+ printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
+ "cannot disable TSC.\n");
+ return 1;
+}
+#else
+/*
+ * disable flag for tsc. Takes effect by clearing the TSC cpu flag
+ * in cpu/common.c
+ */
+static int __init tsc_setup(char *str)
+{
+ tsc_disable = 1;
+
+ return 1;
+}
+#endif
+
+__setup("notsc", tsc_setup);
+
+/*
+ * code to mark and check if the TSC is unstable
+ * due to cpufreq or due to unsynced TSCs
+ */
+static int tsc_unstable;
+
+static inline int check_tsc_unstable(void)
+{
+ return tsc_unstable;
+}
+
+void mark_tsc_unstable(void)
+{
+ tsc_unstable = 1;
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We can use a khz divisor instead of mhz to keep better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+{
+ cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
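
To make the scaling math above concrete, here is a minimal standalone sketch of the cyc2ns conversion, assuming a 2 GHz TSC (2,000,000 kHz) purely as an example; it is illustrative only and not part of the patch.

/* Sketch of the cyc2ns fixed-point conversion; the 2 GHz clock is assumed. */
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10          /* 2^10, as in tsc.c above */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
        set_cyc2ns_scale(2000000);      /* assume a 2 GHz TSC: 2,000,000 kHz */
        /* 2,000,000,000 cycles at 2 GHz should print ~1e9 ns (one second) */
        printf("%llu ns\n", cycles_2_ns(2000000000ULL));
        return 0;
}
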
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long this_offset;
+
+ /*
+	 * in the NUMA case we don't use the TSC, as TSCs are not
+ * synchronized across all CPUs.
+ */
+#ifndef CONFIG_NUMA
+ if (!cpu_khz || check_tsc_unstable())
+#endif
+ /* no locking but a rare wrong value is not a big deal */
+ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+
+ /* read the Time Stamp Counter: */
+ rdtscll(this_offset);
+
+ /* return the value in ns */
+ return cycles_2_ns(this_offset);
+}
+
+static unsigned long calculate_cpu_khz(void)
+{
+ unsigned long long start, end;
+ unsigned long count;
+ u64 delta64;
+ int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* run 3 times to ensure the cache is warm */
+ for (i = 0; i < 3; i++) {
+ mach_prepare_counter();
+ rdtscll(start);
+ mach_countup(&count);
+ rdtscll(end);
+ }
+ /*
+ * Error: ECTCNEVERSET
+ * The CTC wasn't reliable: we got a hit on the very first read,
+ * or the CPU was so fast/slow that the quotient wouldn't fit in
+ * 32 bits..
+ */
+ if (count <= 1)
+ goto err;
+
+ delta64 = end - start;
+
+ /* cpu freq too fast: */
+ if (delta64 > (1ULL<<32))
+ goto err;
+
+ /* cpu freq too slow: */
+ if (delta64 <= CALIBRATE_TIME_MSEC)
+ goto err;
+
+ delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
+ do_div(delta64,CALIBRATE_TIME_MSEC);
+
+ local_irq_restore(flags);
+ return (unsigned long)delta64;
+err:
+ local_irq_restore(flags);
+ return 0;
+}
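
A small sketch of the calibration arithmetic: since kHz is numerically cycles per millisecond, the TSC cycle count over the timed window divided by CALIBRATE_TIME_MSEC gives cpu_khz directly. The 30 ms window and the cycle count below are assumed example values, not taken from the patch.

/* Rounding division used by calculate_cpu_khz(), with made-up example values. */
#include <stdio.h>
#include <stdint.h>

#define CALIBRATE_TIME_MSEC 30          /* assumed example window */

int main(void)
{
        uint64_t delta = 60000000ULL;           /* TSC cycles over the window */

        delta += CALIBRATE_TIME_MSEC / 2;       /* round, as the truncating do_div() would */
        delta /= CALIBRATE_TIME_MSEC;

        printf("cpu_khz = %llu\n", (unsigned long long)delta);  /* 2000000 -> 2 GHz */
        return 0;
}
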
+
+int recalibrate_cpu_khz(void)
+{
+#ifndef CONFIG_SMP
+ unsigned long cpu_khz_old = cpu_khz;
+
+ if (cpu_has_tsc) {
+ cpu_khz = calculate_cpu_khz();
+ tsc_khz = cpu_khz;
+ cpu_data[0].loops_per_jiffy =
+ cpufreq_scale(cpu_data[0].loops_per_jiffy,
+ cpu_khz_old, cpu_khz);
+ return 0;
+ } else
+ return -ENODEV;
+#else
+ return -ENODEV;
+#endif
+}
+
+EXPORT_SYMBOL(recalibrate_cpu_khz);
+
+void tsc_init(void)
+{
+ if (!cpu_has_tsc || tsc_disable)
+ return;
+
+ cpu_khz = calculate_cpu_khz();
+ tsc_khz = cpu_khz;
+
+ if (!cpu_khz)
+ return;
+
+ printk("Detected %lu.%03lu MHz processor.\n",
+ (unsigned long)cpu_khz / 1000,
+ (unsigned long)cpu_khz % 1000);
+
+ set_cyc2ns_scale(cpu_khz);
+ use_tsc_delay();
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ cpufreq_get(cpu);
+
+ cpufreq_delayed_issched = 0;
+}
+
+/*
+ * If we notice cpufreq oddness, schedule a call to cpufreq_get(); it
+ * verifies that the CPU frequency the timing core thinks the CPU is
+ * running at is still correct.
+ */
+static inline void cpufreq_delayed_get(void)
+{
+ if (cpufreq_init && !cpufreq_delayed_issched) {
+ cpufreq_delayed_issched = 1;
+ printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
+ schedule_work(&cpufreq_delayed_get_work);
+ }
+}
+
+/*
+ * if the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+static unsigned int ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+ write_seqlock_irq(&xtime_lock);
+
+ if (!ref_freq) {
+ if (!freq->old){
+ ref_freq = freq->new;
+ goto end;
+ }
+ ref_freq = freq->old;
+ loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+ cpu_khz_ref = cpu_khz;
+ }
+
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+ (val == CPUFREQ_RESUMECHANGE)) {
+ if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+ cpu_data[freq->cpu].loops_per_jiffy =
+ cpufreq_scale(loops_per_jiffy_ref,
+ ref_freq, freq->new);
+
+ if (cpu_khz) {
+
+ if (num_online_cpus() == 1)
+ cpu_khz = cpufreq_scale(cpu_khz_ref,
+ ref_freq, freq->new);
+ if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+ tsc_khz = cpu_khz;
+ set_cyc2ns_scale(cpu_khz);
+ /*
+ * TSC based sched_clock turns
+ * to junk w/ cpufreq
+ */
+ mark_tsc_unstable();
+ }
+ }
+ }
+end:
+ if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+ write_sequnlock_irq(&xtime_lock);
+
+ return 0;
+}
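
The rescaling above relies on cpufreq_scale(), which is a proportional old * mult / div adjustment of loops_per_jiffy and cpu_khz. A minimal userspace model of that relation, with assumed example frequencies:

/* Model of the cpufreq_scale() proportional rescale; values are examples. */
#include <stdio.h>
#include <stdint.h>

static unsigned long scale(unsigned long old, unsigned int div, unsigned int mult)
{
        return (unsigned long)(((uint64_t)old * mult) / div);
}

int main(void)
{
        unsigned long lpj_ref = 4000000;        /* loops_per_jiffy at ref_freq */
        unsigned int ref_freq = 2000000;        /* kHz, calibration frequency */
        unsigned int new_freq = 1000000;        /* kHz, after scaling down */

        printf("lpj = %lu\n", scale(lpj_ref, ref_freq, new_freq));   /* 2000000 */
        printf("khz = %lu\n", scale(2000000, ref_freq, new_freq));   /* 1000000 */
        return 0;
}
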
+
+static struct notifier_block time_cpufreq_notifier_block = {
+ .notifier_call = time_cpufreq_notifier
+};
+
+static int __init cpufreq_tsc(void)
+{
+ int ret;
+
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (!ret)
+ cpufreq_init = 1;
+
+ return ret;
+}
+
+core_initcall(cpufreq_tsc);
+
+#endif
+
+/* clock source code */
+
+static unsigned long current_tsc_khz = 0;
+static int tsc_update_callback(void);
+
+static cycle_t read_tsc(void)
+{
+ cycle_t ret;
+
+ rdtscll(ret);
+
+ return ret;
+}
+
+static struct clocksource clocksource_tsc = {
+ .name = "tsc",
+ .rating = 300,
+ .read = read_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 0, /* to be set */
+ .shift = 22,
+ .update_callback = tsc_update_callback,
+ .is_continuous = 1,
+};
+
+static int tsc_update_callback(void)
+{
+ int change = 0;
+
+ /* check to see if we should switch to the safe clocksource: */
+ if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
+ clocksource_tsc.rating = 50;
+ clocksource_reselect();
+ change = 1;
+ }
+
+ /* only update if tsc_khz has changed: */
+ if (current_tsc_khz != tsc_khz) {
+ current_tsc_khz = tsc_khz;
+ clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+ clocksource_tsc.shift);
+ change = 1;
+ }
+
+ return change;
+}
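
The clocksource converts cycles to nanoseconds as (cycles * mult) >> shift, with mult derived from tsc_khz. A sketch of that conversion, assuming the usual clocksource_khz2mult() relation mult = (10^6 << shift) / khz (rounding omitted) and a 2 GHz example clock:

/* Sketch of the clocksource mult/shift conversion; khz value is assumed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t shift = 22;                    /* as in clocksource_tsc above */
        uint32_t khz = 2000000;                 /* assumed tsc_khz (2 GHz) */
        uint32_t mult = (uint32_t)((((uint64_t)1000000) << shift) / khz);

        uint64_t cycles = 2000000000ULL;        /* one second's worth of cycles */
        uint64_t ns = (cycles * mult) >> shift;

        printf("mult=%u ns=%llu\n", mult, (unsigned long long)ns);  /* ~1e9 ns */
        return 0;
}
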
+
+static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
+ d->ident);
+ mark_tsc_unstable();
+ return 0;
+}
+
+/* List of systems that have known TSC problems */
+static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
+ {
+ .callback = dmi_mark_tsc_unstable,
+ .ident = "IBM Thinkpad 380XD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+ DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
+ },
+ },
+ {}
+};
+
+#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
+static struct timer_list verify_tsc_freq_timer;
+
+/* XXX - Probably should add locking */
+static void verify_tsc_freq(unsigned long unused)
+{
+ static u64 last_tsc;
+ static unsigned long last_jiffies;
+
+ u64 now_tsc, interval_tsc;
+ unsigned long now_jiffies, interval_jiffies;
+
+
+ if (check_tsc_unstable())
+ return;
+
+ rdtscll(now_tsc);
+ now_jiffies = jiffies;
+
+ if (!last_jiffies) {
+ goto out;
+ }
+
+ interval_jiffies = now_jiffies - last_jiffies;
+ interval_tsc = now_tsc - last_tsc;
+ interval_tsc *= HZ;
+ do_div(interval_tsc, cpu_khz*1000);
+
+ if (interval_tsc < (interval_jiffies * 3 / 4)) {
+ printk("TSC appears to be running slowly. "
+ "Marking it as unstable\n");
+ mark_tsc_unstable();
+ return;
+ }
+
+out:
+ last_tsc = now_tsc;
+ last_jiffies = now_jiffies;
+ /* set us up to go off on the next interval: */
+ mod_timer(&verify_tsc_freq_timer,
+ jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
+}
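
A worked example of the check above: the TSC delta over the sampling interval is converted into the number of jiffies it should represent and compared with the jiffies that actually elapsed; if it comes up short of 3/4, the TSC is marked unstable. HZ, cpu_khz and both deltas below are assumed example values.

/* Worked example of the verify_tsc_freq() drift check; all values assumed. */
#include <stdio.h>
#include <stdint.h>

#define HZ 1000

int main(void)
{
        unsigned long cpu_khz = 2000000;        /* 2 GHz, example */
        unsigned long interval_jiffies = 10000; /* 10 s elapsed at HZ=1000 */

        /* pretend the TSC only advanced as if 7 seconds had passed */
        uint64_t interval_tsc = 14000000000ULL; /* 7 s * 2e9 cycles/s */

        interval_tsc *= HZ;
        interval_tsc /= (uint64_t)cpu_khz * 1000;       /* now in jiffies: 7000 */

        if (interval_tsc < interval_jiffies * 3 / 4)
                printf("TSC running slowly (%llu < %lu), mark unstable\n",
                       (unsigned long long)interval_tsc,
                       interval_jiffies * 3 / 4);
        return 0;
}
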
+
+/*
+ * Make an educated guess if the TSC is trustworthy and synchronized
+ * over all CPUs.
+ */
+static __init int unsynchronized_tsc(void)
+{
+ /*
+ * Intel systems are normally all synchronized.
+ * Exceptions must mark TSC as unstable:
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ return 0;
+
+ /* assume multi socket systems are not synchronized: */
+ return num_possible_cpus() > 1;
+}
+
+static int __init init_tsc_clocksource(void)
+{
+
+ if (cpu_has_tsc && tsc_khz && !tsc_disable) {
+ /* check blacklist */
+ dmi_check_system(bad_tsc_dmi_table);
+
+ if (unsynchronized_tsc()) /* mark unstable if unsynced */
+ mark_tsc_unstable();
+ current_tsc_khz = tsc_khz;
+ clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+ clocksource_tsc.shift);
+ /* lower the rating if we already know its unstable: */
+ if (check_tsc_unstable())
+ clocksource_tsc.rating = 50;
+
+ init_timer(&verify_tsc_freq_timer);
+ verify_tsc_freq_timer.function = verify_tsc_freq;
+ verify_tsc_freq_timer.expires =
+ jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
+ add_timer(&verify_tsc_freq_timer);
+
+ return clocksource_register(&clocksource_tsc);
+ }
+
+ return 0;
+}
+
+module_init(init_tsc_clocksource);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 7512f39c9f25..2d4f1386e2b1 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -71,6 +71,15 @@ SECTIONS
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
_edata = .; /* End of data section */
+#ifdef CONFIG_STACK_UNWIND
+ . = ALIGN(4);
+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
+ __start_unwind = .;
+ *(.eh_frame)
+ __end_unwind = .;
+ }
+#endif
+
. = ALIGN(THREAD_SIZE); /* init_task */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
*(.data.init_task)
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 3b62baa6a371..1a36d26e15eb 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -42,10 +42,10 @@ __kernel_vsyscall:
/* 7: align return point with nop's to make disassembly easier */
.space 7,0x90
- /* 14: System call restart point is here! (SYSENTER_RETURN - 2) */
+ /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
jmp .Lenter_kernel
/* 16: System call normal return point is here! */
- .globl SYSENTER_RETURN /* Symbol used by entry.S. */
+ .globl SYSENTER_RETURN /* Symbol used by sysenter.c */
SYSENTER_RETURN:
pop %ebp
.Lpop_ebp:
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index 98699ca6e52d..e26975fc68b6 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
SECTIONS
{
- . = VSYSCALL_BASE + SIZEOF_HEADERS;
+ . = VDSO_PRELINK + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.dynsym : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
For the layouts to match, we need to skip more than enough
space for the dynamic symbol table et al. If this amount
is insufficient, ld -shared will barf. Just increase it here. */
- . = VSYSCALL_BASE + 0x400;
+ . = VDSO_PRELINK + 0x400;
.text : { *(.text) } :text =0x90909090
.note : { *(.note.*) } :text :note
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index c49a6acbee56..3c0714c4b669 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -10,43 +10,92 @@
* we have to worry about.
*/
+#include <linux/module.h>
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/delay.h>
-#include <linux/module.h>
+
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>
#ifdef CONFIG_SMP
-#include <asm/smp.h>
+# include <asm/smp.h>
#endif
-extern struct timer_opts* timer;
+/* simple loop based delay: */
+static void delay_loop(unsigned long loops)
+{
+ int d0;
+
+ __asm__ __volatile__(
+ "\tjmp 1f\n"
+ ".align 16\n"
+ "1:\tjmp 2f\n"
+ ".align 16\n"
+ "2:\tdecl %0\n\tjns 2b"
+ :"=&a" (d0)
+ :"0" (loops));
+}
+
+/* TSC based delay: */
+static void delay_tsc(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ rdtscl(bclock);
+ do {
+ rep_nop();
+ rdtscl(now);
+ } while ((now-bclock) < loops);
+}
+
+/*
+ * Since we calibrate only once at boot, this
+ * function pointer is set once at boot and not changed
+ */
+static void (*delay_fn)(unsigned long) = delay_loop;
+
+void use_tsc_delay(void)
+{
+ delay_fn = delay_tsc;
+}
+
+int read_current_timer(unsigned long *timer_val)
+{
+ if (delay_fn == delay_tsc) {
+ rdtscl(*timer_val);
+ return 0;
+ }
+ return -1;
+}
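
For reference, the same TSC busy-wait can be sketched in userspace with the GCC/Clang __rdtsc() and _mm_pause() intrinsics (x86 only, loop count chosen arbitrarily); this mirrors delay_tsc() above but is not kernel code.

/* Userspace sketch of a TSC-polling busy wait; illustrative only. */
#include <stdio.h>
#include <x86intrin.h>

static void delay_tsc(unsigned long long loops)
{
        unsigned long long bclock = __rdtsc(), now;

        do {
                _mm_pause();            /* like rep_nop() in the kernel code */
                now = __rdtsc();
        } while ((now - bclock) < loops);
}

int main(void)
{
        delay_tsc(100000000ULL);        /* spin for ~1e8 TSC cycles */
        printf("done\n");
        return 0;
}
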
void __delay(unsigned long loops)
{
- cur_timer->delay(loops);
+ delay_fn(loops);
}
inline void __const_udelay(unsigned long xloops)
{
int d0;
+
xloops *= 4;
__asm__("mull %0"
:"=d" (xloops), "=&a" (d0)
- :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
- __delay(++xloops);
+ :"1" (xloops), "0"
+ (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+
+ __delay(++xloops);
}
void __udelay(unsigned long usecs)
{
- __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+ __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
void __ndelay(unsigned long nsecs)
{
- __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+ __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
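
The magic constants are ceil(2^32 / 10^6) and ceil(2^32 / 10^9): multiplying the delay by them and keeping only the upper 32 bits of the product with loops-per-second divides by 10^6 (or 10^9) without a division instruction. A sketch of the arithmetic, with assumed loops_per_jiffy and HZ values:

/* Arithmetic behind __udelay(); loops_per_jiffy and HZ are example values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned long loops_per_jiffy = 4000000;   /* example calibration */
        unsigned int hz = 1000;                    /* example HZ */
        unsigned long usecs = 10;

        uint64_t xloops = usecs * 0x000010c7ULL;   /* usecs * 2^32 / 10^6 */
        uint64_t loops = (xloops * loops_per_jiffy * hz) >> 32;

        printf("%lu us -> %llu loops\n", usecs,
               (unsigned long long)loops);         /* ~40000 at 4e9 loops/s */
        return 0;
}
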
EXPORT_SYMBOL(__delay);
diff --git a/arch/i386/mach-visws/setup.c b/arch/i386/mach-visws/setup.c
index 8a9e1a6f745d..1f84cdb24779 100644
--- a/arch/i386/mach-visws/setup.c
+++ b/arch/i386/mach-visws/setup.c
@@ -140,8 +140,8 @@ void __init time_init_hook(void)
#define MB (1024 * 1024)
-static unsigned long sgivwfb_mem_phys;
-static unsigned long sgivwfb_mem_size;
+unsigned long sgivwfb_mem_phys;
+unsigned long sgivwfb_mem_size;
long long mem_size __initdata = 0;
@@ -177,8 +177,4 @@ char * __init machine_specific_memory_setup(void)
add_memory_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED);
return "PROM";
-
- /* Remove gcc warnings */
- (void) sanitize_e820_map(NULL, NULL);
- (void) copy_e820_map(NULL, 0);
}
diff --git a/arch/i386/mach-visws/visws_apic.c b/arch/i386/mach-visws/visws_apic.c
index 3e64fb721291..c418521dd554 100644
--- a/arch/i386/mach-visws/visws_apic.c
+++ b/arch/i386/mach-visws/visws_apic.c
@@ -278,22 +278,22 @@ void init_VISWS_APIC_irqs(void)
irq_desc[i].depth = 1;
if (i == 0) {
- irq_desc[i].handler = &cobalt_irq_type;
+ irq_desc[i].chip = &cobalt_irq_type;
}
else if (i == CO_IRQ_IDE0) {
- irq_desc[i].handler = &cobalt_irq_type;
+ irq_desc[i].chip = &cobalt_irq_type;
}
else if (i == CO_IRQ_IDE1) {
- irq_desc[i].handler = &cobalt_irq_type;
+ irq_desc[i].chip = &cobalt_irq_type;
}
else if (i == CO_IRQ_8259) {
- irq_desc[i].handler = &piix4_master_irq_type;
+ irq_desc[i].chip = &piix4_master_irq_type;
}
else if (i < CO_IRQ_APIC0) {
- irq_desc[i].handler = &piix4_virtual_irq_type;
+ irq_desc[i].chip = &piix4_virtual_irq_type;
}
else if (IS_CO_APIC(i)) {
- irq_desc[i].handler = &cobalt_irq_type;
+ irq_desc[i].chip = &cobalt_irq_type;
}
}
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index 0e225054e222..defc6ebbd565 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -5,10 +5,10 @@
#include <linux/config.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/acpi.h>
#include <asm/arch_hooks.h>
#include <asm/voyager.h>
#include <asm/e820.h>
+#include <asm/io.h>
#include <asm/setup.h>
void __init pre_intr_init_hook(void)
@@ -27,8 +27,7 @@ void __init intr_init_hook(void)
smp_intr_init();
#endif
- if (!acpi_ioapic)
- setup_irq(2, &irq2);
+ setup_irq(2, &irq2);
}
void __init pre_setup_arch_hook(void)
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 70e560a1b79a..5b8b579a079f 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -661,6 +661,7 @@ do_boot_cpu(__u8 cpu)
print_cpu_info(&cpu_data[cpu]);
wmb();
cpu_set(cpu, cpu_callout_map);
+ cpu_set(cpu, cpu_present_map);
}
else {
printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -1418,7 +1419,7 @@ smp_intr_init(void)
* This is for later: first 16 correspond to PC IRQs; next 16
* are Primary MC IRQs and final 16 are Secondary MC IRQs */
for(i = 0; i < 48; i++)
- irq_desc[i].handler = &vic_irq_type;
+ irq_desc[i].chip = &vic_irq_type;
}
/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
@@ -1912,6 +1913,7 @@ void __devinit smp_prepare_boot_cpu(void)
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
+ cpu_set(smp_processor_id(), cpu_present_map);
}
int __devinit
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index bd6fe96cc16d..6ee7faaf2c1b 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -30,6 +30,40 @@
extern void die(const char *,struct pt_regs *,long);
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
+
/*
* Unlock any spinlocks which will prevent us from getting the
* message out
@@ -324,7 +358,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
if (unlikely(address >= TASK_SIZE)) {
if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
return;
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
return;
/*
@@ -334,7 +368,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
return;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index bf19513f0cea..f84b16e007ff 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
@@ -654,7 +655,7 @@ void __init mem_init(void)
*/
#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdata = &contig_page_data;
struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
@@ -753,7 +754,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 0887b34bc59b..353a836ed63c 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -229,8 +229,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
if (PageHighMem(page))
return;
if (!enable)
- mutex_debug_check_no_locks_freed(page_address(page),
- numpages * PAGE_SIZE);
+ debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index ec0fd3cfa774..fa8a37bcb391 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -281,9 +281,9 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
for (i = 0; i < model->num_counters; ++i) {
struct dentry * dir;
- char buf[2];
+ char buf[4];
- snprintf(buf, 2, "%d", i);
+ snprintf(buf, sizeof(buf), "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c
index 3ad9a72a5036..693bdea4a52b 100644
--- a/arch/i386/oprofile/op_model_athlon.c
+++ b/arch/i386/oprofile/op_model_athlon.c
@@ -13,6 +13,7 @@
#include <linux/oprofile.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
+#include <asm/nmi.h>
#include "op_x86_model.h"
#include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c
index ac8a066035c2..7c61d357b82b 100644
--- a/arch/i386/oprofile/op_model_p4.c
+++ b/arch/i386/oprofile/op_model_p4.c
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
+#include <asm/nmi.h>
#include "op_x86_model.h"
#include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c
index d719015fc044..5c3ab4b027ad 100644
--- a/arch/i386/oprofile/op_model_ppro.c
+++ b/arch/i386/oprofile/op_model_ppro.c
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
+#include <asm/nmi.h>
#include "op_x86_model.h"
#include "op_counter.h"
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index a151f7a99f5e..10154a2cac68 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -48,10 +48,10 @@
*/
void
pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index 1eec0868f4b3..ed1512a175ab 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -371,8 +371,7 @@ void __devinit pcibios_sort(void)
list_for_each(ln, &pci_devices) {
d = pci_dev_g(ln);
if (d->bus->number == bus && d->devfn == devfn) {
- list_del(&d->global_list);
- list_add_tail(&d->global_list, &sorted_devices);
+ list_move_tail(&d->global_list, &sorted_devices);
if (d == dev)
found = 1;
break;
@@ -390,8 +389,7 @@ void __devinit pcibios_sort(void)
if (!found) {
printk(KERN_WARNING "PCI: Device %s not found by BIOS\n",
pci_name(dev));
- list_del(&dev->global_list);
- list_add_tail(&dev->global_list, &sorted_devices);
+ list_move_tail(&dev->global_list, &sorted_devices);
}
}
list_splice(&sorted_devices, &pci_devices);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 18318749884b..b487e227a1f7 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -271,6 +271,9 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
+
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
@@ -374,6 +377,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
+config HAVE_ARCH_NODEDATA_EXTENSION
+ def_bool y
+ depends on NUMA
+
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help
@@ -485,6 +492,10 @@ config GENERIC_PENDING_IRQ
depends on GENERIC_HARDIRQS && SMP
default y
+config IRQ_PER_CPU
+ bool
+ default y
+
source "arch/ia64/hp/sim/Kconfig"
menu "Instrumentation Support"
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 766bf4955432..9d1cffb57cde 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -114,7 +114,7 @@ CONFIG_IA64_CYCLONE=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=16
CONFIG_HOTPLUG_CPU=y
CONFIG_PERMIT_BSP_REMOVE=y
CONFIG_FORCE_CPEI_RETARGET=y
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c0d25a2a3e9c..8145547bb52d 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -44,8 +44,8 @@ hpsim_irq_init (void)
int i;
for (i = 0; i < NR_IRQS; ++i) {
- idesc = irq_descp(i);
- if (idesc->handler == &no_irq_type)
- idesc->handler = &irq_type_hp_sim;
+ idesc = irq_desc + i;
+ if (idesc->chip == &no_irq_type)
+ idesc->chip = &irq_type_hp_sim;
}
}
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d58c1c5c903a..efc7df4b0fd2 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -456,7 +456,7 @@ iosapic_startup_edge_irq (unsigned int irq)
static void
iosapic_ack_edge_irq (unsigned int irq)
{
- irq_desc_t *idesc = irq_descp(irq);
+ irq_desc_t *idesc = irq_desc + irq;
move_native_irq(irq);
/*
@@ -659,14 +659,14 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
else
irq_type = &irq_type_iosapic_level;
- idesc = irq_descp(vector);
- if (idesc->handler != irq_type) {
- if (idesc->handler != &no_irq_type)
+ idesc = irq_desc + vector;
+ if (idesc->chip != irq_type) {
+ if (idesc->chip != &no_irq_type)
printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n",
__FUNCTION__, vector,
- idesc->handler->typename, irq_type->typename);
- idesc->handler = irq_type;
+ idesc->chip->typename, irq_type->typename);
+ idesc->chip = irq_type;
}
return 0;
}
@@ -793,14 +793,14 @@ again:
return -ENOSPC;
}
- spin_lock_irqsave(&irq_descp(vector)->lock, flags);
+ spin_lock_irqsave(&irq_desc[vector].lock, flags);
spin_lock(&iosapic_lock);
{
if (gsi_to_vector(gsi) > 0) {
if (list_empty(&iosapic_intr_info[vector].rtes))
free_irq_vector(vector);
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock,
+ spin_unlock_irqrestore(&irq_desc[vector].lock,
flags);
goto again;
}
@@ -810,7 +810,7 @@ again:
polarity, trigger);
if (err < 0) {
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock,
+ spin_unlock_irqrestore(&irq_desc[vector].lock,
flags);
return err;
}
@@ -825,7 +825,7 @@ again:
set_rte(gsi, vector, dest, mask);
}
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+ spin_unlock_irqrestore(&irq_desc[vector].lock, flags);
printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
@@ -860,7 +860,7 @@ iosapic_unregister_intr (unsigned int gsi)
}
vector = irq_to_vector(irq);
- idesc = irq_descp(irq);
+ idesc = irq_desc + irq;
spin_lock_irqsave(&idesc->lock, flags);
spin_lock(&iosapic_lock);
{
@@ -903,7 +903,7 @@ iosapic_unregister_intr (unsigned int gsi)
BUG_ON(iosapic_intr_info[vector].count);
/* Clear the interrupt controller descriptor */
- idesc->handler = &no_irq_type;
+ idesc->chip = &no_irq_type;
/* Clear the interrupt information */
memset(&iosapic_intr_info[vector], 0,
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 9c72ea3f6432..7852382de2fa 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
}
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -100,7 +100,7 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
cpu_set(cpu_logical_id(hwid), mask);
if (irq < NR_IRQS) {
- irq_affinity[irq] = mask;
+ irq_desc[irq].affinity = mask;
irq_redir[irq] = (char) (redir & 0xff);
}
}
@@ -120,7 +120,7 @@ static void migrate_irqs(void)
int irq, new_cpu;
for (irq=0; irq < NR_IRQS; irq++) {
- desc = irq_descp(irq);
+ desc = irq_desc + irq;
/*
* No handling for now.
@@ -131,7 +131,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_affinity[irq], cpu_online_map);
+ cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
if (any_online_cpu(mask) == NR_CPUS) {
/*
* Save it for phase 2 processing
@@ -144,15 +144,15 @@ static void migrate_irqs(void)
/*
* All three are essential, currently WARN_ON.. maybe panic?
*/
- if (desc->handler && desc->handler->disable &&
- desc->handler->enable && desc->handler->set_affinity) {
- desc->handler->disable(irq);
- desc->handler->set_affinity(irq, mask);
- desc->handler->enable(irq);
+ if (desc->chip && desc->chip->disable &&
+ desc->chip->enable && desc->chip->set_affinity) {
+ desc->chip->disable(irq);
+ desc->chip->set_affinity(irq, mask);
+ desc->chip->enable(irq);
} else {
- WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
- !(desc->handler->enable) ||
- !(desc->handler->set_affinity)));
+ WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
+ !(desc->chip->enable) ||
+ !(desc->chip->set_affinity)));
}
}
}
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index ef9a2b49307a..f5035304594e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -249,9 +249,9 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == vec) {
- desc = irq_descp(irq);
+ desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
- desc->handler = &irq_type_ia64_lsapic;
+ desc->chip = &irq_type_ia64_lsapic;
if (action)
setup_irq(irq, action);
}
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index ea14e6a04409..1ab58b09f3d7 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -26,6 +26,13 @@ lsapic_noop (unsigned int irq)
/* nuthing to do... */
}
+static int lsapic_retrigger(unsigned int irq)
+{
+ ia64_resend_irq(irq);
+
+ return 1;
+}
+
struct hw_interrupt_type irq_type_ia64_lsapic = {
.typename = "LSAPIC",
.startup = lsapic_noop_startup,
@@ -33,5 +40,6 @@ struct hw_interrupt_type irq_type_ia64_lsapic = {
.enable = lsapic_noop,
.disable = lsapic_noop,
.ack = lsapic_noop,
- .end = lsapic_noop
+ .end = lsapic_noop,
+ .retrigger = lsapic_retrigger,
};
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6a0880639bc9..d7dc5e63de63 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1788,7 +1788,7 @@ ia64_mca_late_init(void)
cpe_poll_enabled = 0;
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == cpe_vector) {
- desc = irq_descp(irq);
+ desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
setup_irq(irq, &mca_cpe_irqaction);
ia64_cpe_irq = irq;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 859fb37ff49b..8a1208419138 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -978,7 +978,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
{
.notifier_call = palinfo_cpu_callback,
.priority = 0,
@@ -998,7 +998,7 @@ palinfo_init(void)
}
/* Register for future delivery via notify registration */
- register_cpu_notifier(&palinfo_cpu_notifier);
+ register_hotcpu_notifier(&palinfo_cpu_notifier);
return 0;
}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6d7bc8ff7b3a..a0055d3d695c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6165,7 +6165,7 @@ pfm_load_regs (struct task_struct *task)
/*
* will replay the PMU interrupt
*/
- if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
+ if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
}
@@ -6305,7 +6305,7 @@ pfm_load_regs (struct task_struct *task)
/*
* will replay the PMU interrupt
*/
- if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
+ if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 355d57970ba3..b045c279136c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -272,9 +272,9 @@ cpu_idle (void)
/* endless idle loop with no priority at all */
while (1) {
if (can_do_pal_halt)
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
else
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
if (!need_resched()) {
void (*idle)(void);
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 663a186ad194..9065f0f01ba3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
};
#ifdef CONFIG_HOTPLUG_CPU
-static int
+static int __devinit
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
@@ -673,9 +673,7 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
-#ifdef CONFIG_HOTPLUG_CPU
- register_cpu_notifier(&salinfo_cpu_notifier);
-#endif
+ register_hotcpu_notifier(&salinfo_cpu_notifier);
return 0;
}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 44e9547878ac..5203df78f150 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -677,16 +677,16 @@ int migrate_platform_irqs(unsigned int cpu)
new_cpei_cpu = any_online_cpu(cpu_online_map);
mask = cpumask_of_cpu(new_cpei_cpu);
set_cpei_target_cpu(new_cpei_cpu);
- desc = irq_descp(ia64_cpe_irq);
+ desc = irq_desc + ia64_cpe_irq;
/*
* Switch for now, immediately, we need to do fake intr
* as other interrupts, but need to study CPEI behaviour with
* polling before making changes.
*/
if (desc) {
- desc->handler->disable(ia64_cpe_irq);
- desc->handler->set_affinity(ia64_cpe_irq, mask);
- desc->handler->enable(ia64_cpe_irq);
+ desc->chip->disable(ia64_cpe_irq);
+ desc->chip->set_affinity(ia64_cpe_irq, mask);
+ desc->chip->enable(ia64_cpe_irq);
printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
}
}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 879edb51d1e0..5511d9c6c701 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -26,19 +26,10 @@
#include <asm/numa.h>
#include <asm/cpu.h>
-#ifdef CONFIG_NUMA
-static struct node *sysfs_nodes;
-#endif
static struct ia64_cpu *sysfs_cpus;
int arch_register_cpu(int num)
{
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- parent = &sysfs_nodes[cpu_to_node(num)];
-#endif /* CONFIG_NUMA */
-
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
/*
* If CPEI cannot be re-targetted, and this is
@@ -48,21 +39,14 @@ int arch_register_cpu(int num)
sysfs_cpus[num].cpu.no_control = 1;
#endif
- return register_cpu(&sysfs_cpus[num].cpu, num, parent);
+ return register_cpu(&sysfs_cpus[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_unregister_cpu(int num)
{
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- parent = &sysfs_nodes[node];
-#endif /* CONFIG_NUMA */
-
- return unregister_cpu(&sysfs_cpus[num].cpu, parent);
+ return unregister_cpu(&sysfs_cpus[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,17 +58,11 @@ static int __init topology_init(void)
int i, err = 0;
#ifdef CONFIG_NUMA
- sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
- if (!sysfs_nodes) {
- err = -ENOMEM;
- goto out;
- }
-
/*
* MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
*/
for_each_online_node(i) {
- if ((err = register_node(&sysfs_nodes[i], i, 0)))
+ if ((err = register_one_node(i)))
goto out;
}
#endif
@@ -426,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
-static int cache_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -444,7 +422,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cache_cpu_notifier =
+static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
.notifier_call = cache_cpu_callback
};
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa3603..525b082eb661 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
*/
struct early_node_data {
struct ia64_node_data *node_data;
- pg_data_t *pgdat;
unsigned long pernode_addr;
unsigned long pernode_size;
struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;
+static pg_data_t *pgdat_list[MAX_NUMNODES];
+
/*
* To prevent cache aliasing effects, align per-node structures so that they
* start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
* acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
* called yet. Note that node 0 will also count all non-existent cpus.
*/
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
{
int cpu, n = 0;
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
pernode += PERCPU_PAGE_SIZE * cpus;
pernode += node * L1_CACHE_BYTES;
- mem_data[node].pgdat = __va(pernode);
+ pgdat_list[node] = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
mem_data[node].node_data = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
- mem_data[node].pgdat->bdata = bdp;
+ pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
static int __init free_node_bootmem(unsigned long start, unsigned long len,
int node)
{
- free_bootmem_node(mem_data[node].pgdat, start, len);
+ free_bootmem_node(pgdat_list[node], start, len);
return 0;
}
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
int node;
for_each_online_node(node) {
- pg_data_t *pdp = mem_data[node].pgdat;
+ pg_data_t *pdp = pgdat_list[node];
if (node_isset(node, memory_less_mask))
continue;
@@ -307,6 +308,17 @@ static void __init reserve_pernode_space(void)
}
}
+static void __meminit scatter_node_data(void)
+{
+ pg_data_t **dst;
+ int node;
+
+ for_each_online_node(node) {
+ dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+ memcpy(dst, pgdat_list, sizeof(pgdat_list));
+ }
+}
+
/**
* initialize_pernode_data - fixup per-cpu & per-node pointers
*
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void)
*/
static void __init initialize_pernode_data(void)
{
- pg_data_t *pgdat_list[MAX_NUMNODES];
int cpu, node;
- for_each_online_node(node)
- pgdat_list[node] = mem_data[node].pgdat;
+ scatter_node_data();
- /* Copy the pg_data_t list to each node and init the node field */
- for_each_online_node(node) {
- memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
- sizeof(pgdat_list));
- }
#ifdef CONFIG_SMP
/* Set the node_data pointer for each per-cpu struct */
for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
if (bestnode == -1)
bestnode = anynode;
- ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+ ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
pernodesize = mem_data[node].pernode_size;
map = pernode + pernodesize;
- init_bootmem_node(mem_data[node].pgdat,
+ init_bootmem_node(pgdat_list[node],
map>>PAGE_SHIFT,
bdp->node_boot_start>>PAGE_SHIFT,
bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
+
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+ unsigned long size = compute_pernodesize(nid);
+
+ return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+ kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+ pgdat_list[update_node] = update_pgdat;
+ scatter_node_data();
+}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d98ec49570b8..14ef7cceb208 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,6 +19,40 @@
extern void die (char *, struct pt_regs *, long);
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
/*
* Return TRUE if ADDRESS points at a page in the kernel's mapped segment
* (inside region 5, on ia64) and that page is present.
@@ -84,7 +118,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* This is to handle the kprobes on user space access instructions
*/
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
SIGSEGV) == NOTIFY_STOP)
return;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c2..38306e98f04b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
num_physpages++;
}
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(0);
+ pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size)
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 77375a55da31..5bef0e3603f2 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -568,7 +568,7 @@ pcibios_disable_device (struct pci_dev *dev)
void
pcibios_align_resource (void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
}
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index dc8e2b696713..7bb6ad188ba3 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
-static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info,
@@ -225,8 +225,8 @@ void sn_irq_init(void)
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
for (i = 0; i < NR_IRQS; i++) {
- if (base_desc[i].handler == &no_irq_type) {
- base_desc[i].handler = &irq_type_sn;
+ if (base_desc[i].chip == &no_irq_type) {
+ base_desc[i].chip = &irq_type_sn;
}
}
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 93577abae36d..3bfccf354343 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -458,7 +458,7 @@ void __init sn_setup(char **cmdline_p)
* support here so we don't have to listen to failed keyboard probe
* messages.
*/
- if (version <= 0x0209 && acpi_kbd_controller_present) {
+ if (is_shub1() && version <= 0x0209 && acpi_kbd_controller_present) {
printk(KERN_INFO "Disabling legacy keyboard support as prom "
"is too old and doesn't provide FADT\n");
acpi_kbd_controller_present = 0;
@@ -577,7 +577,8 @@ void __init sn_cpu_init(void)
int i;
static int wars_have_been_checked;
- if (smp_processor_id() == 0 && IS_MEDUSA()) {
+ cpuid = smp_processor_id();
+ if (cpuid == 0 && IS_MEDUSA()) {
if (ia64_sn_is_fake_prom())
sn_prom_type = 2;
else
@@ -597,6 +598,12 @@ void __init sn_cpu_init(void)
sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
/*
+ * Don't check status. The SAL call is not supported on all PROMs
+ * but a failure is harmless.
+ */
+ (void) ia64_sn_set_cpu_number(cpuid);
+
+ /*
* The boot cpu makes this call again after platform initialization is
* complete.
*/
@@ -607,7 +614,6 @@ void __init sn_cpu_init(void)
if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
break;
- cpuid = smp_processor_id();
cpuphyid = get_sapicid();
if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 20de72791b97..e4aa839d0189 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -595,7 +595,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* sanity check prom rev */
- if (sn_sal_rev() < 0x0406) {
+ if (is_shub1() && sn_sal_rev() < 0x0406) {
printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
"for tioca support\n", __FUNCTION__);
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index a4634b06f675..3841861df6a2 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -54,7 +54,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 3cd3c2988a48..1ff483c8a4c9 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -275,7 +275,7 @@ static int __init topology_init(void)
int i;
for_each_present_cpu(i)
- register_cpu(&cpu_devices[i], i, NULL);
+ register_cpu(&cpu_devices[i], i);
return 0;
}
diff --git a/arch/m32r/kernel/setup_m32104ut.c b/arch/m32r/kernel/setup_m32104ut.c
index 6328e1357a80..f9f56c270195 100644
--- a/arch/m32r/kernel/setup_m32104ut.c
+++ b/arch/m32r/kernel/setup_m32104ut.c
@@ -87,7 +87,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SMC91X)
/* INT#0: LAN controller on M32104UT-LAN (SMC91C111)*/
irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_INT0].chip = &m32104ut_irq_type;
irq_desc[M32R_IRQ_INT0].action = 0;
irq_desc[M32R_IRQ_INT0].depth = 1;
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */
@@ -96,7 +96,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &m32104ut_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -105,7 +105,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &m32104ut_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN;
@@ -113,7 +113,7 @@ void __init init_IRQ(void)
/* SIO0_S : uart send data */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &m32104ut_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN;
diff --git a/arch/m32r/kernel/setup_m32700ut.c b/arch/m32r/kernel/setup_m32700ut.c
index fad1fc99bb27..b6ab00eff580 100644
--- a/arch/m32r/kernel/setup_m32700ut.c
+++ b/arch/m32r/kernel/setup_m32700ut.c
@@ -301,7 +301,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SMC91X)
/* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/
irq_desc[M32700UT_LAN_IRQ_LAN].status = IRQ_DISABLED;
- irq_desc[M32700UT_LAN_IRQ_LAN].handler = &m32700ut_lanpld_irq_type;
+ irq_desc[M32700UT_LAN_IRQ_LAN].chip = &m32700ut_lanpld_irq_type;
irq_desc[M32700UT_LAN_IRQ_LAN].action = 0;
irq_desc[M32700UT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */
lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */
@@ -310,7 +310,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -318,7 +318,7 @@ void __init init_IRQ(void)
/* SIO0 : receive */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -326,7 +326,7 @@ void __init init_IRQ(void)
/* SIO0 : send */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
@@ -334,7 +334,7 @@ void __init init_IRQ(void)
/* SIO1 : receive */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -342,7 +342,7 @@ void __init init_IRQ(void)
/* SIO1 : send */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
@@ -350,7 +350,7 @@ void __init init_IRQ(void)
/* DMA1 : */
irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_DMA1].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_DMA1].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_DMA1].action = 0;
irq_desc[M32R_IRQ_DMA1].depth = 1;
icu_data[M32R_IRQ_DMA1].icucr = 0;
@@ -359,7 +359,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_PLDSIO
/* INT#1: SIO0 Receive on PLD */
irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_RCV].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_RCV].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
@@ -367,7 +367,7 @@ void __init init_IRQ(void)
/* INT#1: SIO0 Send on PLD */
irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_SND].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_SND].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_SIO0_SND].action = 0;
irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
@@ -376,7 +376,7 @@ void __init init_IRQ(void)
/* INT#1: CFC IREQ on PLD */
irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_CFIREQ].action = 0;
irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */
@@ -384,7 +384,7 @@ void __init init_IRQ(void)
/* INT#1: CFC Insert on PLD */
irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */
@@ -392,7 +392,7 @@ void __init init_IRQ(void)
/* INT#1: CFC Eject on PLD */
irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_EJECT].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */
@@ -416,7 +416,7 @@ void __init init_IRQ(void)
outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
irq_desc[M32700UT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
- irq_desc[M32700UT_LCD_IRQ_USB_INT1].handler = &m32700ut_lcdpld_irq_type;
+ irq_desc[M32700UT_LCD_IRQ_USB_INT1].chip = &m32700ut_lcdpld_irq_type;
irq_desc[M32700UT_LCD_IRQ_USB_INT1].action = 0;
irq_desc[M32700UT_LCD_IRQ_USB_INT1].depth = 1;
lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
@@ -434,7 +434,7 @@ void __init init_IRQ(void)
* INT3# is used for AR
*/
irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT3].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_INT3].chip = &m32700ut_irq_type;
irq_desc[M32R_IRQ_INT3].action = 0;
irq_desc[M32R_IRQ_INT3].depth = 1;
icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
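The repetitive edits above (and in the board files that follow) all track a single rename in the generic IRQ layer: the per-interrupt descriptor's pointer to its low-level controller operations is now called "chip" rather than "handler". A rough sketch of the affected fields, assuming the genirq descriptor layout of this era (the field set is trimmed and the name irq_desc_sketch is illustrative only):

	struct irq_chip;		/* low-level interrupt controller operations */
	struct irqaction;		/* registered interrupt handlers */

	struct irq_desc_sketch {
		unsigned int		status;		/* e.g. IRQ_DISABLED */
		struct irq_chip		*chip;		/* was named "handler" before the rename */
		struct irqaction	*action;	/* list of registered handlers */
		unsigned int		depth;		/* nested irq-disable count */
	};

Board setup code that fills the field by hand, as these m32r files do, therefore has to follow the rename even though nothing else about the assignment changes.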
diff --git a/arch/m32r/kernel/setup_mappi.c b/arch/m32r/kernel/setup_mappi.c
index 00f253209cb3..c268044185f5 100644
--- a/arch/m32r/kernel/setup_mappi.c
+++ b/arch/m32r/kernel/setup_mappi.c
@@ -86,7 +86,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_NE2000
/* INT0 : LAN controller (RTL8019AS) */
irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_INT0].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_INT0].action = 0;
irq_desc[M32R_IRQ_INT0].depth = 1;
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
@@ -95,7 +95,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -104,7 +104,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -112,7 +112,7 @@ void __init init_IRQ(void)
/* SIO0_S : uart send data */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
@@ -120,7 +120,7 @@ void __init init_IRQ(void)
/* SIO1_R : uart receive data */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -128,7 +128,7 @@ void __init init_IRQ(void)
/* SIO1_S : uart send data */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
@@ -138,7 +138,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_M32R_PCC)
/* INT1 : pccard0 interrupt */
irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT1].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_INT1].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_INT1].action = 0;
irq_desc[M32R_IRQ_INT1].depth = 1;
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
@@ -146,7 +146,7 @@ void __init init_IRQ(void)
/* INT2 : pccard1 interrupt */
irq_desc[M32R_IRQ_INT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT2].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_INT2].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_INT2].action = 0;
irq_desc[M32R_IRQ_INT2].depth = 1;
icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
diff --git a/arch/m32r/kernel/setup_mappi2.c b/arch/m32r/kernel/setup_mappi2.c
index eebc9d8b4e72..bd2327d5cca2 100644
--- a/arch/m32r/kernel/setup_mappi2.c
+++ b/arch/m32r/kernel/setup_mappi2.c
@@ -87,7 +87,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SMC91X)
/* INT0 : LAN controller (SMC91111) */
irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_INT0].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_INT0].action = 0;
irq_desc[M32R_IRQ_INT0].depth = 1;
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
@@ -96,7 +96,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -105,7 +105,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -113,14 +113,14 @@ void __init init_IRQ(void)
/* SIO0_S : uart send data */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi2_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -128,7 +128,7 @@ void __init init_IRQ(void)
/* SIO1_S : uart send data */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
@@ -138,7 +138,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_USB)
/* INT1 : USB Host controller interrupt */
irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT1].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_INT1].chip = &mappi2_irq_type;
irq_desc[M32R_IRQ_INT1].action = 0;
irq_desc[M32R_IRQ_INT1].depth = 1;
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
@@ -147,7 +147,7 @@ void __init init_IRQ(void)
/* ICUCR40: CFC IREQ */
irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].handler = &mappi2_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].chip = &mappi2_irq_type;
irq_desc[PLD_IRQ_CFIREQ].action = 0;
irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
@@ -156,7 +156,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_M32R_CFC)
/* ICUCR41: CFC Insert */
irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].handler = &mappi2_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi2_irq_type;
irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
@@ -164,7 +164,7 @@ void __init init_IRQ(void)
/* ICUCR42: CFC Eject */
irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].handler = &mappi2_irq_type;
+ irq_desc[PLD_IRQ_CFC_EJECT].chip = &mappi2_irq_type;
irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
diff --git a/arch/m32r/kernel/setup_mappi3.c b/arch/m32r/kernel/setup_mappi3.c
index d2ff021e2d3d..014b51d17505 100644
--- a/arch/m32r/kernel/setup_mappi3.c
+++ b/arch/m32r/kernel/setup_mappi3.c
@@ -87,7 +87,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SMC91X)
/* INT0 : LAN controller (SMC91111) */
irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_INT0].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_INT0].action = 0;
irq_desc[M32R_IRQ_INT0].depth = 1;
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
@@ -96,7 +96,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -105,7 +105,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -113,14 +113,14 @@ void __init init_IRQ(void)
/* SIO0_S : uart send data */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -128,7 +128,7 @@ void __init init_IRQ(void)
/* SIO1_S : uart send data */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
@@ -138,7 +138,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_USB)
/* INT1 : USB Host controller interrupt */
irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT1].handler = &mappi3_irq_type;
+ irq_desc[M32R_IRQ_INT1].chip = &mappi3_irq_type;
irq_desc[M32R_IRQ_INT1].action = 0;
irq_desc[M32R_IRQ_INT1].depth = 1;
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
@@ -147,7 +147,7 @@ void __init init_IRQ(void)
/* CFC IREQ */
irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].handler = &mappi3_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].chip = &mappi3_irq_type;
irq_desc[PLD_IRQ_CFIREQ].action = 0;
irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
@@ -156,7 +156,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_M32R_CFC)
/* ICUCR41: CFC Insert & eject */
irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].handler = &mappi3_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi3_irq_type;
irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
@@ -166,7 +166,7 @@ void __init init_IRQ(void)
/* IDE IREQ */
irq_desc[PLD_IRQ_IDEIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_IDEIREQ].handler = &mappi3_irq_type;
+ irq_desc[PLD_IRQ_IDEIREQ].chip = &mappi3_irq_type;
irq_desc[PLD_IRQ_IDEIREQ].action = 0;
irq_desc[PLD_IRQ_IDEIREQ].depth = 1; /* disable nested irq */
icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
diff --git a/arch/m32r/kernel/setup_oaks32r.c b/arch/m32r/kernel/setup_oaks32r.c
index 0e9e63538c0f..ea64831aef7a 100644
--- a/arch/m32r/kernel/setup_oaks32r.c
+++ b/arch/m32r/kernel/setup_oaks32r.c
@@ -85,7 +85,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_NE2000
/* INT3 : LAN controller (RTL8019AS) */
irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT3].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_INT3].chip = &oaks32r_irq_type;
irq_desc[M32R_IRQ_INT3].action = 0;
irq_desc[M32R_IRQ_INT3].depth = 1;
icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
@@ -94,7 +94,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &oaks32r_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -103,7 +103,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &oaks32r_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -111,7 +111,7 @@ void __init init_IRQ(void)
/* SIO0_S : uart send data */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &oaks32r_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
@@ -119,7 +119,7 @@ void __init init_IRQ(void)
/* SIO1_R : uart receive data */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &oaks32r_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -127,7 +127,7 @@ void __init init_IRQ(void)
/* SIO1_S : uart send data */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &oaks32r_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
diff --git a/arch/m32r/kernel/setup_opsput.c b/arch/m32r/kernel/setup_opsput.c
index 548e8fc7949b..55e8972d455a 100644
--- a/arch/m32r/kernel/setup_opsput.c
+++ b/arch/m32r/kernel/setup_opsput.c
@@ -302,7 +302,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SMC91X)
/* INT#0: LAN controller on OPSPUT-LAN (SMC91C111)*/
irq_desc[OPSPUT_LAN_IRQ_LAN].status = IRQ_DISABLED;
- irq_desc[OPSPUT_LAN_IRQ_LAN].handler = &opsput_lanpld_irq_type;
+ irq_desc[OPSPUT_LAN_IRQ_LAN].chip = &opsput_lanpld_irq_type;
irq_desc[OPSPUT_LAN_IRQ_LAN].action = 0;
irq_desc[OPSPUT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */
lanpld_icu_data[irq2lanpldirq(OPSPUT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */
@@ -311,7 +311,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -319,7 +319,7 @@ void __init init_IRQ(void)
/* SIO0 : receive */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -327,7 +327,7 @@ void __init init_IRQ(void)
/* SIO0 : send */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
@@ -335,7 +335,7 @@ void __init init_IRQ(void)
/* SIO1 : receive */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -343,7 +343,7 @@ void __init init_IRQ(void)
/* SIO1 : send */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
@@ -351,7 +351,7 @@ void __init init_IRQ(void)
/* DMA1 : */
irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_DMA1].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_DMA1].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_DMA1].action = 0;
irq_desc[M32R_IRQ_DMA1].depth = 1;
icu_data[M32R_IRQ_DMA1].icucr = 0;
@@ -360,7 +360,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SERIAL_M32R_PLDSIO
/* INT#1: SIO0 Receive on PLD */
irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_RCV].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_RCV].chip = &opsput_pld_irq_type;
irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
@@ -368,7 +368,7 @@ void __init init_IRQ(void)
/* INT#1: SIO0 Send on PLD */
irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_SND].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_SND].chip = &opsput_pld_irq_type;
irq_desc[PLD_IRQ_SIO0_SND].action = 0;
irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
@@ -378,7 +378,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_M32R_CFC)
/* INT#1: CFC IREQ on PLD */
irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].chip = &opsput_pld_irq_type;
irq_desc[PLD_IRQ_CFIREQ].action = 0;
irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */
@@ -386,7 +386,7 @@ void __init init_IRQ(void)
/* INT#1: CFC Insert on PLD */
irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].chip = &opsput_pld_irq_type;
irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */
@@ -394,7 +394,7 @@ void __init init_IRQ(void)
/* INT#1: CFC Eject on PLD */
irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_EJECT].chip = &opsput_pld_irq_type;
irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */
@@ -420,7 +420,7 @@ void __init init_IRQ(void)
outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
irq_desc[OPSPUT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
- irq_desc[OPSPUT_LCD_IRQ_USB_INT1].handler = &opsput_lcdpld_irq_type;
+ irq_desc[OPSPUT_LCD_IRQ_USB_INT1].chip = &opsput_lcdpld_irq_type;
irq_desc[OPSPUT_LCD_IRQ_USB_INT1].action = 0;
irq_desc[OPSPUT_LCD_IRQ_USB_INT1].depth = 1;
lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
@@ -438,7 +438,7 @@ void __init init_IRQ(void)
* INT3# is used for AR
*/
irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT3].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_INT3].chip = &opsput_irq_type;
irq_desc[M32R_IRQ_INT3].action = 0;
irq_desc[M32R_IRQ_INT3].depth = 1;
icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
diff --git a/arch/m32r/kernel/setup_usrv.c b/arch/m32r/kernel/setup_usrv.c
index 64be659a23e7..7fa12d8f66b4 100644
--- a/arch/m32r/kernel/setup_usrv.c
+++ b/arch/m32r/kernel/setup_usrv.c
@@ -158,7 +158,7 @@ void __init init_IRQ(void)
/* MFT2 : system timer */
irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_MFT2].action = 0;
irq_desc[M32R_IRQ_MFT2].depth = 1;
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
@@ -167,7 +167,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SERIAL_M32R_SIO)
/* SIO0_R : uart receive data */
irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO0_R].action = 0;
irq_desc[M32R_IRQ_SIO0_R].depth = 1;
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
@@ -175,7 +175,7 @@ void __init init_IRQ(void)
/* SIO0_S : uart send data */
irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO0_S].action = 0;
irq_desc[M32R_IRQ_SIO0_S].depth = 1;
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
@@ -183,7 +183,7 @@ void __init init_IRQ(void)
/* SIO1_R : uart receive data */
irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO1_R].action = 0;
irq_desc[M32R_IRQ_SIO1_R].depth = 1;
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
@@ -191,7 +191,7 @@ void __init init_IRQ(void)
/* SIO1_S : uart send data */
irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_SIO1_S].action = 0;
irq_desc[M32R_IRQ_SIO1_S].depth = 1;
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
@@ -201,7 +201,7 @@ void __init init_IRQ(void)
/* INT#67-#71: CFC#0 IREQ on PLD */
for (i = 0 ; i < CONFIG_CFC_NUM ; i++ ) {
irq_desc[PLD_IRQ_CF0 + i].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CF0 + i].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CF0 + i].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_CF0 + i].action = 0;
irq_desc[PLD_IRQ_CF0 + i].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr
@@ -212,7 +212,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
/* INT#76: 16552D#0 IREQ on PLD */
irq_desc[PLD_IRQ_UART0].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_UART0].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_UART0].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_UART0].action = 0;
irq_desc[PLD_IRQ_UART0].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr
@@ -221,7 +221,7 @@ void __init init_IRQ(void)
/* INT#77: 16552D#1 IREQ on PLD */
irq_desc[PLD_IRQ_UART1].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_UART1].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_UART1].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_UART1].action = 0;
irq_desc[PLD_IRQ_UART1].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr
@@ -232,7 +232,7 @@ void __init init_IRQ(void)
#if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE)
/* INT#80: AK4524 IREQ on PLD */
irq_desc[PLD_IRQ_SNDINT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SNDINT].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_SNDINT].chip = &m32700ut_pld_irq_type;
irq_desc[PLD_IRQ_SNDINT].action = 0;
irq_desc[PLD_IRQ_SNDINT].depth = 1; /* disable nested irq */
pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index d6d582a5abb0..a226668f20c3 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -94,8 +94,7 @@ pmd_t *get_pointer_table (void)
PD_MARKBITS(dp) = mask & ~tmp;
if (!PD_MARKBITS(dp)) {
/* move to end of list */
- list_del(dp);
- list_add_tail(dp, &ptable_list);
+ list_move_tail(dp, &ptable_list);
}
return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
@@ -123,8 +122,7 @@ int free_pointer_table (pmd_t *ptable)
* move this descriptor to the front of the list, since
* it has one or more free tables.
*/
- list_del(dp);
- list_add(dp, &ptable_list);
+ list_move(dp, &ptable_list);
}
return 0;
}
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index f04a1d25f1a2..97c7bfde8ae8 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -119,8 +119,7 @@ static inline int refill(void)
if(hole->end == prev->start) {
hole->size += prev->size;
hole->end = prev->end;
- list_del(&(prev->list));
- list_add(&(prev->list), &hole_cache);
+ list_move(&(prev->list), &hole_cache);
ret++;
}
@@ -182,8 +181,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
#endif
return hole->end;
} else if(hole->size == newlen) {
- list_del(&(hole->list));
- list_add(&(hole->list), &hole_cache);
+ list_move(&(hole->list), &hole_cache);
dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
dvma_allocs++;
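The memory.c hunks above and the sun3dvma.c hunks here replace open-coded list_del() + list_add()/list_add_tail() pairs with the combined helpers. A minimal sketch of those helpers, assuming the usual <linux/list.h> semantics (__list_del, list_add and list_add_tail are the existing list primitives), showing that the change is purely a simplification:

	static inline void list_move(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink from the current position */
		list_add(list, head);			/* re-insert at the head of "head"  */
	}

	static inline void list_move_tail(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink from the current position */
		list_add_tail(list, head);		/* re-insert at the tail of "head"  */
	}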
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 3cde6822ead1..e767f2ddae72 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -5,7 +5,7 @@
mainmenu "uClinux/68k (w/o MMU) Kernel Configuration"
-config M68KNOMMU
+config M68K
bool
default y
@@ -119,6 +119,11 @@ config M5307
help
Motorola ColdFire 5307 processor support.
+config M532x
+ bool "MCF532x"
+ help
+ Freescale (Motorola) ColdFire 532x processor support.
+
config M5407
bool "MCF5407"
help
@@ -133,125 +138,43 @@ config M527x
config COLDFIRE
bool
- depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M5407)
+ depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407)
default y
-choice
- prompt "CPU CLOCK Frequency"
- default AUTO
-
-config CLOCK_AUTO
- bool "AUTO"
- ---help---
- Define the CPU clock frequency in use. On many boards you don't
- really need to know, so you can select the AUTO option. On some
- boards you need to know the real clock frequency to determine other
- system timing (for example baud rate dividors, etc). Some processors
- have an internal PLL and you can select a frequency to run at.
- You need to know a little about the internals of your processor to
- set this. If in doubt choose the AUTO option.
-
-config CLOCK_11MHz
- bool "11MHz"
- help
- Select a 11MHz CPU clock frequency.
-
-config CLOCK_16MHz
- bool "16MHz"
- help
- Select a 16MHz CPU clock frequency.
-
-config CLOCK_20MHz
- bool "20MHz"
- help
- Select a 20MHz CPU clock frequency.
-
-config CLOCK_24MHz
- bool "24MHz"
- help
- Select a 24MHz CPU clock frequency.
-
-config CLOCK_25MHz
- bool "25MHz"
- help
- Select a 25MHz CPU clock frequency.
-
-config CLOCK_33MHz
- bool "33MHz"
- help
- Select a 33MHz CPU clock frequency.
-
-config CLOCK_40MHz
- bool "40MHz"
- help
- Select a 40MHz CPU clock frequency.
-
-config CLOCK_45MHz
- bool "45MHz"
- help
- Select a 45MHz CPU clock frequency.
-
-config CLOCK_48MHz
- bool "48MHz"
- help
- Select a 48MHz CPU clock frequency.
-
-config CLOCK_50MHz
- bool "50MHz"
- help
- Select a 50MHz CPU clock frequency.
-
-config CLOCK_54MHz
- bool "54MHz"
- help
- Select a 54MHz CPU clock frequency.
-
-config CLOCK_60MHz
- bool "60MHz"
- help
- Select a 60MHz CPU clock frequency.
-
-config CLOCK_62_5MHz
- bool "62.5MHz"
- help
- Select a 62.5MHz CPU clock frequency.
-
-config CLOCK_64MHz
- bool "64MHz"
- help
- Select a 64MHz CPU clock frequency.
-
-config CLOCK_66MHz
- bool "66MHz"
- help
- Select a 66MHz CPU clock frequency.
-
-config CLOCK_70MHz
- bool "70MHz"
- help
- Select a 70MHz CPU clock frequency.
-
-config CLOCK_100MHz
- bool "100MHz"
- help
- Select a 100MHz CPU clock frequency.
-
-config CLOCK_140MHz
- bool "140MHz"
- help
- Select a 140MHz CPU clock frequency.
-
-config CLOCK_150MHz
- bool "150MHz"
- help
- Select a 150MHz CPU clock frequency.
-
-config CLOCK_166MHz
- bool "166MHz"
+config CLOCK_SET
+ bool "Enable setting the CPU clock frequency"
+ default n
help
- Select a 166MHz CPU clock frequency.
-
-endchoice
+ On some CPUs you do not need to know what the core CPU clock
+ frequency is. On these you can disable clock setting. On some
+ traditional 68K parts, and on all ColdFire parts you need to set
+ the appropriate CPU clock frequency. On these devices many of the
+ onboard peripherals derive their timing from the master CPU clock
+ frequency.
+
+config CLOCK_FREQ
+ int "Set the core clock frequency"
+ default "66666666"
+ depends on CLOCK_SET
+ help
+ Define the CPU clock frequency in use. This is the core clock
+ frequency; it may or may not be the same as the external clock
+ crystal fitted to your board. Some processors have an internal
+ PLL and can have their frequency programmed at run time, others
+ use internal dividers. In general the kernel won't set up a PLL
+ if one is fitted (there are some exceptions). This value will be
+ specific to the exact CPU that you are using.
+
+config CLOCK_DIV
+ int "Set the core/bus clock divide ratio"
+ default "1"
+ depends on CLOCK_SET
+ help
+ On many SoC style CPUs the master CPU clock is also used to drive
+ on-chip peripherals. The clock that is distributed to these
+ peripherals is sometimes a fixed ratio of the master clock
+ frequency. If so then set this to the divide ratio of the
+ master clock to the peripheral clock. If not sure then select 1.
config OLDMASK
bool "Old mask 5307 (1H55J) silicon"
@@ -377,6 +300,12 @@ config COBRA5272
help
Support for the senTec COBRA5272 board.
+config AVNET5282
+ bool "Avnet 5282 board support"
+ depends on M528x
+ help
+ Support for the Avnet 5282 board.
+
config M5282EVB
bool "Motorola M5282EVB board support"
depends on M528x
@@ -419,6 +348,18 @@ config SECUREEDGEMP3
help
Support for the SnapGear SecureEdge/MP3 platform.
+config M5329EVB
+ bool "Freescale (Motorola) M5329EVB board support"
+ depends on M532x
+ help
+ Support for the Freescale (Motorola) M5329EVB board.
+
+config COBRA5329
+ bool "senTec COBRA5329 board support"
+ depends on M532x
+ help
+ Support for the senTec COBRA5329 board.
+
config M5407C3
bool "Motorola M5407C3 board support"
depends on M5407
@@ -487,7 +428,7 @@ config ARNEWSH
config FREESCALE
bool
default y
- depends on (M5206eC3 || M5208EVB || M5235EVB || M5249C3 || M5271EVB || M5272C3 || M5275EVB || M5282EVB || M5307C3 || M5407C3)
+ depends on (M5206eC3 || M5208EVB || M5235EVB || M5249C3 || M5271EVB || M5272C3 || M5275EVB || M5282EVB || M5307C3 || M5329EVB || M5407C3)
config HW_FEITH
bool
@@ -508,6 +449,11 @@ config SNEHA
bool
default y
depends on CPU16B
+
+config AVNET
+ bool
+ default y
+ depends on (AVNET5282)
config LARGE_ALLOCS
bool "Allow allocating large blocks (> 1MB) of memory"
@@ -526,38 +472,46 @@ config 4KSTACKS
running more threads on a system and also reduces the pressure
on the VM subsystem for higher order allocations.
-choice
- prompt "RAM size"
- default AUTO
-
-config RAMAUTO
- bool "AUTO"
- ---help---
- Configure the RAM size on your platform. Many platforms can auto
- detect this, on those choose the AUTO option. Otherwise set the
- RAM size you intend using.
+comment "RAM configuration"
-config RAM4MB
- bool "4MiB"
+config RAMBASE
+ hex "Address of the base of RAM"
+ default "0"
help
- Set RAM size to be 4MiB.
+ Define the address that RAM starts at. On many platforms this is
+ 0, the base of the address space, and this is the default. Some
+ platforms choose to set up their RAM at other addresses within the
+ processor address space.
-config RAM8MB
- bool "8MiB"
+config RAMSIZE
+ hex "Size of RAM (in bytes)"
+ default "0x400000"
help
- Set RAM size to be 8MiB.
+ Define the size of the system RAM. If you select 0 then the
+ kernel will try to probe the RAM size at runtime. This is not
+ supported on all CPU types.
-config RAM16MB
- bool "16MiB"
+config VECTORBASE
+ hex "Address of the base of system vectors"
+ default "0"
help
- Set RAM size to be 16MiB.
+ Define the address of the system vectors. Commonly this is
+ put at the start of RAM, but it doesn't have to be. On ColdFire
+ platforms this address is programmed into the VBR register, thus
+ actually setting the address to use.
-config RAM32MB
- bool "32MiB"
+config KERNELBASE
+ hex "Address of the base of kernel code"
+ default "0x400"
help
- Set RAM size to be 32MiB.
-
-endchoice
+ Typically on m68k systems the kernel will not start at the base
+ of RAM, but usually some small offset from it. Define the start
+ address of the kernel here. The most common setup will have the
+ processor vectors at the base of RAM and then the start of the
+ kernel. On some platforms some RAM is reserved for boot loaders
+ and the kernel starts after that. The 0x400 default was based on
+ a system with the RAM based at address 0, and leaving enough room
+ for the theoretical maximum number of 256 vectors.
choice
prompt "RAM bus width"
@@ -565,7 +519,7 @@ choice
config RAMAUTOBIT
bool "AUTO"
- ---help---
+ help
Select the physical RAM data bus size. Not needed on most platforms,
so you can generally choose AUTO.
@@ -586,6 +540,59 @@ config RAM32BIT
endchoice
+comment "ROM configuration"
+
+config ROM
+ bool "Specify ROM linker regions"
+ default n
+ help
+ Define a ROM region for the linker script. This creates a kernel
+ that can be stored in flash, with the text and data regions
+ possibly being copied out to RAM at startup.
+
+config ROMBASE
+ hex "Address of the base of ROM device"
+ default "0"
+ depends on ROM
+ help
+ Define the address that the ROM region starts at. Some platforms
+ use this to set their chip select region accordingly for the boot
+ device.
+
+config ROMVEC
+ hex "Address of the base of the ROM vectors"
+ default "0"
+ depends on ROM
+ help
+ This is almost always the same as the base of the ROM, since on
+ all 68000-type variants the vectors are at the base of the boot
+ device on system startup.
+
+config ROMVECSIZE
+ hex "Size of ROM vector region (in bytes)"
+ default "0x400"
+ depends on ROM
+ help
+ Define the size of the vector region in ROM. For most 68000
+ variants this would be 0x400 bytes in size. Set to 0 if you do
+ not want a vector region at the start of the ROM.
+
+config ROMSTART
+ hex "Address of the base of system image in ROM"
+ default "0x400"
+ depends on ROM
+ help
+ Define the start address of the system image in ROM. Commonly this
+ is straight after the ROM vectors.
+
+config ROMSIZE
+ hex "Size of the ROM device"
+ default "0x100000"
+ depends on ROM
+ help
+ Size of the ROM device. On some platforms this is used to set up
+ the chip select that controls the boot ROM device.
+
choice
prompt "Kernel executes from"
---help---
@@ -599,7 +606,9 @@ config RAMKERNEL
config ROMKERNEL
bool "ROM"
help
- The kernel will be resident in FLASH/ROM when running.
+ The kernel will be resident in FLASH/ROM when running. This is
+ often referred to as Execute-in-Place (XIP), since the kernel
+ code executes from the position it is stored in the FLASH/ROM.
endchoice
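For the new CLOCK_FREQ and CLOCK_DIV options introduced earlier in this Kconfig, a minimal sketch (hypothetical macro names) of how a platform header might consume the two symbols, deriving the peripheral/bus clock by the divide ratio described in the help text:

	#define SYS_CLK_HZ	CONFIG_CLOCK_FREQ			/* core clock, straight from Kconfig */
	#define SYS_BUSCLK_HZ	(CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV)	/* clock fed to on-chip peripherals  */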
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 6f880cbff1c8..8951793fd8d4 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -21,6 +21,7 @@ platform-$(CONFIG_M527x) := 527x
platform-$(CONFIG_M5272) := 5272
platform-$(CONFIG_M528x) := 528x
platform-$(CONFIG_M5307) := 5307
+platform-$(CONFIG_M532x) := 532x
platform-$(CONFIG_M5407) := 5407
PLATFORM := $(platform-y)
@@ -44,6 +45,7 @@ board-$(CONFIG_senTec) := senTec
board-$(CONFIG_SNEHA) := SNEHA
board-$(CONFIG_M5208EVB) := M5208EVB
board-$(CONFIG_MOD5272) := MOD5272
+board-$(CONFIG_AVNET) := AVNET
BOARD := $(board-y)
model-$(CONFIG_RAMKERNEL) := ram
@@ -65,6 +67,7 @@ cpuclass-$(CONFIG_M527x) := 5307
cpuclass-$(CONFIG_M5272) := 5307
cpuclass-$(CONFIG_M528x) := 5307
cpuclass-$(CONFIG_M5307) := 5307
+cpuclass-$(CONFIG_M532x) := 5307
cpuclass-$(CONFIG_M5407) := 5307
cpuclass-$(CONFIG_M68328) := 68328
cpuclass-$(CONFIG_M68EZ328) := 68328
@@ -81,16 +84,17 @@ export PLATFORM BOARD MODEL CPUCLASS
#
# Some CFLAG additions based on specific CPU type.
#
-cflags-$(CONFIG_M5206) := -m5200 -Wa,-S -Wa,-m5200
-cflags-$(CONFIG_M5206e) := -m5200 -Wa,-S -Wa,-m5200
-cflags-$(CONFIG_M520x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M523x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5249) := -m5200 -Wa,-S -Wa,-m5200
-cflags-$(CONFIG_M527x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5272) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M528x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5307) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5407) := -m5200 -Wa,-S -Wa,-m5200
+cflags-$(CONFIG_M5206) := -m5200
+cflags-$(CONFIG_M5206e) := -m5200
+cflags-$(CONFIG_M520x) := -m5307
+cflags-$(CONFIG_M523x) := -m5307
+cflags-$(CONFIG_M5249) := -m5200
+cflags-$(CONFIG_M527x) := -m5307
+cflags-$(CONFIG_M5272) := -m5307
+cflags-$(CONFIG_M528x) := -m5307
+cflags-$(CONFIG_M5307) := -m5307
+cflags-$(CONFIG_M532x) := -m5307
+cflags-$(CONFIG_M5407) := -m5200
cflags-$(CONFIG_M68328) := -m68000
cflags-$(CONFIG_M68EZ328) := -m68000
cflags-$(CONFIG_M68VZ328) := -m68000
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 2d59ba1a79ba..3891de09ac23 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,21 +1,22 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-uc0
-# Wed Aug 31 15:03:26 2005
+# Linux kernel version: 2.6.17
+# Tue Jun 27 12:57:06 2006
#
-CONFIG_M68KNOMMU=y
+CONFIG_M68K=y
# CONFIG_MMU is not set
# CONFIG_FPU is not set
-CONFIG_UID16=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_TIME_LOW_RES=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -23,26 +24,30 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
# General setup
#
CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_SYSCTL is not set
# CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
-# CONFIG_KOBJECT_UEVENT is not set
# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EMBEDDED=y
# CONFIG_KALLSYMS is not set
+# CONFIG_HOTPLUG is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
+CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -50,6 +55,24 @@ CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
#
+# Block layer
+#
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
# Processor type and features
#
# CONFIG_M68328 is not set
@@ -58,6 +81,7 @@ CONFIG_BASE_SMALL=0
# CONFIG_M68360 is not set
# CONFIG_M5206 is not set
# CONFIG_M5206e is not set
+# CONFIG_M520x is not set
# CONFIG_M523x is not set
# CONFIG_M5249 is not set
# CONFIG_M5271 is not set
@@ -65,29 +89,12 @@ CONFIG_M5272=y
# CONFIG_M5275 is not set
# CONFIG_M528x is not set
# CONFIG_M5307 is not set
+# CONFIG_M532x is not set
# CONFIG_M5407 is not set
CONFIG_COLDFIRE=y
-# CONFIG_CLOCK_AUTO is not set
-# CONFIG_CLOCK_11MHz is not set
-# CONFIG_CLOCK_16MHz is not set
-# CONFIG_CLOCK_20MHz is not set
-# CONFIG_CLOCK_24MHz is not set
-# CONFIG_CLOCK_25MHz is not set
-# CONFIG_CLOCK_33MHz is not set
-# CONFIG_CLOCK_40MHz is not set
-# CONFIG_CLOCK_45MHz is not set
-# CONFIG_CLOCK_48MHz is not set
-# CONFIG_CLOCK_50MHz is not set
-# CONFIG_CLOCK_54MHz is not set
-# CONFIG_CLOCK_60MHz is not set
-# CONFIG_CLOCK_62_5MHz is not set
-# CONFIG_CLOCK_64MHz is not set
-CONFIG_CLOCK_66MHz=y
-# CONFIG_CLOCK_70MHz is not set
-# CONFIG_CLOCK_100MHz is not set
-# CONFIG_CLOCK_140MHz is not set
-# CONFIG_CLOCK_150MHz is not set
-# CONFIG_CLOCK_166MHz is not set
+CONFIG_CLOCK_SET=y
+CONFIG_CLOCK_FREQ=66666666
+CONFIG_CLOCK_DIV=1
#
# Platform
@@ -102,11 +109,14 @@ CONFIG_M5272C3=y
CONFIG_FREESCALE=y
# CONFIG_LARGE_ALLOCS is not set
CONFIG_4KSTACKS=y
-CONFIG_RAMAUTO=y
-# CONFIG_RAM4MB is not set
-# CONFIG_RAM8MB is not set
-# CONFIG_RAM16MB is not set
-# CONFIG_RAM32MB is not set
+
+#
+# RAM configuration
+#
+CONFIG_RAMBASE=0x0
+CONFIG_RAMSIZE=0x800000
+CONFIG_VECTORBASE=0x0
+CONFIG_KERNELBASE=0x20000
CONFIG_RAMAUTOBIT=y
# CONFIG_RAM8BIT is not set
# CONFIG_RAM16BIT is not set
@@ -119,6 +129,8 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
@@ -140,6 +152,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_BINFMT_FLAT=y
# CONFIG_BINFMT_ZFLAT is not set
# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_AOUT is not set
# CONFIG_BINFMT_MISC is not set
#
@@ -155,6 +168,7 @@ CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
@@ -171,18 +185,30 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-# CONFIG_IP_TCPDIAG is not set
-# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_INET_DIAG is not set
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETFILTER is not set
#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@@ -195,8 +221,11 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -205,6 +234,7 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
#
# Device Drivers
@@ -218,6 +248,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
# Memory Technology Devices (MTD)
#
CONFIG_MTD=y
@@ -235,6 +270,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_FTL is not set
# CONFIG_NFTL is not set
# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
#
# RAM/ROM/Flash chip drivers
@@ -254,13 +290,13 @@ CONFIG_MTD_CFI_I2=y
CONFIG_MTD_RAM=y
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_UCLINUX=y
-# CONFIG_MTD_SNAPGEARuC is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -269,7 +305,6 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLKMTD is not set
# CONFIG_MTD_BLOCK2MTD is not set
#
@@ -285,6 +320,11 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_NAND is not set
#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
# Parallel port support
#
# CONFIG_PARPORT is not set
@@ -296,7 +336,6 @@ CONFIG_MTD_UCLINUX=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
@@ -304,16 +343,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_BLK_DEV_INITRD is not set
-CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
# CONFIG_ATA_OVER_ETH is not set
#
@@ -324,6 +354,7 @@ CONFIG_IOSCHED_NOOP=y
#
# SCSI device support
#
+# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
#
@@ -354,13 +385,15 @@ CONFIG_NETDEVICES=y
# CONFIG_TUN is not set
#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_NE2000 is not set
-# CONFIG_NET_PCI is not set
CONFIG_FEC=y
# CONFIG_FEC2 is not set
@@ -392,6 +425,7 @@ CONFIG_PPP=y
# CONFIG_PPP_SYNC_TTY is not set
# CONFIG_PPP_DEFLATE is not set
# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
# CONFIG_PPPOE is not set
# CONFIG_SLIP is not set
# CONFIG_SHAPER is not set
@@ -425,8 +459,6 @@ CONFIG_PPP=y
#
# CONFIG_VT is not set
# CONFIG_SERIAL_NONSTANDARD is not set
-# CONFIG_LEDMAN is not set
-# CONFIG_RESETSWITCH is not set
#
# Serial drivers
@@ -450,8 +482,6 @@ CONFIG_LEGACY_PTY_COUNT=256
# Watchdog Cards
#
# CONFIG_WATCHDOG is not set
-# CONFIG_MCFWATCHDOG is not set
-# CONFIG_RTC is not set
# CONFIG_GEN_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
@@ -464,14 +494,19 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# TPM devices
#
-# CONFIG_MCF_QSPI is not set
-# CONFIG_M41T11M6 is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
-# CONFIG_I2C_SENSOR is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
#
# Dallas's 1-wire bus
@@ -482,6 +517,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# Hardware Monitoring support
#
# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
#
# Misc devices
@@ -491,6 +527,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
@@ -503,11 +540,6 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_FB is not set
#
-# SPI support
-#
-# CONFIG_SPI is not set
-
-#
# Sound
#
# CONFIG_SOUND is not set
@@ -517,6 +549,11 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
#
# USB Gadget Support
@@ -529,29 +566,43 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_MMC is not set
#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
# InfiniBand support
#
#
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-
-#
-# XFS support
-#
# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
CONFIG_ROMFS_FS=y
# CONFIG_INOTIFY is not set
@@ -559,6 +610,7 @@ CONFIG_ROMFS_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
@@ -581,6 +633,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -611,6 +664,7 @@ CONFIG_RAMFS=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
#
# Partition Types
@@ -627,8 +681,12 @@ CONFIG_MSDOS_PARTITION=y
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_UNWIND_INFO is not set
# CONFIG_FULLDEBUG is not set
# CONFIG_HIGHPROFILE is not set
# CONFIG_BOOTPARAM is not set
@@ -655,5 +713,6 @@ CONFIG_LOG_BUF_SHIFT=14
# Library routines
#
# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
# CONFIG_CRC32 is not set
# CONFIG_LIBCRC32C is not set
diff --git a/arch/m68knommu/kernel/comempci.c b/arch/m68knommu/kernel/comempci.c
index 8670938f1107..db7a0c1cebae 100644
--- a/arch/m68knommu/kernel/comempci.c
+++ b/arch/m68knommu/kernel/comempci.c
@@ -357,7 +357,8 @@ void pcibios_fixup_bus(struct pci_bus *b)
/*****************************************************************************/
-void pcibios_align_resource(void *data, struct resource *res, unsigned long size, unsigned long align)
+void pcibios_align_resource(void *data, struct resource *res,
+ resource_size_t size, resource_size_t align)
{
}
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 93120b9bfff1..99d038e9ab31 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -42,7 +42,6 @@
#include <asm/pgtable.h>
#endif
-unsigned long rom_length;
unsigned long memory_start;
unsigned long memory_end;
@@ -56,29 +55,29 @@ static void dummy_waitbut(void)
{
}
-void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)) = NULL;
-void (*mach_tick)( void ) = NULL;
+void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *));
+void (*mach_tick)( void );
/* machine dependent keyboard functions */
-int (*mach_keyb_init) (void) = NULL;
-int (*mach_kbdrate) (struct kbd_repeat *) = NULL;
-void (*mach_kbd_leds) (unsigned int) = NULL;
+int (*mach_keyb_init) (void);
+int (*mach_kbdrate) (struct kbd_repeat *);
+void (*mach_kbd_leds) (unsigned int);
/* machine dependent irq functions */
-void (*mach_init_IRQ) (void) = NULL;
-irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *) = NULL;
-int (*mach_get_irq_list) (struct seq_file *, void *) = NULL;
-void (*mach_process_int) (int irq, struct pt_regs *fp) = NULL;
+void (*mach_init_IRQ) (void);
+irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *);
+int (*mach_get_irq_list) (struct seq_file *, void *);
+void (*mach_process_int) (int irq, struct pt_regs *fp);
void (*mach_trap_init) (void);
/* machine dependent timer functions */
-unsigned long (*mach_gettimeoffset) (void) = NULL;
-void (*mach_gettod) (int*, int*, int*, int*, int*, int*) = NULL;
-int (*mach_hwclk) (int, struct hwclk_time*) = NULL;
-int (*mach_set_clock_mmss) (unsigned long) = NULL;
-void (*mach_mksound)( unsigned int count, unsigned int ticks ) = NULL;
-void (*mach_reset)( void ) = NULL;
+unsigned long (*mach_gettimeoffset) (void);
+void (*mach_gettod) (int*, int*, int*, int*, int*, int*);
+int (*mach_hwclk) (int, struct hwclk_time*);
+int (*mach_set_clock_mmss) (unsigned long);
+void (*mach_mksound)( unsigned int count, unsigned int ticks );
+void (*mach_reset)( void );
void (*waitbut)(void) = dummy_waitbut;
-void (*mach_debug_init)(void) = NULL;
-void (*mach_halt)( void ) = NULL;
-void (*mach_power_off)( void ) = NULL;
+void (*mach_debug_init)(void);
+void (*mach_halt)( void );
+void (*mach_power_off)( void );
#ifdef CONFIG_M68000
@@ -129,6 +128,9 @@ void (*mach_power_off)( void ) = NULL;
#if defined(CONFIG_M5307)
#define CPU "COLDFIRE(m5307)"
#endif
+#if defined(CONFIG_M532x)
+ #define CPU "COLDFIRE(m532x)"
+#endif
#if defined(CONFIG_M5407)
#define CPU "COLDFIRE(m5407)"
#endif
@@ -267,34 +269,6 @@ void setup_arch(char **cmdline_p)
paging_init();
}
-int get_cpuinfo(char * buffer)
-{
- char *cpu, *mmu, *fpu;
- u_long clockfreq;
-
- cpu = CPU;
- mmu = "none";
- fpu = "none";
-
-#ifdef CONFIG_COLDFIRE
- clockfreq = (loops_per_jiffy*HZ)*3;
-#else
- clockfreq = (loops_per_jiffy*HZ)*16;
-#endif
-
- return(sprintf(buffer, "CPU:\t\t%s\n"
- "MMU:\t\t%s\n"
- "FPU:\t\t%s\n"
- "Clocking:\t%lu.%1luMHz\n"
- "BogoMips:\t%lu.%02lu\n"
- "Calibration:\t%lu loops\n",
- cpu, mmu, fpu,
- clockfreq/1000000,(clockfreq/100000)%10,
- (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100,
- (loops_per_jiffy*HZ)));
-
-}
-
/*
* Get CPU information for use by the procfs.
*/
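The setup.c hunk above drops the explicit "= NULL" initializers from the mach_* function pointers. This is safe because objects with static storage duration are zero-initialized by the C standard, so the pointers still start out as NULL; the practical effect is that they can now be placed in .bss rather than in initialized data. A minimal standalone illustration (hypothetical pointer names):

	#include <assert.h>

	void (*example_reset_hook)(void);		/* no initializer: implicitly NULL  */
	void (*example_halt_hook)(void) = 0;		/* explicit initializer: same value */

	int main(void)
	{
		assert(example_reset_hook == 0);	/* both start out as null pointers */
		assert(example_halt_hook == 0);
		return 0;
	}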
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index 8e2c5a88efa7..437f8c6c14a0 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -608,7 +608,7 @@ adjust_stack:
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#if DEBUG
+#if defined(DEBUG)
printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
@@ -678,7 +678,7 @@ adjust_stack:
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#if DEBUG
+#if defined(DEBUG)
printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index 5bc068462864..44ff74e643b1 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -93,12 +93,12 @@ asmlinkage void buserr_c(struct frame *fp)
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
-#if DEBUG
+#if defined(DEBUG)
printk (KERN_DEBUG "*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
die_if_kernel("bad frame format",&fp->ptregs,0);
-#if DEBUG
+#if defined(DEBUG)
printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
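The signal.c and traps.c hunks above change "#if DEBUG" to "#if defined(DEBUG)". The point is that kernel files commonly enable debugging with a bare "#define DEBUG" (no value); "#if DEBUG" then expands to an empty expression and fails to preprocess, whereas "#if defined(DEBUG)" only tests whether the macro exists. A small standalone sketch:

	#include <stdio.h>

	#define DEBUG			/* bare define, no value - common in kernel sources */

	int main(void)
	{
	#if defined(DEBUG)		/* works: only asks whether the macro is defined */
		printf("debug build\n");
	#endif
		/*
		 * "#if DEBUG" would be a preprocessor error here, because the bare
		 * "#define DEBUG" above expands to nothing inside the #if expression.
		 */
		return 0;
	}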
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index a331cc90797c..59ced831b792 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -1,205 +1,30 @@
/*
* vmlinux.lds.S -- master linker script for m68knommu arch
*
- * (C) Copyright 2002-2004, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
*
- * This ends up looking compilcated, because of the number of
- * address variations for ram and rom/flash layouts. The real
- * work of the linker script is all at the end, and reasonably
- * strait forward.
+ * This linker script is equipped to build either ROM-loaded or
+ * RAM-run kernels.
*/
#include <linux/config.h>
#include <asm-generic/vmlinux.lds.h>
-/*
- * Original Palm pilot (same for Xcopilot).
- * There is really only a rom target for this.
- */
-#ifdef CONFIG_PILOT3
-#define ROMVEC_START 0x10c00000
-#define ROMVEC_LENGTH 0x10400
-#define ROM_START 0x10c10400
-#define ROM_LENGTH 0xfec00
-#define ROM_END 0x10d00000
-#define RAMVEC_START 0x00000000
-#define RAMVEC_LENGTH 0x400
-#define RAM_START 0x10000400
-#define RAM_LENGTH 0xffc00
-#define RAM_END 0x10100000
-#define _ramend _ram_end_notused
-#define DATA_ADDR RAM_START
-#endif
-
-/*
- * Same setup on both the uCsimm and uCdimm.
- */
-#if defined(CONFIG_UCSIMM) || defined(CONFIG_UCDIMM)
-#ifdef CONFIG_RAMKERNEL
-#define ROMVEC_START 0x10c10000
-#define ROMVEC_LENGTH 0x400
-#define ROM_START 0x10c10400
-#define ROM_LENGTH 0x1efc00
-#define ROM_END 0x10e00000
-#define RAMVEC_START 0x00000000
-#define RAMVEC_LENGTH 0x400
-#define RAM_START 0x00020400
-#define RAM_LENGTH 0x7dfc00
-#define RAM_END 0x00800000
-#endif
-#ifdef CONFIG_ROMKERNEL
-#define ROMVEC_START 0x10c10000
-#define ROMVEC_LENGTH 0x400
-#define ROM_START 0x10c10400
-#define ROM_LENGTH 0x1efc00
-#define ROM_END 0x10e00000
-#define RAMVEC_START 0x00000000
-#define RAMVEC_LENGTH 0x400
-#define RAM_START 0x00020000
-#define RAM_LENGTH 0x600000
-#define RAM_END 0x00800000
-#endif
-#ifdef CONFIG_HIMEMKERNEL
-#define ROMVEC_START 0x00600000
-#define ROMVEC_LENGTH 0x400
-#define ROM_START 0x00600400
-#define ROM_LENGTH 0x1efc00
-#define ROM_END 0x007f0000
-#define RAMVEC_START 0x00000000
-#define RAMVEC_LENGTH 0x400
-#define RAM_START 0x00020000
-#define RAM_LENGTH 0x5e0000
-#define RAM_END 0x00600000
-#endif
-#endif
-
-#ifdef CONFIG_DRAGEN2
-#define RAM_START 0x10000
-#define RAM_LENGTH 0x7f0000
-#endif
-
-#ifdef CONFIG_UCQUICC
-#define ROMVEC_START 0x00000000
-#define ROMVEC_LENGTH 0x404
-#define ROM_START 0x00000404
-#define ROM_LENGTH 0x1ff6fc
-#define ROM_END 0x00200000
-#define RAMVEC_START 0x00200000
-#define RAMVEC_LENGTH 0x404
-#define RAM_START 0x00200404
-#define RAM_LENGTH 0x1ff6fc
-#define RAM_END 0x00400000
-#endif
-
-/*
- * The standard Arnewsh 5206 board only has 1MiB of ram. Not normally
- * enough to be useful. Assume the user has fitted something larger,
- * at least 4MiB in size. No point in not letting the kernel completely
- * link, it will be obvious if it is too big when they go to load it.
- */
-#if defined(CONFIG_ARN5206)
-#define RAM_START 0x10000
-#define RAM_LENGTH 0x3f0000
-#endif
-
-/*
- * The Motorola 5206eLITE board only has 1MiB of static RAM.
- */
-#if defined(CONFIG_ELITE)
-#define RAM_START 0x30020000
-#define RAM_LENGTH 0xe0000
-#endif
-
-/*
- * All the Motorola eval boards have the same basic arrangement.
- * The end of RAM will vary depending on how much ram is fitted,
- * but this isn't important here, we assume at least 4MiB.
- */
-#if defined(CONFIG_M5206eC3) || defined(CONFIG_M5249C3) || \
- defined(CONFIG_M5272C3) || defined(CONFIG_M5307C3) || \
- defined(CONFIG_ARN5307) || defined(CONFIG_M5407C3) || \
- defined(CONFIG_M5271EVB) || defined(CONFIG_M5275EVB) || \
- defined(CONFIG_M5235EVB)
-#define RAM_START 0x20000
-#define RAM_LENGTH 0x3e0000
-#endif
-
-/*
- * The Freescale 5208EVB board has 32MB of RAM.
- */
-#if defined(CONFIG_M5208EVB)
-#define RAM_START 0x40020000
-#define RAM_LENGTH 0x01fe0000
-#endif
-
-/*
- * The senTec COBRA5272 board has nearly the same memory layout as
- * the M5272C3. We assume 16MiB ram.
- */
-#if defined(CONFIG_COBRA5272)
-#define RAM_START 0x20000
-#define RAM_LENGTH 0xfe0000
-#endif
-
-#if defined(CONFIG_M5282EVB)
-#define RAM_START 0x10000
-#define RAM_LENGTH 0x3f0000
-#endif
-
-/*
- * The senTec COBRA5282 board has the same memory layout as the M5282EVB.
- */
-#if defined(CONFIG_COBRA5282)
-#define RAM_START 0x10000
-#define RAM_LENGTH 0x3f0000
-#endif
-
-
-/*
- * The EMAC SoM-5282EM module.
- */
-#if defined(CONFIG_SOM5282EM)
-#define RAM_START 0x10000
-#define RAM_LENGTH 0xff0000
-#endif
-
-
-/*
- * These flash boot boards use all of ram for operation. Again the
- * actual memory size is not important here, assume at least 4MiB.
- * They currently have no support for running in flash.
- */
-#if defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
- defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
- defined(CONFIG_HW_FEITH)
-#define RAM_START 0x400
-#define RAM_LENGTH 0x3ffc00
-#endif
-
-/*
- * Sneha Boards mimimun memory
- * The end of RAM will vary depending on how much ram is fitted,
- * but this isn't important here, we assume at least 4MiB.
- */
-#if defined(CONFIG_CPU16B)
-#define RAM_START 0x20000
-#define RAM_LENGTH 0x3e0000
-#endif
-
-#if defined(CONFIG_MOD5272)
-#define RAM_START 0x02000000
-#define RAM_LENGTH 0x00800000
-#define RAMVEC_START 0x20000000
-#define RAMVEC_LENGTH 0x00000400
-#endif
-
#if defined(CONFIG_RAMKERNEL)
+#define RAM_START CONFIG_KERNELBASE
+#define RAM_LENGTH (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
#define TEXT ram
#define DATA ram
#define INIT ram
#define BSS ram
#endif
#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
+#define RAM_START CONFIG_RAMBASE
+#define RAM_LENGTH CONFIG_RAMSIZE
+#define ROMVEC_START CONFIG_ROMVEC
+#define ROMVEC_LENGTH CONFIG_ROMVECSIZE
+#define ROM_START CONFIG_ROMSTART
+#define ROM_LENGTH CONFIG_ROMSIZE
#define TEXT rom
#define DATA ram
#define INIT ram
@@ -215,17 +40,10 @@ OUTPUT_ARCH(m68k)
ENTRY(_start)
MEMORY {
-#ifdef RAMVEC_START
- ramvec : ORIGIN = RAMVEC_START, LENGTH = RAMVEC_LENGTH
-#endif
ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
-#ifdef RAM_END
- eram : ORIGIN = RAM_END, LENGTH = 0
-#endif
#ifdef ROM_START
romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
- erom : ORIGIN = ROM_END, LENGTH = 0
#endif
}
@@ -302,19 +120,6 @@ SECTIONS {
_etext = . ;
} > TEXT
-#ifdef ROM_END
- . = ROM_END ;
- .erom : {
- __rom_end = . ;
- } > erom
-#endif
-#ifdef RAMVEC_START
- . = RAMVEC_START ;
- .ramvec : {
- __ramvec = .;
- } > ramvec
-#endif
-
.data DATA_ADDR : {
. = ALIGN(4);
_sdata = . ;
@@ -373,12 +178,5 @@ SECTIONS {
_ebss = . ;
} > BSS
-#ifdef RAM_END
- . = RAM_END ;
- .eram : {
- __ramend = . ;
- _ramend = . ;
- } > eram
-#endif
}
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c
index d79503fe6e42..70d1653be3da 100644
--- a/arch/m68knommu/mm/init.c
+++ b/arch/m68knommu/mm/init.c
@@ -63,8 +63,6 @@ static unsigned long empty_bad_page;
unsigned long empty_zero_page;
-extern unsigned long rom_length;
-
void show_mem(void)
{
unsigned long i;
@@ -178,11 +176,9 @@ void mem_init(void)
initk = (&__init_begin - &__init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
- printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
+ printk(KERN_INFO "Memory available: %luk/%luk RAM, (%dk kernel code, %dk data)\n",
tmp >> 10,
len >> 10,
- (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
- rom_length >> 10,
codek,
datak
);
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 8d1619dc1ea6..2fd37dcc309b 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_M5249) += timers.o
obj-$(CONFIG_M527x) += pit.o
obj-$(CONFIG_M5272) += timers.o
obj-$(CONFIG_M5307) += config.o timers.o
+obj-$(CONFIG_M532x) += timers.o
obj-$(CONFIG_M528x) += pit.o
obj-$(CONFIG_M5407) += timers.o
diff --git a/arch/m68knommu/platform/5307/entry.S b/arch/m68knommu/platform/5307/entry.S
index 89b180d4ed6a..9ddf5476ef8f 100644
--- a/arch/m68knommu/platform/5307/entry.S
+++ b/arch/m68knommu/platform/5307/entry.S
@@ -4,8 +4,8 @@
* Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
- * Copyright (C) 2004 Macq Electronique SA. (www.macqel.com)
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
+ * Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
*
* Based on:
*
@@ -56,32 +56,27 @@ sw_usp:
.globl inthandler
.globl fasthandler
+enosys:
+ mov.l #sys_ni_syscall,%d3
+ bra 1f
+
ENTRY(system_call)
SAVE_ALL
move #0x2000,%sr /* enable intrs again */
- movel #-LENOSYS,%d2
- movel %d2,PT_D0(%sp) /* default return value in d0 */
- /* original D0 is in orig_d0 */
- movel %d0,%d2
-
- /* save top of frame */
- pea %sp@
- jbsr set_esp0
- addql #4,%sp
-
- cmpl #NR_syscalls,%d2
- jcc ret_from_exception
+ cmpl #NR_syscalls,%d0
+ jcc enosys
lea sys_call_table,%a0
- lsll #2,%d2 /* movel %a0@(%d2:l:4),%d3 */
- movel %a0@(%d2),%d3
- jeq ret_from_exception
- lsrl #2,%d2
+ lsll #2,%d0 /* movel %a0@(%d0:l:4),%d3 */
+ movel %a0@(%d0),%d3
+ jeq enosys
+1:
movel %sp,%d2 /* get thread_info pointer */
andl #-THREAD_SIZE,%d2 /* at start of kernel stack */
movel %d2,%a0
- btst #TIF_SYSCALL_TRACE,%a0@(TI_FLAGS)
+ movel %sp,%a0@(THREAD_ESP0) /* save top of frame */
+ btst #(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
bnes 1f
movel %d3,%a0
@@ -126,8 +121,8 @@ Luser_return:
jne Lwork_to_do /* still work to do */
Lreturn:
- move #0x2700,%sr /* disable intrs */
- movel sw_usp,%a0 /* get usp */
+ move #0x2700,%sr /* disable intrs */
+ movel sw_usp,%a0 /* get usp */
movel %sp@(PT_PC),%a0@- /* copy exception program counter */
movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */
moveml %sp@,%d1-%d5/%a0-%a2
@@ -170,7 +165,7 @@ ENTRY(inthandler)
movel %d0,%sp@(PT_ORIG_D0)
addql #1,local_irq_count
- movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */
+ movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */
leal per_cpu__kstat+STAT_IRQ,%a0
@@ -184,7 +179,7 @@ ENTRY(inthandler)
movel %sp,%sp@- /* push regs arg onto stack */
movel %a0@(8),%sp@- /* push devid arg */
- movel %d0,%sp@- /* push vector # on stack */
+ movel %d0,%sp@- /* push vector # on stack */
movel %a0@,%a0 /* get function to call */
jbsr %a0@ /* call vector handler */
@@ -201,7 +196,7 @@ ENTRY(inthandler)
ENTRY(fasthandler)
SAVE_LOCAL
- movew %sp@(PT_FORMATVEC),%d0
+ movew %sp@(PT_FORMATVEC),%d0
andl #0x03fc,%d0 /* mask out vector only */
leal per_cpu__kstat+STAT_IRQ,%a0
@@ -210,7 +205,7 @@ ENTRY(fasthandler)
movel %sp,%sp@- /* push regs arg onto stack */
clrl %sp@- /* push devid arg */
lsrl #2,%d0 /* calculate real vector # */
- movel %d0,%sp@- /* push vector # on stack */
+ movel %d0,%sp@- /* push vector # on stack */
lsll #4,%d0 /* adjust for array offset */
lea irq_list,%a0
@@ -265,4 +260,3 @@ ENTRY(resume)
movew %a1@(TASK_THREAD+THREAD_SR),%d0 /* restore thread status reg */
movew %d0, %sr
rts
-
diff --git a/arch/m68knommu/platform/5307/head.S b/arch/m68knommu/platform/5307/head.S
index c30c462b99b1..1d9eb301d7ac 100644
--- a/arch/m68knommu/platform/5307/head.S
+++ b/arch/m68knommu/platform/5307/head.S
@@ -3,7 +3,7 @@
/*
* head.S -- common startup code for ColdFire CPUs.
*
- * (C) Copyright 1999-2004, Greg Ungerer (gerg@snapgear.com).
+ * (C) Copyright 1999-2006, Greg Ungerer <gerg@snapgear.com>.
*/
/*****************************************************************************/
@@ -19,47 +19,15 @@
/*****************************************************************************/
/*
- * Define fixed memory sizes. Configuration of a fixed memory size
- * overrides everything else. If the user defined a size we just
- * blindly use it (they know what they are doing right :-)
- */
-#if defined(CONFIG_RAM32MB)
-#define MEM_SIZE 0x02000000 /* memory size 32Mb */
-#elif defined(CONFIG_RAM16MB)
-#define MEM_SIZE 0x01000000 /* memory size 16Mb */
-#elif defined(CONFIG_RAM8MB)
-#define MEM_SIZE 0x00800000 /* memory size 8Mb */
-#elif defined(CONFIG_RAM4MB)
-#define MEM_SIZE 0x00400000 /* memory size 4Mb */
-#elif defined(CONFIG_RAM1MB)
-#define MEM_SIZE 0x00100000 /* memory size 1Mb */
-#endif
-
-/*
- * Memory size exceptions for special cases. Some boards may be set
- * for auto memory sizing, but we can't do it that way for some reason.
- * For example the 5206eLITE board has static RAM, and auto-detecting
- * the SDRAM will do you no good at all. Same goes for the MOD5272.
- */
-#ifdef CONFIG_RAMAUTO
-#if defined(CONFIG_M5206eLITE)
-#define MEM_SIZE 0x00100000 /* 1MiB default memory */
-#endif
-#if defined(CONFIG_MOD5272)
-#define MEM_SIZE 0x00800000 /* 8MiB default memory */
-#endif
-#endif /* CONFIG_RAMAUTO */
-
-
-/*
- * If we don't have a fixed memory size now, then lets build in code
+ * If we don't have a fixed memory size, then let's build in code
* to auto detect the DRAM size. Obviously this is the prefered
- * method, and should work for most boards (it won't work for those
- * that do not have their RAM starting at address 0).
+ * method, and should work for most boards. It won't work for those
+ * that do not have their RAM starting at address 0, and it only
+ * works on SDRAM (not boards fitted with SRAM).
*/
-#if defined(MEM_SIZE)
+#if CONFIG_RAMSIZE != 0
.macro GET_MEM_SIZE
- movel #MEM_SIZE,%d0 /* hard coded memory size */
+ movel #CONFIG_RAMSIZE,%d0 /* hard coded memory size */
.endm
#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
@@ -98,37 +66,7 @@
.endm
#else
-#error "ERROR: I don't know how to determine your boards memory size?"
-#endif
-
-
-/*
- * Most ColdFire boards have their DRAM starting at address 0.
- * Notable exception is the 5206eLITE board, another is the MOD5272.
- */
-#if defined(CONFIG_M5206eLITE)
-#define MEM_BASE 0x30000000
-#endif
-#if defined(CONFIG_MOD5272)
-#define MEM_BASE 0x02000000
-#define VBR_BASE 0x20000000 /* vectors in SRAM */
-#endif
-#if defined(CONFIG_M5208EVB)
-#define MEM_BASE 0x40000000
-#endif
-
-#ifndef MEM_BASE
-#define MEM_BASE 0x00000000 /* memory base at address 0 */
-#endif
-
-/*
- * The default location for the vectors is at the base of RAM.
- * Some boards might like to use internal SRAM or something like
- * that. If no board specific header defines an alternative then
- * use the base of RAM.
- */
-#ifndef VBR_BASE
-#define VBR_BASE MEM_BASE /* vector address */
+#error "ERROR: I don't know how to probe your boards memory size?"
#endif
/*****************************************************************************/
@@ -191,11 +129,11 @@ _start:
* Create basic memory configuration. Set VBR accordingly,
* and size memory.
*/
- movel #VBR_BASE,%a7
+ movel #CONFIG_VECTORBASE,%a7
movec %a7,%VBR /* set vectors addr */
movel %a7,_ramvec
- movel #MEM_BASE,%a7 /* mark the base of RAM */
+ movel #CONFIG_RAMBASE,%a7 /* mark the base of RAM */
movel %a7,_rambase
GET_MEM_SIZE /* macro code determines size */
diff --git a/arch/m68knommu/platform/5307/pit.c b/arch/m68knommu/platform/5307/pit.c
index 323f2677e49d..ef174748825f 100644
--- a/arch/m68knommu/platform/5307/pit.c
+++ b/arch/m68knommu/platform/5307/pit.c
@@ -1,11 +1,11 @@
/***************************************************************************/
/*
- * pit.c -- Motorola ColdFire PIT timer. Currently this type of
- * hardware timer only exists in the Motorola ColdFire
+ * pit.c -- Freescale ColdFire PIT timer. Currently this type of
+ * hardware timer only exists in the Freescale ColdFire
* 5270/5271, 5282 and other CPUs.
*
- * Copyright (C) 1999-2004, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 2001-2004, SnapGear Inc. (www.snapgear.com)
*
*/
@@ -18,6 +18,7 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include <asm/irq.h>
#include <asm/coldfire.h>
#include <asm/mcfpit.h>
@@ -25,13 +26,20 @@
/***************************************************************************/
+/*
+ * By default use timer1 as the system clock timer.
+ */
+#define TA(a) (MCF_IPSBAR + MCFPIT_BASE1 + (a))
+
+/***************************************************************************/
+
void coldfire_pit_tick(void)
{
- volatile struct mcfpit *tp;
+ unsigned short pcsr;
/* Reset the ColdFire timer */
- tp = (volatile struct mcfpit *) (MCF_IPSBAR + MCFPIT_BASE1);
- tp->pcsr |= MCFPIT_PCSR_PIF;
+ pcsr = __raw_readw(TA(MCFPIT_PCSR));
+ __raw_writew(pcsr | MCFPIT_PCSR_PIF, TA(MCFPIT_PCSR));
}
/***************************************************************************/
@@ -40,7 +48,6 @@ void coldfire_pit_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
volatile unsigned char *icrp;
volatile unsigned long *imrp;
- volatile struct mcfpit *tp;
request_irq(MCFINT_VECBASE + MCFINT_PIT1, handler, SA_INTERRUPT,
"ColdFire Timer", NULL);
@@ -53,27 +60,23 @@ void coldfire_pit_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
*imrp &= ~MCFPIT_IMR_IBIT;
/* Set up PIT timer 1 as poll clock */
- tp = (volatile struct mcfpit *) (MCF_IPSBAR + MCFPIT_BASE1);
- tp->pcsr = MCFPIT_PCSR_DISABLE;
-
- tp->pmr = ((MCF_CLK / 2) / 64) / HZ;
- tp->pcsr = MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | MCFPIT_PCSR_OVW |
- MCFPIT_PCSR_RLD | MCFPIT_PCSR_CLK64;
+ __raw_writew(MCFPIT_PCSR_DISABLE, TA(MCFPIT_PCSR));
+ __raw_writew(((MCF_CLK / 2) / 64) / HZ, TA(MCFPIT_PMR));
+ __raw_writew(MCFPIT_PCSR_EN | MCFPIT_PCSR_PIE | MCFPIT_PCSR_OVW |
+ MCFPIT_PCSR_RLD | MCFPIT_PCSR_CLK64, TA(MCFPIT_PCSR));
}
/***************************************************************************/
unsigned long coldfire_pit_offset(void)
{
- volatile struct mcfpit *tp;
volatile unsigned long *ipr;
unsigned long pmr, pcntr, offset;
- tp = (volatile struct mcfpit *) (MCF_IPSBAR + MCFPIT_BASE1);
ipr = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFPIT_IMR);
- pmr = *(&tp->pmr);
- pcntr = *(&tp->pcntr);
+ pmr = __raw_readw(TA(MCFPIT_PMR));
+ pcntr = __raw_readw(TA(MCFPIT_PCNTR));
/*
* If we are still in the first half of the upcount and a
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index ef49596aa09c..83b8b89dfa09 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -14,6 +14,7 @@
#include <linux/param.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/machdep.h>
@@ -24,6 +25,11 @@
/***************************************************************************/
/*
+ * By default use timer1 as the system clock timer.
+ */
+#define TA(a) (MCF_MBAR + MCFTIMER_BASE1 + (a))
+
+/*
* Default the timer and vector to use for ColdFire. Some ColdFire
* CPU's and some boards may want different. Their sub-architecture
* startup code (in config.c) can change these if they want.
@@ -32,8 +38,6 @@ unsigned int mcf_timervector = 29;
unsigned int mcf_profilevector = 31;
unsigned int mcf_timerlevel = 5;
-static volatile struct mcftimer *mcf_timerp;
-
/*
* These provide the underlying interrupt vector support.
* Unfortunately it is a little different on each ColdFire.
@@ -46,20 +50,17 @@ extern int mcf_timerirqpending(int timer);
void coldfire_tick(void)
{
/* Reset the ColdFire timer */
- mcf_timerp->ter = MCFTIMER_TER_CAP | MCFTIMER_TER_REF;
+ __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER));
}
/***************************************************************************/
void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
- /* Set up an internal TIMER as poll clock */
- mcf_timerp = (volatile struct mcftimer *) (MCF_MBAR + MCFTIMER_BASE1);
- mcf_timerp->tmr = MCFTIMER_TMR_DISABLE;
-
- mcf_timerp->trr = (unsigned short) ((MCF_BUSCLK / 16) / HZ);
- mcf_timerp->tmr = MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
- MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE;
+ __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
+ __raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+ __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
+ MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
request_irq(mcf_timervector, handler, SA_INTERRUPT, "timer", NULL);
mcf_settimericr(1, mcf_timerlevel);
@@ -75,13 +76,8 @@ unsigned long coldfire_timer_offset(void)
{
unsigned long trr, tcn, offset;
- /*
- * The change to pointer and de-reference is to force the compiler
- * to read the registers with a single 16bit access. Otherwise it
- * does some crazy 8bit read combining.
- */
- tcn = *(&mcf_timerp->tcn);
- trr = *(&mcf_timerp->trr);
+ tcn = __raw_readw(TA(MCFTIMER_TCN));
+ trr = __raw_readw(TA(MCFTIMER_TRR));
offset = (tcn * (1000000 / HZ)) / trr;
/* Check if we just wrapped the counters and maybe missed a tick */
@@ -95,21 +91,23 @@ unsigned long coldfire_timer_offset(void)
/***************************************************************************/
/*
+ * By default use timer2 as the profiler clock timer.
+ */
+#define PA(a) (MCF_MBAR + MCFTIMER_BASE2 + (a))
+
+/*
* Choose a reasonably fast profile timer. Make it an odd value to
* try and get good coverage of kernal operations.
*/
#define PROFILEHZ 1013
-static volatile struct mcftimer *mcf_proftp;
-
/*
* Use the other timer to provide high accuracy profiling info.
*/
-
void coldfire_profile_tick(int irq, void *dummy, struct pt_regs *regs)
{
/* Reset ColdFire timer2 */
- mcf_proftp->ter = MCFTIMER_TER_CAP | MCFTIMER_TER_REF;
+ __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, PA(MCFTIMER_TER));
if (current->pid)
profile_tick(CPU_PROFILING, regs);
}
@@ -121,12 +119,11 @@ void coldfire_profile_init(void)
printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n", PROFILEHZ);
/* Set up TIMER 2 as high speed profile clock */
- mcf_proftp = (volatile struct mcftimer *) (MCF_MBAR + MCFTIMER_BASE2);
- mcf_proftp->tmr = MCFTIMER_TMR_DISABLE;
+ __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
- mcf_proftp->trr = (unsigned short) ((MCF_CLK / 16) / PROFILEHZ);
- mcf_proftp->tmr = MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
- MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE;
+ __raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+ __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
+ MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
request_irq(mcf_profilevector, coldfire_profile_tick,
(SA_INTERRUPT | IRQ_FLG_FAST), "profile timer", NULL);
diff --git a/arch/m68knommu/platform/532x/Makefile b/arch/m68knommu/platform/532x/Makefile
new file mode 100644
index 000000000000..12301803b9eb
--- /dev/null
+++ b/arch/m68knommu/platform/532x/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the m68knommu linux kernel.
+#
+
+#
+# If you want to play with the HW breakpoints then you will
+# need to add define this, which will give you a stack backtrace
+# on the console port whenever a DBG interrupt occurs. You have to
+# set up you HW breakpoints to trigger a DBG interrupt:
+#
+# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
+# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+#
+
+ifdef CONFIG_FULLDEBUG
+AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
+endif
+
+#obj-y := config.o usb-mcf532x.o spi-mcf532x.o
+obj-y := config.o
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
new file mode 100644
index 000000000000..ceef9bc181ea
--- /dev/null
+++ b/arch/m68knommu/platform/532x/config.c
@@ -0,0 +1,486 @@
+/***************************************************************************/
+
+/*
+ * linux/arch/m68knommu/platform/532x/config.c
+ *
+ * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 2000, Lineo (www.lineo.com)
+ * Yaroslav Vinogradov yaroslav.vinogradov@freescale.com
+ * Copyright Freescale Semiconductor, Inc 2006
+ * Copyright (c) 2006, emlix, Sebastian Hess <sh@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/***************************************************************************/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/mcftimer.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfdma.h>
+#include <asm/mcfwdebug.h>
+
+/***************************************************************************/
+
+void coldfire_tick(void);
+void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+unsigned long coldfire_timer_offset(void);
+void coldfire_trap_init(void);
+void coldfire_reset(void);
+
+extern unsigned int mcf_timervector;
+extern unsigned int mcf_profilevector;
+extern unsigned int mcf_timerlevel;
+
+/***************************************************************************/
+
+/*
+ * DMA channel base address table.
+ */
+unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = { };
+unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
+
+/***************************************************************************/
+
+void mcf_settimericr(unsigned int timer, unsigned int level)
+{
+ volatile unsigned char *icrp;
+ unsigned int icr;
+ unsigned char irq;
+
+ if (timer <= 2) {
+ switch (timer) {
+ case 2: irq = 33; icr = MCFSIM_ICR_TIMER2; break;
+ default: irq = 32; icr = MCFSIM_ICR_TIMER1; break;
+ }
+
+ icrp = (volatile unsigned char *) (MCF_MBAR + icr);
+ *icrp = level;
+ mcf_enable_irq0(irq);
+ }
+}
+
+/***************************************************************************/
+
+int mcf_timerirqpending(int timer)
+{
+ unsigned int imr = 0;
+
+ switch (timer) {
+ case 1: imr = 0x1; break;
+ case 2: imr = 0x2; break;
+ default: break;
+ }
+ return (mcf_getiprh() & imr);
+}
+
+/***************************************************************************/
+
+void config_BSP(char *commandp, int size)
+{
+ mcf_setimr(MCFSIM_IMR_MASKALL);
+
+#if defined(CONFIG_BOOTPARAM)
+ strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
+ commandp[size-1] = 0;
+#else
+ /* Copy command line from FLASH to local buffer... */
+ memcpy(commandp, (char *) 0x4000, 4);
+ if(strncmp(commandp, "kcl ", 4) == 0){
+ memcpy(commandp, (char *) 0x4004, size);
+ commandp[size-1] = 0;
+ } else {
+ memset(commandp, 0, size);
+ }
+#endif
+
+ mcf_timervector = 64+32;
+ mcf_profilevector = 64+33;
+ mach_sched_init = coldfire_timer_init;
+ mach_tick = coldfire_tick;
+ mach_gettimeoffset = coldfire_timer_offset;
+ mach_trap_init = coldfire_trap_init;
+ mach_reset = coldfire_reset;
+
+#ifdef MCF_BDM_DISABLE
+ /*
+ * Disable the BDM clocking. This also turns off most of the rest of
+ * the BDM device. This is good for EMC reasons. This option is not
+ * incompatible with the memory protection option.
+ */
+ wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
+#endif
+}
+
+/***************************************************************************/
+/* Board initialization */
+
+/********************************************************************/
+/*
+ * PLL min/max specifications
+ */
+#define MAX_FVCO 500000 /* KHz */
+#define MAX_FSYS 80000 /* KHz */
+#define MIN_FSYS 58333 /* KHz */
+#define FREF 16000 /* KHz */
+
+
+#define MAX_MFD 135 /* Multiplier */
+#define MIN_MFD 88 /* Multiplier */
+#define BUSDIV 6 /* Divider */
+
+/*
+ * Low Power Divider specifications
+ */
+#define MIN_LPD (1 << 0) /* Divider (not encoded) */
+#define MAX_LPD (1 << 15) /* Divider (not encoded) */
+#define DEFAULT_LPD (1 << 1) /* Divider (not encoded) */
+
+#define SYS_CLK_KHZ 80000
+#define SYSTEM_PERIOD 12.5
+/*
+ * SDRAM Timing Parameters
+ */
+#define SDRAM_BL 8 /* # of beats in a burst */
+#define SDRAM_TWR 2 /* in clocks */
+#define SDRAM_CASL 2.5 /* CASL in clocks */
+#define SDRAM_TRCD 2 /* in clocks */
+#define SDRAM_TRP 2 /* in clocks */
+#define SDRAM_TRFC 7 /* in clocks */
+#define SDRAM_TREFI 7800 /* in ns */
+
+#define EXT_SRAM_ADDRESS (0xC0000000)
+#define FLASH_ADDRESS (0x00000000)
+#define SDRAM_ADDRESS (0x40000000)
+
+#define NAND_FLASH_ADDRESS (0xD0000000)
+
+int sys_clk_khz = 0;
+int sys_clk_mhz = 0;
+
+void wtm_init(void);
+void scm_init(void);
+void gpio_init(void);
+void fbcs_init(void);
+void sdramc_init(void);
+int clock_pll (int fsys, int flags);
+int clock_limp (int);
+int clock_exit_limp (void);
+int get_sys_clock (void);
+
+asmlinkage void __init sysinit(void)
+{
+ sys_clk_khz = clock_pll(0, 0);
+ sys_clk_mhz = sys_clk_khz/1000;
+
+ wtm_init();
+ scm_init();
+ gpio_init();
+ fbcs_init();
+ sdramc_init();
+}
+
+void wtm_init(void)
+{
+ /* Disable watchdog timer */
+ MCF_WTM_WCR = 0;
+}
+
+#define MCF_SCM_BCR_GBW (0x00000100)
+#define MCF_SCM_BCR_GBR (0x00000200)
+
+void scm_init(void)
+{
+ /* All masters are trusted */
+ MCF_SCM_MPR = 0x77777777;
+
+ /* Allow supervisor/user, read/write, and trusted/untrusted
+ access to all slaves */
+ MCF_SCM_PACRA = 0;
+ MCF_SCM_PACRB = 0;
+ MCF_SCM_PACRC = 0;
+ MCF_SCM_PACRD = 0;
+ MCF_SCM_PACRE = 0;
+ MCF_SCM_PACRF = 0;
+
+ /* Enable bursts */
+ MCF_SCM_BCR = (MCF_SCM_BCR_GBR | MCF_SCM_BCR_GBW);
+}
+
+
+void fbcs_init(void)
+{
+ MCF_GPIO_PAR_CS = 0x0000003E;
+
+ /* Latch chip select */
+ MCF_FBCS1_CSAR = 0x10080000;
+
+ MCF_FBCS1_CSCR = 0x002A3780;
+ MCF_FBCS1_CSMR = (MCF_FBCS_CSMR_BAM_2M | MCF_FBCS_CSMR_V);
+
+ /* Initialize latch to drive signals to inactive states */
+ *((u16 *)(0x10080000)) = 0xFFFF;
+
+ /* External SRAM */
+ MCF_FBCS1_CSAR = EXT_SRAM_ADDRESS;
+ MCF_FBCS1_CSCR = (MCF_FBCS_CSCR_PS_16
+ | MCF_FBCS_CSCR_AA
+ | MCF_FBCS_CSCR_SBM
+ | MCF_FBCS_CSCR_WS(1));
+ MCF_FBCS1_CSMR = (MCF_FBCS_CSMR_BAM_512K
+ | MCF_FBCS_CSMR_V);
+
+ /* Boot Flash connected to FBCS0 */
+ MCF_FBCS0_CSAR = FLASH_ADDRESS;
+ MCF_FBCS0_CSCR = (MCF_FBCS_CSCR_PS_16
+ | MCF_FBCS_CSCR_BEM
+ | MCF_FBCS_CSCR_AA
+ | MCF_FBCS_CSCR_SBM
+ | MCF_FBCS_CSCR_WS(7));
+ MCF_FBCS0_CSMR = (MCF_FBCS_CSMR_BAM_32M
+ | MCF_FBCS_CSMR_V);
+}
+
+void sdramc_init(void)
+{
+ /*
+ * Check to see if the SDRAM has already been initialized
+ * by a run control tool
+ */
+ if (!(MCF_SDRAMC_SDCR & MCF_SDRAMC_SDCR_REF)) {
+ /* SDRAM chip select initialization */
+
+ /* Initialize SDRAM chip select */
+ MCF_SDRAMC_SDCS0 = (0
+ | MCF_SDRAMC_SDCS_BA(SDRAM_ADDRESS)
+ | MCF_SDRAMC_SDCS_CSSZ(MCF_SDRAMC_SDCS_CSSZ_32MBYTE));
+
+ /*
+ * Basic configuration and initialization
+ */
+ MCF_SDRAMC_SDCFG1 = (0
+ | MCF_SDRAMC_SDCFG1_SRD2RW((int)((SDRAM_CASL + 2) + 0.5 ))
+ | MCF_SDRAMC_SDCFG1_SWT2RD(SDRAM_TWR + 1)
+ | MCF_SDRAMC_SDCFG1_RDLAT((int)((SDRAM_CASL*2) + 2))
+ | MCF_SDRAMC_SDCFG1_ACT2RW((int)((SDRAM_TRCD ) + 0.5))
+ | MCF_SDRAMC_SDCFG1_PRE2ACT((int)((SDRAM_TRP ) + 0.5))
+ | MCF_SDRAMC_SDCFG1_REF2ACT((int)(((SDRAM_TRFC) ) + 0.5))
+ | MCF_SDRAMC_SDCFG1_WTLAT(3));
+ MCF_SDRAMC_SDCFG2 = (0
+ | MCF_SDRAMC_SDCFG2_BRD2PRE(SDRAM_BL/2 + 1)
+ | MCF_SDRAMC_SDCFG2_BWT2RW(SDRAM_BL/2 + SDRAM_TWR)
+ | MCF_SDRAMC_SDCFG2_BRD2WT((int)((SDRAM_CASL+SDRAM_BL/2-1.0)+0.5))
+ | MCF_SDRAMC_SDCFG2_BL(SDRAM_BL-1));
+
+
+ /*
+ * Precharge and enable write to SDMR
+ */
+ MCF_SDRAMC_SDCR = (0
+ | MCF_SDRAMC_SDCR_MODE_EN
+ | MCF_SDRAMC_SDCR_CKE
+ | MCF_SDRAMC_SDCR_DDR
+ | MCF_SDRAMC_SDCR_MUX(1)
+ | MCF_SDRAMC_SDCR_RCNT((int)(((SDRAM_TREFI/(SYSTEM_PERIOD*64)) - 1) + 0.5))
+ | MCF_SDRAMC_SDCR_PS_16
+ | MCF_SDRAMC_SDCR_IPALL);
+
+ /*
+ * Write extended mode register
+ */
+ MCF_SDRAMC_SDMR = (0
+ | MCF_SDRAMC_SDMR_BNKAD_LEMR
+ | MCF_SDRAMC_SDMR_AD(0x0)
+ | MCF_SDRAMC_SDMR_CMD);
+
+ /*
+ * Write mode register and reset DLL
+ */
+ MCF_SDRAMC_SDMR = (0
+ | MCF_SDRAMC_SDMR_BNKAD_LMR
+ | MCF_SDRAMC_SDMR_AD(0x163)
+ | MCF_SDRAMC_SDMR_CMD);
+
+ /*
+ * Execute a PALL command
+ */
+ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_IPALL;
+
+ /*
+ * Perform two REF cycles
+ */
+ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_IREF;
+ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_IREF;
+
+ /*
+ * Write mode register and clear reset DLL
+ */
+ MCF_SDRAMC_SDMR = (0
+ | MCF_SDRAMC_SDMR_BNKAD_LMR
+ | MCF_SDRAMC_SDMR_AD(0x063)
+ | MCF_SDRAMC_SDMR_CMD);
+
+ /*
+ * Enable auto refresh and lock SDMR
+ */
+ MCF_SDRAMC_SDCR &= ~MCF_SDRAMC_SDCR_MODE_EN;
+ MCF_SDRAMC_SDCR |= (0
+ | MCF_SDRAMC_SDCR_REF
+ | MCF_SDRAMC_SDCR_DQS_OE(0xC));
+ }
+}
+
+void gpio_init(void)
+{
+ /* Enable UART0 pins */
+ MCF_GPIO_PAR_UART = ( 0
+ | MCF_GPIO_PAR_UART_PAR_URXD0
+ | MCF_GPIO_PAR_UART_PAR_UTXD0);
+
+ /* Initialize TIN3 as a GPIO output to enable the write
+ half of the latch */
+ MCF_GPIO_PAR_TIMER = 0x00;
+ MCF_GPIO_PDDR_TIMER = 0x08;
+ MCF_GPIO_PCLRR_TIMER = 0x0;
+
+}
+
+int clock_pll(int fsys, int flags)
+{
+ int fref, temp, fout, mfd;
+ u32 i;
+
+ fref = FREF;
+
+ if (fsys == 0) {
+ /* Return current PLL output */
+ mfd = MCF_PLL_PFDR;
+
+ return (fref * mfd / (BUSDIV * 4));
+ }
+
+ /* Check bounds of requested system clock */
+ if (fsys > MAX_FSYS)
+ fsys = MAX_FSYS;
+ if (fsys < MIN_FSYS)
+ fsys = MIN_FSYS;
+
+ /* Multiplying by 100 when calculating the temp value,
+ and then dividing by 100 to calculate the mfd allows
+ for exact values without needing to include floating
+ point libraries. */
+ temp = 100 * fsys / fref;
+ mfd = 4 * BUSDIV * temp / 100;
+
+ /* Determine the output frequency for selected values */
+ fout = (fref * mfd / (BUSDIV * 4));
+
+ /*
+ * Check to see if the SDRAM has already been initialized.
+ * If it has then the SDRAM needs to be put into self refresh
+ * mode before reprogramming the PLL.
+ */
+ if (MCF_SDRAMC_SDCR & MCF_SDRAMC_SDCR_REF)
+ /* Put SDRAM into self refresh mode */
+ MCF_SDRAMC_SDCR &= ~MCF_SDRAMC_SDCR_CKE;
+
+ /*
+ * Initialize the PLL to generate the new system clock frequency.
+ * The device must be put into LIMP mode to reprogram the PLL.
+ */
+
+ /* Enter LIMP mode */
+ clock_limp(DEFAULT_LPD);
+
+ /* Reprogram PLL for desired fsys */
+ MCF_PLL_PODR = (0
+ | MCF_PLL_PODR_CPUDIV(BUSDIV/3)
+ | MCF_PLL_PODR_BUSDIV(BUSDIV));
+
+ MCF_PLL_PFDR = mfd;
+
+ /* Exit LIMP mode */
+ clock_exit_limp();
+
+ /*
+ * Return the SDRAM to normal operation if it is in use.
+ */
+ if (MCF_SDRAMC_SDCR & MCF_SDRAMC_SDCR_REF)
+ /* Exit self refresh mode */
+ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_CKE;
+
+	/* Errata - workaround for SDRAM operation after exiting LIMP mode */
+ MCF_SDRAMC_LIMP_FIX = MCF_SDRAMC_REFRESH;
+
+ /* wait for DQS logic to relock */
+ for (i = 0; i < 0x200; i++)
+ ;
+
+ return fout;
+}
+
+int clock_limp(int div)
+{
+ u32 temp;
+
+ /* Check bounds of divider */
+ if (div < MIN_LPD)
+ div = MIN_LPD;
+ if (div > MAX_LPD)
+ div = MAX_LPD;
+
+	/* Save off the current value of the SSIDIV so we don't
+	   overwrite the value */
+ temp = (MCF_CCM_CDR & MCF_CCM_CDR_SSIDIV(0xF));
+
+ /* Apply the divider to the system clock */
+ MCF_CCM_CDR = ( 0
+ | MCF_CCM_CDR_LPDIV(div)
+ | MCF_CCM_CDR_SSIDIV(temp));
+
+ MCF_CCM_MISCCR |= MCF_CCM_MISCCR_LIMP;
+
+ return (FREF/(3*(1 << div)));
+}
+
+int clock_exit_limp(void)
+{
+ int fout;
+
+ /* Exit LIMP mode */
+ MCF_CCM_MISCCR = (MCF_CCM_MISCCR & ~ MCF_CCM_MISCCR_LIMP);
+
+ /* Wait for PLL to lock */
+ while (!(MCF_CCM_MISCCR & MCF_CCM_MISCCR_PLL_LOCK))
+ ;
+
+ fout = get_sys_clock();
+
+ return fout;
+}
+
+int get_sys_clock(void)
+{
+ int divider;
+
+ /* Test to see if device is in LIMP mode */
+ if (MCF_CCM_MISCCR & MCF_CCM_MISCCR_LIMP) {
+ divider = MCF_CCM_CDR & MCF_CCM_CDR_LPDIV(0xF);
+ return (FREF/(2 << divider));
+ }
+ else
+ return ((FREF * MCF_PLL_PFDR) / (BUSDIV * 4));
+}
diff --git a/arch/m68knommu/platform/68328/Makefile b/arch/m68knommu/platform/68328/Makefile
index 1b3b719e4479..5e5435552d56 100644
--- a/arch/m68knommu/platform/68328/Makefile
+++ b/arch/m68knommu/platform/68328/Makefile
@@ -8,6 +8,7 @@ head-$(CONFIG_DRAGEN2) = head-de2.o
obj-y += entry.o ints.o timers.o
obj-$(CONFIG_M68328) += config.o
+obj-$(CONFIG_ROM) += romvec.o
extra-y := head.o
extra-$(CONFIG_M68328) += bootlogo.rh head.o
diff --git a/arch/m68knommu/platform/68328/head-pilot.S b/arch/m68knommu/platform/68328/head-pilot.S
index c46775fe04be..46b3604f999c 100644
--- a/arch/m68knommu/platform/68328/head-pilot.S
+++ b/arch/m68knommu/platform/68328/head-pilot.S
@@ -21,7 +21,6 @@
.global _start
.global _rambase
-.global __ramvec
.global _ramvec
.global _ramstart
.global _ramend
@@ -121,7 +120,7 @@ L0:
DBG_PUTC('B')
/* Copy command line from beginning of RAM (+16) to end of bss */
- movel #__ramvec, %d7
+ movel #CONFIG_VECTORBASE, %d7
addl #16, %d7
moveal %d7, %a0
moveal #_ebss, %a1
diff --git a/arch/m68knommu/platform/68328/head-ram.S b/arch/m68knommu/platform/68328/head-ram.S
index 6bdc9bce43f2..e8dc9241ff96 100644
--- a/arch/m68knommu/platform/68328/head-ram.S
+++ b/arch/m68knommu/platform/68328/head-ram.S
@@ -1,10 +1,7 @@
#include <linux/config.h>
.global __main
- .global __ram_start
- .global __ram_end
.global __rom_start
- .global __rom_end
.global _rambase
.global _ramstart
@@ -12,6 +9,7 @@
.global splash_bits
.global _start
.global _stext
+ .global _edata
#define DEBUG
#define ROM_OFFSET 0x10C00000
@@ -73,7 +71,7 @@ pclp1:
#ifdef CONFIG_RELOCATE
/* Copy me to RAM */
moveal #__rom_start, %a0
- moveal #__ram_start, %a1
+ moveal #_stext, %a1
moveal #_edata, %a2
/* Copy %a0 to %a1 until %a1 == %a2 */
diff --git a/arch/m68knommu/platform/68328/head-rom.S b/arch/m68knommu/platform/68328/head-rom.S
index 2b448a297011..234430b9551c 100644
--- a/arch/m68knommu/platform/68328/head-rom.S
+++ b/arch/m68knommu/platform/68328/head-rom.S
@@ -28,6 +28,8 @@ _ramstart:
_ramend:
.long 0
+#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+
#ifdef CONFIG_INIT_LCD
splash_bits:
#include "bootlogo.rh"
@@ -48,7 +50,7 @@ _stext: movew #0x2700,%sr
moveb #0x81, 0xfffffA27 /* LCKCON */
movew #0xff00, 0xfffff412 /* LCD pins */
#endif
- moveal #__ramend-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
+ moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
movew #32767, %d0 /* PLL settle wait loop */
1: subq #1, %d0
bne 1b
@@ -73,13 +75,13 @@ _stext: movew #0x2700,%sr
bhi 1b
movel #_sdata, %d0
- movel %d0, _rambase
- movel #_ebss, %d0
- movel %d0, _ramstart
- movel #__ramend-CONFIG_MEMORY_RESERVE*0x100000, %d0
- movel %d0, _ramend
- movel #__ramvec, %d0
- movel %d0, _ramvec
+ movel %d0, _rambase
+ movel #_ebss, %d0
+ movel %d0, _ramstart
+ movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
+ movel %d0, _ramend
+ movel #CONFIG_VECTORBASE, %d0
+ movel %d0, _ramvec
/*
* load the current task pointer and stack
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index 7437217813d2..2dda7339aae5 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -18,6 +18,7 @@
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/irqnode.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
@@ -82,25 +83,6 @@ unsigned int local_irq_count[NR_CPUS];
/* irq node variables for the 32 (potential) on chip sources */
static irq_node_t int_irq_list[NR_IRQS];
-#if !defined(CONFIG_DRAGEN2)
-asm (".global _start, __ramend/n/t"
- ".section .romvec/n"
- "e_vectors:\n\t"
- ".long __ramend-4, _start, buserr, trap, trap, trap, trap, trap\n\t"
- ".long trap, trap, trap, trap, trap, trap, trap, trap\n\t"
- ".long trap, trap, trap, trap, trap, trap, trap, trap\n\t"
- ".long trap, trap, trap, trap\n\t"
- ".long trap, trap, trap, trap\n\t"
- /*.long inthandler, inthandler, inthandler, inthandler
- .long inthandler4, inthandler, inthandler, inthandler */
- /* TRAP #0-15 */
- ".long system_call, trap, trap, trap, trap, trap, trap, trap\n\t"
- ".long trap, trap, trap, trap, trap, trap, trap, trap\n\t"
- ".long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n\t"
- ".text\n"
- "ignore: rte");
-#endif
-
/*
* This function should be called during kernel startup to initialize
* the IRQ handling routines.
diff --git a/arch/m68knommu/platform/68328/romvec.S b/arch/m68knommu/platform/68328/romvec.S
new file mode 100644
index 000000000000..3e7fe1e14913
--- /dev/null
+++ b/arch/m68knommu/platform/68328/romvec.S
@@ -0,0 +1,37 @@
+/*
+ * linux/arch/m68knommu/platform/68328/romvec.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 1996 Roman Zippel
+ * Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
+ * Copyright 2006 Greg Ungerer <gerg@snapgear.com>
+ */
+
+#include <linux/config.h>
+
+.global _start
+.global _buserr
+.global trap
+.global system_call
+
+.section .romvec
+
+e_vectors:
+.long CONFIG_RAMBASE+CONFIG_RAMSIZE-4, _start, buserr, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+/* TRAP #0-15 */
+.long system_call, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long trap, trap, trap, trap
+.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index 3db244625f0f..69c670dfd62b 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -141,13 +141,13 @@ int BSP_set_clock_mmss (unsigned long nowtime)
void BSP_reset (void)
{
local_irq_disable();
- asm volatile ("
- moveal #_start, %a0;
- moveb #0, 0xFFFFF300;
- moveal 0(%a0), %sp;
- moveal 4(%a0), %a0;
- jmp (%a0);
- ");
+ asm volatile (
+ "moveal #_start, %a0;\n"
+ "moveb #0, 0xFFFFF300;\n"
+ "moveal 0(%a0), %sp;\n"
+ "moveal 4(%a0), %a0;\n"
+ "jmp (%a0);\n"
+ );
}
unsigned char *scc1_hwaddr;
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index a5c639a51eef..f497713a4ec7 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -18,7 +18,6 @@
.global _start
.global _rambase
-.global __ramvec
.global _ramvec
.global _ramstart
.global _ramend
@@ -26,6 +25,8 @@
.global _quicc_base
.global _periph_base
+#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+
#define REGB 0x1000
#define PEPAR (_dprbase + REGB + 0x0016)
#define GMR (_dprbase + REGB + 0x0040)
@@ -103,7 +104,7 @@ _stext:
nop
ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
/* We should not need to setup the boot stack the reset should do it. */
- movea.l #__ramend, %sp /*set up stack at the end of DRAM:*/
+ movea.l #RAMEND, %sp /*set up stack at the end of DRAM:*/
set_mbar_register:
moveq.l #0x07, %d1 /* Setup MBAR */
@@ -163,7 +164,7 @@ configure_memory_controller:
move.l %d0, GMR
configure_chip_select_0:
- move.l #__ramend, %d0
+ move.l #RAMEND, %d0
subi.l #__ramstart, %d0
subq.l #0x01, %d0
eori.l #SIM_OR_MASK, %d0
@@ -234,16 +235,10 @@ store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
move.l #_ebss, _ramstart
- move.l #__ramend, %d0
+ move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
- move.l %d0, _ramend /* Different from __ramend.*/
+ move.l %d0, _ramend /* Different from RAMEND.*/
-store_flash_size:
- /* Set rom size information */
- move.l #__rom_end, %d0
- sub.l #__rom_start, %d0
- move.l %d0, rom_length
-
pea 0
pea env
pea %sp@(4)
@@ -286,7 +281,7 @@ _dprbase:
*/
.section ".data.initvect","awx"
- .long __ramend /* Reset: Initial Stack Pointer - 0. */
+ .long RAMEND /* Reset: Initial Stack Pointer - 0. */
.long _start /* Reset: Initial Program Counter - 1. */
.long buserr /* Bus Error - 2. */
.long trap /* Address Error - 3. */
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S
index 0da357a4cfee..2d28c3e19a88 100644
--- a/arch/m68knommu/platform/68360/head-rom.S
+++ b/arch/m68knommu/platform/68360/head-rom.S
@@ -18,7 +18,6 @@
.global _start
.global _rambase
-.global __ramvec
.global _ramvec
.global _ramstart
.global _ramend
@@ -26,6 +25,8 @@
.global _quicc_base
.global _periph_base
+#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+
#define REGB 0x1000
#define PEPAR (_dprbase + REGB + 0x0016)
#define GMR (_dprbase + REGB + 0x0040)
@@ -115,7 +116,7 @@ _stext:
nop
ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
/* We should not need to setup the boot stack the reset should do it. */
- movea.l #__ramend, %sp /* set up stack at the end of DRAM:*/
+ movea.l #RAMEND, %sp /* set up stack at the end of DRAM:*/
set_mbar_register:
@@ -245,16 +246,10 @@ store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
move.l #_ebss, _ramstart
- move.l #__ramend, %d0
+ move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
- move.l %d0, _ramend /* Different from __ramend.*/
+ move.l %d0, _ramend /* Different from RAMEND.*/
-store_flash_size:
- /* Set rom size information */
- move.l #__rom_end, %d0
- sub.l #__rom_start, %d0
- move.l %d0, rom_length
-
pea 0
pea env
pea %sp@(4)
@@ -298,7 +293,7 @@ _dprbase:
*/
.section ".data.initvect","awx"
- .long __ramend /* Reset: Initial Stack Pointer - 0. */
+ .long RAMEND /* Reset: Initial Stack Pointer - 0. */
.long _start /* Reset: Initial Program Counter - 1. */
.long buserr /* Bus Error - 2. */
.long trap /* Address Error - 3. */
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index ba184db1651b..0245fc4a4781 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -20,6 +20,7 @@
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/irqnode.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
diff --git a/arch/m68knommu/platform/68EZ328/config.c b/arch/m68knommu/platform/68EZ328/config.c
index d8d56e5de310..15a14a67c2bf 100644
--- a/arch/m68knommu/platform/68EZ328/config.c
+++ b/arch/m68knommu/platform/68EZ328/config.c
@@ -42,13 +42,13 @@ void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int
void m68ez328_reset(void)
{
local_irq_disable();
- asm volatile ("
- moveal #0x10c00000, %a0;
- moveb #0, 0xFFFFF300;
- moveal 0(%a0), %sp;
- moveal 4(%a0), %a0;
- jmp (%a0);
- ");
+ asm volatile (
+ "moveal #0x10c00000, %a0;\n"
+ "moveb #0, 0xFFFFF300;\n"
+ "moveal 0(%a0), %sp;\n"
+ "moveal 4(%a0), %a0;\n"
+ "jmp (%a0);\n"
+ );
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/68VZ328/config.c b/arch/m68knommu/platform/68VZ328/config.c
index d926524cdf82..4058de5c8fa2 100644
--- a/arch/m68knommu/platform/68VZ328/config.c
+++ b/arch/m68knommu/platform/68VZ328/config.c
@@ -141,13 +141,13 @@ static void init_hardware(char *command, int size)
static void m68vz328_reset(void)
{
local_irq_disable();
- asm volatile ("
- moveal #0x10c00000, %a0;
- moveb #0, 0xFFFFF300;
- moveal 0(%a0), %sp;
- moveal 4(%a0), %a0;
- jmp (%a0);
- ");
+ asm volatile (
+ "moveal #0x10c00000, %a0;\n\t"
+ "moveb #0, 0xFFFFF300;\n\t"
+ "moveal 0(%a0), %sp;\n\t"
+ "moveal 4(%a0), %a0;\n\t"
+ "jmp (%a0);\n"
+ );
}
unsigned char *cs8900a_hwaddr;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 35e038a974c6..08c2ece4ae40 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1618,6 +1618,11 @@ config GENERIC_IRQ_PROBE
bool
default y
+config IRQ_PER_CPU
+ depends on SMP
+ bool
+ default y
+
#
# - Highmem only makes sense for the 32-bit kernel.
# - The current highmem code will only work properly on physically indexed
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index afe05ec12c27..da74ac21954b 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -333,31 +333,31 @@ static void setup_local_irq(unsigned int irq_nr, int type, int int_req)
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
- irq_desc[irq_nr].handler = &rise_edge_irq_type;
+ irq_desc[irq_nr].chip = &rise_edge_irq_type;
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
- irq_desc[irq_nr].handler = &fall_edge_irq_type;
+ irq_desc[irq_nr].chip = &fall_edge_irq_type;
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
- irq_desc[irq_nr].handler = &either_edge_irq_type;
+ irq_desc[irq_nr].chip = &either_edge_irq_type;
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1<<(irq_nr-32), IC1_CFG2SET);
au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
- irq_desc[irq_nr].handler = &level_irq_type;
+ irq_desc[irq_nr].chip = &level_irq_type;
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1<<(irq_nr-32), IC1_CFG2SET);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
- irq_desc[irq_nr].handler = &level_irq_type;
+ irq_desc[irq_nr].chip = &level_irq_type;
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
@@ -385,31 +385,31 @@ static void setup_local_irq(unsigned int irq_nr, int type, int int_req)
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1CLR);
au_writel(1<<irq_nr, IC0_CFG0SET);
- irq_desc[irq_nr].handler = &rise_edge_irq_type;
+ irq_desc[irq_nr].chip = &rise_edge_irq_type;
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0CLR);
- irq_desc[irq_nr].handler = &fall_edge_irq_type;
+ irq_desc[irq_nr].chip = &fall_edge_irq_type;
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0SET);
- irq_desc[irq_nr].handler = &either_edge_irq_type;
+ irq_desc[irq_nr].chip = &either_edge_irq_type;
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1<<irq_nr, IC0_CFG2SET);
au_writel(1<<irq_nr, IC0_CFG1CLR);
au_writel(1<<irq_nr, IC0_CFG0SET);
- irq_desc[irq_nr].handler = &level_irq_type;
+ irq_desc[irq_nr].chip = &level_irq_type;
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1<<irq_nr, IC0_CFG2SET);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0CLR);
- irq_desc[irq_nr].handler = &level_irq_type;
+ irq_desc[irq_nr].chip = &level_irq_type;
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1<<irq_nr, IC0_CFG0CLR);
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index bacc0c6bfe67..5dd164fc1889 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -172,7 +172,7 @@ void _board_init_irq(void)
for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++)
{
- irq_desc[irq_nr].handler = &external_irq_type;
+ irq_desc[irq_nr].chip = &external_irq_type;
pb1200_disable_irq(irq_nr);
}
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index 5fcd5f070cdc..63c3d6534b3a 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -107,7 +107,7 @@ void __init vrc5477_irq_init(u32 irq_base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &vrc5477_irq_controller;
+ irq_desc[i].chip = &vrc5477_irq_controller;
}
vrc5477_irq_base = irq_base;
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index d5bca5d233b6..da2dbb42f913 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -144,13 +144,13 @@ void __init init_ioasic_irqs(int base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &ioasic_irq_type;
+ irq_desc[i].chip = &ioasic_irq_type;
}
for (; i < base + IO_IRQ_LINES; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &ioasic_dma_irq_type;
+ irq_desc[i].chip = &ioasic_dma_irq_type;
}
ioasic_irq_base = base;
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 898bed502a34..d44c00d9e80f 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -123,7 +123,7 @@ void __init init_kn02_irqs(int base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &kn02_irq_type;
+ irq_desc[i].chip = &kn02_irq_type;
}
kn02_irq_base = base;
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index 46c468b26b30..f489a8067a93 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -138,7 +138,7 @@ void __init arch_init_irq(void)
/* Let's initialize our IRQ descriptors */
for (i = 0; i < NR_IRQS; i++) {
irq_desc[i].status = 0;
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
irq_desc[i].action = NULL;
irq_desc[i].depth = 0;
spin_lock_init(&irq_desc[i].lock);
diff --git a/arch/mips/ite-boards/generic/irq.c b/arch/mips/ite-boards/generic/irq.c
index 77be7216bdd0..a6749c56fe38 100644
--- a/arch/mips/ite-boards/generic/irq.c
+++ b/arch/mips/ite-boards/generic/irq.c
@@ -208,10 +208,10 @@ void __init arch_init_irq(void)
#endif
for (i = 0; i <= IT8172_LAST_IRQ; i++) {
- irq_desc[i].handler = &it8172_irq_type;
+ irq_desc[i].chip = &it8172_irq_type;
spin_lock_init(&irq_desc[i].lock);
}
- irq_desc[MIPS_CPU_TIMER_IRQ].handler = &cp0_irq_type;
+ irq_desc[MIPS_CPU_TIMER_IRQ].chip = &cp0_irq_type;
set_c0_status(ALLINTS_NOTIMER);
}
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index becc9accd495..478be9858a1e 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -73,7 +73,7 @@ void __init init_r4030_ints(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &r4030_irq_type;
+ irq_desc[i].chip = &r4030_irq_type;
}
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index 11304d1354f4..380046ea1db5 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -435,7 +435,7 @@ void jmr3927_irq_init(u32 irq_base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &jmr3927_irq_controller;
+ irq_desc[i].chip = &jmr3927_irq_controller;
}
jmr3927_irq_base = irq_base;
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 0cb8ed5662f3..91ffb1233cad 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -120,7 +120,7 @@ int i8259A_irq_pending(unsigned int irq)
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &i8259A_irq_type;
+ irq_desc[irq].chip = &i8259A_irq_type;
enable_irq(irq);
}
@@ -327,7 +327,7 @@ void __init init_i8259_irqs (void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &i8259A_irq_type;
+ irq_desc[i].chip = &i8259A_irq_type;
}
setup_irq(2, &irq2);
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 97ebdc754b9e..f8cd1ac64d88 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -174,14 +174,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
- irq_desc[base+n].handler = &msc_edgeirq_type;
+ irq_desc[base+n].chip = &msc_edgeirq_type;
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
- irq_desc[base+n].handler = &msc_levelirq_type;
+ irq_desc[base+n].chip = &msc_levelirq_type;
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 0613f1f36b1b..f9c763a65547 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -155,7 +155,7 @@ void __init mv64340_irq_init(unsigned int base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
- irq_desc[i].handler = &mv64340_irq_type;
+ irq_desc[i].chip = &mv64340_irq_type;
}
irq_base = base;
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 0b130c5ac5d9..121da385a94d 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -91,7 +91,7 @@ void __init rm7k_cpu_irq_init(int base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &rm7k_irq_controller;
+ irq_desc[i].chip = &rm7k_irq_controller;
}
irq_base = base;
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 9b5f20c32acb..25109c103e44 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -139,11 +139,11 @@ void __init rm9k_cpu_irq_init(int base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &rm9k_irq_controller;
+ irq_desc[i].chip = &rm9k_irq_controller;
}
rm9000_perfcount_irq = base + 1;
- irq_desc[rm9000_perfcount_irq].handler = &rm9k_perfcounter_irq;
+ irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
irq_base = base;
}
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 3dce742e716f..5c9dcd5eed59 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -95,7 +95,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -137,7 +137,7 @@ void __init init_IRQ(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
spin_lock_init(&irq_desc[i].lock);
#ifdef CONFIG_MIPS_MT_SMTC
irq_hwmask[i] = 0;
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 5db67e31ec1a..0e455a8ad860 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -167,14 +167,14 @@ void __init mips_cpu_irq_init(int irq_base)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &mips_mt_cpu_irq_controller;
+ irq_desc[i].chip = &mips_mt_cpu_irq_controller;
}
for (i = irq_base + 2; i < irq_base + 8; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &mips_cpu_irq_controller;
+ irq_desc[i].chip = &mips_cpu_irq_controller;
}
mips_cpu_irq_base = irq_base;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 298f82fe8440..9096a5ea4229 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -446,7 +446,7 @@ static int __init topology_init(void)
int ret;
for_each_present_cpu(cpu) {
- ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", cpu, ret);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 2e8e52c135e6..70cf09afdf56 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -367,7 +367,7 @@ void mipsmt_prepare_cpus(void)
dvpe();
dmt();
- freeIPIq.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&freeIPIq.lock);
/*
* We probably don't have as many VPEs as we do SMP "CPUs",
@@ -375,7 +375,7 @@ void mipsmt_prepare_cpus(void)
*/
for (i=0; i<NR_CPUS; i++) {
IPIQ[i].head = IPIQ[i].tail = NULL;
- IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
ipi_timer_latch[i] = 0;
}
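
Note: assigning SPIN_LOCK_UNLOCKED into a runtime-allocated lock is replaced by spin_lock_init(); the statically allocated locks later in the series (pfunc_core.c, eeh_event.c, mmio_nvram.c) move to DEFINE_SPINLOCK() for the same reason, so each lock is explicitly initialised where it lives instead of being copied from a shared template. A toy user-space illustration of the two calling patterns — the spinlock type here is a stand-in, not the kernel's:

	#include <stdio.h>

	typedef struct { volatile int locked; } spinlock_t;	/* stand-in */
	#define DEFINE_SPINLOCK(name)	spinlock_t name = { 0 }	/* static locks */
	static void spin_lock_init(spinlock_t *lock)		/* runtime init */
	{
		lock->locked = 0;
	}

	struct smtc_ipi_q {
		void *head, *tail;
		spinlock_t lock;
		int depth;
	};

	#define NR_CPUS 4
	static struct smtc_ipi_q IPIQ[NR_CPUS];
	static DEFINE_SPINLOCK(freeIPIq_lock);

	int main(void)
	{
		/* The old code assigned IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
		 * the patched code initialises each lock in place. */
		for (int i = 0; i < NR_CPUS; i++)
			spin_lock_init(&IPIQ[i].lock);

		printf("%d %d\n", IPIQ[0].lock.locked, freeIPIq_lock.locked);
		return 0;
	}
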
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 2d3472b21ebb..9316a024a818 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -156,6 +156,6 @@ void __init arch_init_irq(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &lasat_irq_type;
+ irq_desc[i].chip = &lasat_irq_type;
}
}
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index db53950b7cfb..9dd6b8925581 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -215,7 +215,7 @@ void __init arch_init_irq(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &atlas_irq_type;
+ irq_desc[i].chip = &atlas_irq_type;
spin_lock_init(&irq_desc[i].lock);
}
}
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index bd885785e2f9..31d179c4673f 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -147,6 +147,6 @@ void cpci_irq_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
- irq_desc[i].handler = &cpci_irq_type;
+ irq_desc[i].chip = &cpci_irq_type;
}
}
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index 755bde5146be..852265026fd1 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -137,10 +137,10 @@ void uart_irq_init(void)
irq_desc[80].status = IRQ_DISABLED;
irq_desc[80].action = 0;
irq_desc[80].depth = 2;
- irq_desc[80].handler = &uart_irq_type;
+ irq_desc[80].chip = &uart_irq_type;
irq_desc[81].status = IRQ_DISABLED;
irq_desc[81].action = 0;
irq_desc[81].depth = 2;
- irq_desc[81].handler = &uart_irq_type;
+ irq_desc[81].chip = &uart_irq_type;
}
diff --git a/arch/mips/momentum/ocelot_g/gt-irq.c b/arch/mips/momentum/ocelot_g/gt-irq.c
index e5eceed1beff..8bd9b844fa9e 100644
--- a/arch/mips/momentum/ocelot_g/gt-irq.c
+++ b/arch/mips/momentum/ocelot_g/gt-irq.c
@@ -59,7 +59,7 @@ void hook_irq_handler(int int_cause, int bit_num, void *isr_ptr)
* bit_num - Indicates which bit number in the cause register
*
* Outputs :
- * 1 if succesful, 0 if failure
+ * 1 if successful, 0 if failure
*/
int enable_galileo_irq(int int_cause, int bit_num)
{
@@ -83,7 +83,7 @@ int enable_galileo_irq(int int_cause, int bit_num)
* bit_num - Indicates which bit number in the cause register
*
* Outputs :
- * 1 if succesful, 0 if failure
+ * 1 if successful, 0 if failure
*/
int disable_galileo_irq(int int_cause, int bit_num)
{
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index c31e4cff64e0..65eb55400d77 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -38,7 +38,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
- char buf[3];
+ char buf[4];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
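
Note: the oprofile hunks here and in arch/powerpc/oprofile/common.c grow the counter-name buffer from 3 to 4 bytes. snprintf() cannot overflow the buffer, but it does truncate, so with buf[3] any counter index above 99 would lose its last digit; the extra byte makes room for three-digit indices. A small demonstration of that truncation behaviour:

	#include <stdio.h>

	int main(void)
	{
		char small[3], big[4];
		int i = 123;

		/* snprintf never overruns the buffer, it silently truncates:
		 * with a 3-byte buffer "123" becomes "12". */
		snprintf(small, sizeof small, "%d", i);
		snprintf(big, sizeof big, "%d", i);
		printf("buf[3]: \"%s\"  buf[4]: \"%s\"\n", small, big);
		return 0;
	}
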
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 4dfce154d4af..ba66f8c9bd4e 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -51,11 +51,11 @@ unsigned long PCIBIOS_MIN_MEM = 0;
*/
void
pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_controller *hose = dev->sysdata;
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
/* Make sure we start at our min on all hoses */
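
Note: pcibios_align_resource() (here and in the parisc/powerpc copies below) switches its size/align parameters and the local start variable from unsigned long to resource_size_t, which can be a 64-bit type even on a 32-bit kernel. That is also why the later powerpc printk/DBG hunks cast resource fields to unsigned long long and print them with %llx/%lld. A stand-alone sketch, with resource_size_t and struct resource approximated rather than taken from <linux/ioport.h>:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in: with 64-bit resources enabled, resource_size_t is wider
	 * than long on 32-bit builds. */
	typedef uint64_t resource_size_t;

	struct resource {
		resource_size_t start, end;
		unsigned long flags;
	};

	static void print_resource(const struct resource *res)
	{
		/* A plain %lx would be wrong when resource_size_t is wider
		 * than long, hence the explicit casts used in the patch. */
		printf("rsrc %016llx-%016llx (f=%lx)\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end, res->flags);
	}

	int main(void)
	{
		struct resource r = { 0x100000000ULL, 0x1ffffffffULL, 0x200 };

		print_resource(&r);
		return 0;
	}
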
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 39ee6314f627..8f18764a2359 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -236,7 +236,7 @@ void __init arch_init_irq(void)
int configPR;
for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) {
- irq_desc[i].handler = &level_irq_type;
+ irq_desc[i].chip = &level_irq_type;
pnx8550_ack(i); /* mask the irq just in case */
}
@@ -273,7 +273,7 @@ void __init arch_init_irq(void)
/* mask/priority is still 0 so we will not get any
* interrupts until it is unmasked */
- irq_desc[i].handler = &level_irq_type;
+ irq_desc[i].chip = &level_irq_type;
}
/* Priority level 0 */
@@ -282,12 +282,12 @@ void __init arch_init_irq(void)
/* Set int vector table address */
PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
- irq_desc[MIPS_CPU_GIC_IRQ].handler = &level_irq_type;
+ irq_desc[MIPS_CPU_GIC_IRQ].chip = &level_irq_type;
setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
/* init of Timer interrupts */
for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) {
- irq_desc[i].handler = &level_irq_type;
+ irq_desc[i].chip = &level_irq_type;
}
/* Stop Timer 1-3 */
@@ -295,7 +295,7 @@ void __init arch_init_irq(void)
configPR |= 0x00000038;
write_c0_config7(configPR);
- irq_desc[MIPS_CPU_TIMER_IRQ].handler = &level_irq_type;
+ irq_desc[MIPS_CPU_TIMER_IRQ].chip = &level_irq_type;
setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
}
diff --git a/arch/mips/pmc-sierra/yosemite/ht.c b/arch/mips/pmc-sierra/yosemite/ht.c
index 54b65a80abf5..fb523ebcafa8 100644
--- a/arch/mips/pmc-sierra/yosemite/ht.c
+++ b/arch/mips/pmc-sierra/yosemite/ht.c
@@ -383,12 +383,12 @@ void pcibios_update_resource(struct pci_dev *dev, struct resource *root,
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
/* We need to avoid collisions with `mirrored' VGA ports
and other strange ISA hardware, so we always want the
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index b19820110aa3..989167b49ce9 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -279,9 +279,9 @@ int __init ip22_eisa_init(void)
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
if (i < (SGINT_EISA + 8))
- irq_desc[i].handler = &ip22_eisa1_irq_type;
+ irq_desc[i].chip = &ip22_eisa1_irq_type;
else
- irq_desc[i].handler = &ip22_eisa2_irq_type;
+ irq_desc[i].chip = &ip22_eisa2_irq_type;
}
/* Cannot use request_irq because of kmalloc not being ready at such
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index fc6a7e2b189c..18906af69691 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -436,7 +436,7 @@ void __init arch_init_irq(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = handler;
+ irq_desc[i].chip = handler;
}
/* vector handler. this register the IRQ as non-sharable */
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index a9c58e067b53..8134220ed600 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -34,7 +34,7 @@
#define POWERDOWN_TIMEOUT 120
/*
- * Blink frequency during reboot grace period and when paniced.
+ * Blink frequency during reboot grace period and when panicked.
*/
#define POWERDOWN_FREQ (HZ / 4)
#define PANIC_FREQ (HZ / 8)
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 0b61a39ce2bb..869566c360ae 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -386,7 +386,7 @@ void __devinit register_bridge_irq(unsigned int irq)
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 1;
- irq_desc[irq].handler = &bridge_irq_type;
+ irq_desc[irq].chip = &bridge_irq_type;
}
int __devinit request_bridge_irq(struct bridge_controller *bc)
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index 8ba08047d164..00b94aaf6371 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -591,7 +591,7 @@ void __init arch_init_irq(void)
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 0;
- irq_desc[irq].handler = controller;
+ irq_desc[irq].chip = controller;
}
setup_irq(CRIME_MEMERR_IRQ, &memerr_irq);
setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq);
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index ab9d9cef089e..79ddb4605659 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -28,13 +28,13 @@
#define POWERDOWN_TIMEOUT 120
/*
- * Blink frequency during reboot grace period and when paniced.
+ * Blink frequency during reboot grace period and when panicked.
*/
#define POWERDOWN_FREQ (HZ / 4)
#define PANIC_FREQ (HZ / 8)
static struct timer_list power_timer, blink_timer, debounce_timer;
-static int has_paniced, shuting_down;
+static int has_panicked, shuting_down;
static void ip32_machine_restart(char *command) __attribute__((noreturn));
static void ip32_machine_halt(void) __attribute__((noreturn));
@@ -109,7 +109,7 @@ static void debounce(unsigned long data)
}
CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
- if (has_paniced)
+ if (has_panicked)
ip32_machine_restart(NULL);
enable_irq(MACEISA_RTC_IRQ);
@@ -117,7 +117,7 @@ static void debounce(unsigned long data)
static inline void ip32_power_button(void)
{
- if (has_paniced)
+ if (has_panicked)
return;
if (shuting_down || kill_proc(1, SIGINT, 1)) {
@@ -161,9 +161,9 @@ static int panic_event(struct notifier_block *this, unsigned long event,
{
unsigned long led;
- if (has_paniced)
+ if (has_panicked)
return NOTIFY_DONE;
- has_paniced = 1;
+ has_panicked = 1;
/* turn off the green LED */
led = mace->perif.ctrl.misc | MACEISA_LED_GREEN;
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index e61760b14d99..610df40cb820 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -276,10 +276,10 @@ void __init init_bcm1480_irqs(void)
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
if (i < BCM1480_NR_IRQS) {
- irq_desc[i].handler = &bcm1480_irq_type;
+ irq_desc[i].chip = &bcm1480_irq_type;
bcm1480_irq_owner[i] = 0;
} else {
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
}
}
}
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index f853c32f60a0..fcc61940f1ff 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -246,10 +246,10 @@ void __init init_sb1250_irqs(void)
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
if (i < SB1250_NR_IRQS) {
- irq_desc[i].handler = &sb1250_irq_type;
+ irq_desc[i].chip = &sb1250_irq_type;
sb1250_irq_owner[i] = 0;
} else {
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
}
}
}
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 7365b4853ddb..c19e158ec402 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -203,7 +203,7 @@ void __init arch_init_irq(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &pciasic_irq_type;
+ irq_desc[i].chip = &pciasic_irq_type;
}
change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ2|IE_IRQ3|IE_IRQ4);
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index 8ca68015cf40..a42be00483e6 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -227,7 +227,7 @@ static void __init tx4927_irq_cp0_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &tx4927_irq_cp0_type;
+ irq_desc[i].chip = &tx4927_irq_cp0_type;
}
return;
@@ -435,7 +435,7 @@ static void __init tx4927_irq_pic_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
- irq_desc[i].handler = &tx4927_irq_pic_type;
+ irq_desc[i].chip = &tx4927_irq_pic_type;
}
setup_irq(TX4927_IRQ_NEST_PIC_ON_CP0, &tx4927_irq_pic_action);
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index aee07ff2212a..c67978b6dae4 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -368,7 +368,7 @@ static void __init toshiba_rbtx4927_irq_ioc_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 3;
- irq_desc[i].handler = &toshiba_rbtx4927_irq_ioc_type;
+ irq_desc[i].chip = &toshiba_rbtx4927_irq_ioc_type;
}
setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_IOC_ON_PIC,
@@ -526,7 +526,7 @@ static void __init toshiba_rbtx4927_irq_isa_init(void)
irq_desc[i].action = 0;
irq_desc[i].depth =
((i < TOSHIBA_RBTX4927_IRQ_ISA_MID) ? (4) : (5));
- irq_desc[i].handler = &toshiba_rbtx4927_irq_isa_type;
+ irq_desc[i].chip = &toshiba_rbtx4927_irq_isa_type;
}
setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_ISA_ON_IOC,
@@ -692,13 +692,13 @@ void toshiba_rbtx4927_irq_dump(char *key)
{
u32 i, j = 0;
for (i = 0; i < NR_IRQS; i++) {
- if (strcmp(irq_desc[i].handler->typename, "none")
+ if (strcmp(irq_desc[i].chip->typename, "none")
== 0)
continue;
if ((i >= 1)
- && (irq_desc[i - 1].handler->typename ==
- irq_desc[i].handler->typename)) {
+ && (irq_desc[i - 1].chip->typename ==
+ irq_desc[i].chip->typename)) {
j++;
} else {
j = 0;
@@ -707,12 +707,12 @@ void toshiba_rbtx4927_irq_dump(char *key)
(TOSHIBA_RBTX4927_IRQ_INFO,
"%s irq=0x%02x/%3d s=0x%08x h=0x%08x a=0x%08x ah=0x%08x d=%1d n=%s/%02d\n",
key, i, i, irq_desc[i].status,
- (u32) irq_desc[i].handler,
+ (u32) irq_desc[i].chip,
(u32) irq_desc[i].action,
(u32) (irq_desc[i].action ? irq_desc[i].
action->handler : 0),
irq_desc[i].depth,
- irq_desc[i].handler->typename, j);
+ irq_desc[i].chip->typename, j);
}
}
#endif
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index 873805178d8e..0b2f8c849218 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -102,7 +102,7 @@ tx4938_irq_cp0_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &tx4938_irq_cp0_type;
+ irq_desc[i].chip = &tx4938_irq_cp0_type;
}
return;
@@ -306,7 +306,7 @@ tx4938_irq_pic_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
- irq_desc[i].handler = &tx4938_irq_pic_type;
+ irq_desc[i].chip = &tx4938_irq_pic_type;
}
setup_irq(TX4938_IRQ_NEST_PIC_ON_CP0, &tx4938_irq_pic_action);
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index 9cd9c0fe2265..3b8245dc5bd3 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -146,7 +146,7 @@ toshiba_rbtx4938_irq_ioc_init(void)
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 3;
- irq_desc[i].handler = &toshiba_rbtx4938_irq_ioc_type;
+ irq_desc[i].chip = &toshiba_rbtx4938_irq_ioc_type;
}
setup_irq(RBTX4938_IRQ_IOCINT,
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 07ae19cf0c29..b9323302cc4e 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -722,10 +722,10 @@ static int __init vr41xx_icu_init(void)
icu2_write(MGIUINTHREG, 0xffff);
for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
- irq_desc[i].handler = &sysint1_irq_type;
+ irq_desc[i].chip = &sysint1_irq_type;
for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
- irq_desc[i].handler = &sysint2_irq_type;
+ irq_desc[i].chip = &sysint2_irq_type;
cascade_irq(INT0_IRQ, icu_get_irq);
cascade_irq(INT1_IRQ, icu_get_irq);
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 86796bb63c3c..66aa50802deb 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -73,13 +73,13 @@ static void irq_dispatch(unsigned int irq, struct pt_regs *regs)
if (cascade->get_irq != NULL) {
unsigned int source_irq = irq;
desc = irq_desc + source_irq;
- desc->handler->ack(source_irq);
+ desc->chip->ack(source_irq);
irq = cascade->get_irq(irq, regs);
if (irq < 0)
atomic_inc(&irq_err_count);
else
irq_dispatch(irq, regs);
- desc->handler->end(source_irq);
+ desc->chip->end(source_irq);
} else
do_IRQ(irq, regs);
}
diff --git a/arch/mips/vr41xx/common/vrc4173.c b/arch/mips/vr41xx/common/vrc4173.c
index 3e31f8193d21..2d287b8893d9 100644
--- a/arch/mips/vr41xx/common/vrc4173.c
+++ b/arch/mips/vr41xx/common/vrc4173.c
@@ -483,7 +483,7 @@ static inline int vrc4173_icu_init(int cascade_irq)
vr41xx_set_irq_level(GIU_IRQ_TO_PIN(cascade_irq), LEVEL_LOW);
for (i = VRC4173_IRQ_BASE; i <= VRC4173_IRQ_LAST; i++)
- irq_desc[i].handler = &vrc4173_irq_type;
+ irq_desc[i].chip = &vrc4173_irq_type;
return 0;
}
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 31db6b61a39e..7b2511ca0a61 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -104,7 +104,7 @@ void __init rockhopper_init_irq(void)
}
for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
- irq_desc[i].handler = &i8259_irq_type;
+ irq_desc[i].chip = &i8259_irq_type;
setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 910fb3afc0b5..6dd0ea8f88e0 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -51,6 +51,10 @@ config GENERIC_HARDIRQS
config GENERIC_IRQ_PROBE
def_bool y
+config IRQ_PER_CPU
+ bool
+ default y
+
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 197936d9359a..82fe6ba29727 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -94,7 +94,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
if (irq == TIMER_IRQ || irq == IPI_IRQ) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
- irq_affinity[irq] = CPU_MASK_ALL;
+ irq_desc[irq].affinity = CPU_MASK_ALL;
return -EINVAL;
}
@@ -110,7 +110,7 @@ static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
if (cpu_check_affinity(irq, &dest))
return;
- irq_affinity[irq] = dest;
+ irq_desc[irq].affinity = dest;
}
#endif
@@ -125,6 +125,10 @@ static struct hw_interrupt_type cpu_interrupt_type = {
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
+ /* XXX: Needs to be written. We managed without it so far, but
+ * we really ought to write it.
+ */
+ .retrigger = NULL,
};
int show_interrupts(struct seq_file *p, void *v)
@@ -158,7 +162,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
@@ -210,12 +214,12 @@ int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
{
if (irq_desc[irq].action)
return -EBUSY;
- if (irq_desc[irq].handler != &cpu_interrupt_type)
+ if (irq_desc[irq].chip != &cpu_interrupt_type)
return -EBUSY;
if (type) {
- irq_desc[irq].handler = type;
- irq_desc[irq].handler_data = data;
+ irq_desc[irq].chip = type;
+ irq_desc[irq].chip_data = data;
cpu_interrupt_type.enable(irq);
}
return 0;
@@ -265,7 +269,7 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
- irq_affinity[irq] = cpumask_of_cpu(cpu);
+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif
return cpu_data[cpu].txn_addr;
@@ -326,7 +330,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
/* Work our way from MSb to LSb...same order we alloc EIRs */
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
#ifdef CONFIG_SMP
- cpumask_t dest = irq_affinity[irq];
+ cpumask_t dest = irq_desc[irq].affinity;
#endif
if (!(bit & eirr_val))
continue;
@@ -378,7 +382,7 @@ static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
- irq_desc[i].handler = &cpu_interrupt_type;
+ irq_desc[i].chip = &cpu_interrupt_type;
}
irq_desc[TIMER_IRQ].action = &timer_action;
@@ -404,13 +408,6 @@ void __init init_IRQ(void)
}
-void hw_resend_irq(struct hw_interrupt_type *type, unsigned int irq)
-{
- /* XXX: Needs to be written. We managed without it so far, but
- * we really ought to write it.
- */
-}
-
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ %d\n", irq);
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 79c7db2705fd..7d6967ee367c 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
* than res->start.
*/
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long alignment)
+ resource_size_t size, resource_size_t alignment)
{
unsigned long mask, align;
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index 3ba040050e4c..068b20d822e7 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -26,11 +26,10 @@ static struct cpu cpu_devices[NR_CPUS] __read_mostly;
static int __init topology_init(void)
{
- struct node *parent = NULL;
int num;
for_each_present_cpu(num) {
- register_cpu(&cpu_devices[num], num, parent);
+ register_cpu(&cpu_devices[num], num);
}
return 0;
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e922a88b2bad..d43e4521abf2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -30,6 +30,10 @@ config GENERIC_HARDIRQS
bool
default y
+config IRQ_PER_CPU
+ bool
+ default y
+
config RWSEM_GENERIC_SPINLOCK
bool
@@ -618,6 +622,9 @@ config HOTPLUG_CPU
Say N if you are unsure.
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
+
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on PPC_MULTIPLATFORM && EXPERIMENTAL
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index dbcb85994f46..22ceba844bf4 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -179,7 +179,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
/*
* This function is only called after the system
- * has paniced or is otherwise in a critical state.
+ * has panicked or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
*
@@ -190,13 +190,13 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
local_irq_disable();
for_each_irq(irq) {
- struct irq_desc *desc = irq_descp(irq);
+ struct irq_desc *desc = irq_desc + irq;
if (desc->status & IRQ_INPROGRESS)
- desc->handler->end(irq);
+ desc->chip->end(irq);
if (!(desc->status & IRQ_DISABLED))
- desc->handler->disable(irq);
+ desc->chip->disable(irq);
}
if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 40d4c14fde8f..24f6050aa4ab 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -120,8 +120,8 @@ int show_interrupts(struct seq_file *p, void *v)
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
- if (desc->handler)
- seq_printf(p, " %s ", desc->handler->typename);
+ if (desc->chip)
+ seq_printf(p, " %s ", desc->chip->typename);
else
seq_puts(p, " None ");
seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
@@ -164,13 +164,13 @@ void fixup_irqs(cpumask_t map)
if (irq_desc[irq].status & IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].handler->set_affinity)
- irq_desc[irq].handler->set_affinity(irq, mask);
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq, mask);
else if (irq_desc[irq].action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index 443606134dff..cbaa34196797 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -30,8 +30,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
*/
void default_machine_kexec(struct kimage *image)
{
- const extern unsigned char relocate_new_kernel[];
- const extern unsigned int relocate_new_kernel_size;
+ extern const unsigned char relocate_new_kernel[];
+ extern const unsigned int relocate_new_kernel_size;
unsigned long page_list;
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
relocate_new_kernel_t rnk;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index b5431ccf1147..8474355a1a4f 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -99,7 +99,7 @@ pcibios_fixup_resources(struct pci_dev *dev)
if (!res->flags)
continue;
if (res->end == 0xffffffff) {
- DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
+ DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
pci_name(dev), i, res->start, res->end);
res->end -= res->start;
res->start = 0;
@@ -117,7 +117,7 @@ pcibios_fixup_resources(struct pci_dev *dev)
res->start += offset;
res->end += offset;
#ifdef DEBUG
- printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
+ printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
i, res->flags, pci_name(dev),
res->start - offset, res->start);
#endif
@@ -173,18 +173,18 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
-void pcibios_align_resource(void *data, struct resource *res, unsigned long size,
- unsigned long align)
+void pcibios_align_resource(void *data, struct resource *res,
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (size > 0x100) {
printk(KERN_ERR "PCI: I/O Region %s/%d too large"
- " (%ld bytes)\n", pci_name(dev),
- dev->resource - res, size);
+ " (%lld bytes)\n", pci_name(dev),
+ dev->resource - res, (unsigned long long)size);
}
if (start & 0x300) {
@@ -255,8 +255,8 @@ pcibios_allocate_bus_resources(struct list_head *bus_list)
}
}
- DBG("PCI: bridge rsrc %lx..%lx (%lx), parent %p\n",
- res->start, res->end, res->flags, pr);
+ DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
+ res->start, res->end, res->flags, pr);
if (pr) {
if (request_resource(pr, res) == 0)
continue;
@@ -306,7 +306,7 @@ reparent_resources(struct resource *parent, struct resource *res)
*pp = NULL;
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
- DBG(KERN_INFO "PCI: reparented %s [%lx..%lx] under %s\n",
+ DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
p->name, p->start, p->end, res->name);
}
return 0;
@@ -362,13 +362,14 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i)
try = conflict->start - 1;
}
if (request_resource(pr, res)) {
- DBG(KERN_ERR "PCI: huh? couldn't move to %lx..%lx\n",
+ DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
res->start, res->end);
return -1; /* "can't happen" */
}
update_bridge_base(bus, i);
- printk(KERN_INFO "PCI: bridge %d resource %d moved to %lx..%lx\n",
- bus->number, i, res->start, res->end);
+ printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
+ bus->number, i, (unsigned long long)res->start,
+ (unsigned long long)res->end);
return 0;
}
@@ -479,14 +480,14 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
{
struct resource *pr, *r = &dev->resource[idx];
- DBG("PCI:%s: Resource %d: %08lx-%08lx (f=%lx)\n",
+ DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
pci_name(dev), idx, r->start, r->end, r->flags);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d"
" of device %s\n", idx, pci_name(dev));
if (pr)
- DBG("PCI: parent is %p: %08lx-%08lx (f=%lx)\n",
+ DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
pr, pr->start, pr->end, pr->flags);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
@@ -956,7 +957,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
res = &hose->io_resource;
res->flags = IORESOURCE_IO;
res->start = ranges[2];
- DBG("PCI: IO 0x%lx -> 0x%lx\n",
+ DBG("PCI: IO 0x%llx -> 0x%llx\n",
res->start, res->start + size - 1);
break;
case 2: /* memory space */
@@ -978,7 +979,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
if(ranges[0] & 0x40000000)
res->flags |= IORESOURCE_PREFETCH;
res->start = ranges[na+2];
- DBG("PCI: MEM[%d] 0x%lx -> 0x%lx\n", memno,
+ DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
res->start, res->start + size - 1);
}
break;
@@ -1074,7 +1075,7 @@ do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
- DBG(" IO window: %08lx-%08lx\n", res.start, res.end);
+ DBG(" IO window: %016llx-%016llx\n", res.start, res.end);
/* Set up the top and bottom of the PCI I/O segment for this bus. */
pci_read_config_dword(bridge, PCI_IO_BASE, &l);
@@ -1223,8 +1224,8 @@ do_fixup_p2p_level(struct pci_bus *bus)
continue;
if ((r->flags & IORESOURCE_IO) == 0)
continue;
- DBG("Trying to allocate from %08lx, size %08lx from parent"
- " res %d: %08lx -> %08lx\n",
+ DBG("Trying to allocate from %016llx, size %016llx from parent"
+ " res %d: %016llx -> %016llx\n",
res->start, res->end, i, r->start, r->end);
if (allocate_resource(r, res, res->end + 1, res->start, max,
@@ -1574,8 +1575,8 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
- prot);
+ printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
+ (unsigned long long)rp->start, prot);
return __pgprot(prot);
}
@@ -1755,7 +1756,7 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
- u64 *start, u64 *end)
+ resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
unsigned long offset = 0;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 247937dd8b73..286aa52aae33 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -138,11 +138,11 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
* which might have be mirrored at 0x0100-0x03ff..
*/
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long start = res->start;
+ resource_size_t start = res->start;
unsigned long alignto;
if (res->flags & IORESOURCE_IO) {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e5a44812441a..0932a62a1c96 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -215,7 +215,7 @@ int __init ppc_init(void)
/* register CPU devices */
for_each_possible_cpu(i)
- register_cpu(&cpu_devices[i], i, NULL);
+ register_cpu(&cpu_devices[i], i);
/* call platform init */
if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 5bc2585c8036..4662b580efa1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int sysfs_cpu_notify(struct notifier_block *self,
+static int __devinit sysfs_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
@@ -297,30 +297,19 @@ static int sysfs_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block sysfs_cpu_nb = {
+static struct notifier_block __devinitdata sysfs_cpu_nb = {
.notifier_call = sysfs_cpu_notify,
};
/* NUMA stuff */
#ifdef CONFIG_NUMA
-static struct node node_devices[MAX_NUMNODES];
-
static void register_nodes(void)
{
int i;
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (node_online(i)) {
- int p_node = parent_node(i);
- struct node *parent = NULL;
-
- if (p_node != i)
- parent = &node_devices[p_node];
-
- register_node(&node_devices[i], i, parent);
- }
- }
+ for (i = 0; i < MAX_NUMNODES; i++)
+ register_one_node(i);
}
int sysfs_add_device_to_node(struct sys_device *dev, int nid)
@@ -359,23 +348,13 @@ static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
int cpu;
- struct node *parent = NULL;
register_nodes();
-
register_cpu_notifier(&sysfs_cpu_nb);
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
-#ifdef CONFIG_NUMA
- /* The node to which a cpu belongs can't be known
- * until the cpu is made present.
- */
- parent = NULL;
- if (cpu_present(cpu))
- parent = &node_devices[cpu_to_node(cpu)];
-#endif
/*
* For now, we just see if the system supports making
* the RTAS calls for CPU hotplug. But, there may be a
@@ -387,7 +366,7 @@ static int __init topology_init(void)
c->no_control = 1;
if (cpu_online(cpu) || (c->no_control == 0)) {
- register_cpu(c, cpu, parent);
+ register_cpu(c, cpu);
sysdev_create_file(&c->sysdev, &attr_physical_id);
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index d20907561f46..7dd5dab789a1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;
-#define TICKLEN_SCALE (SHIFT_SCALE - 10)
+#define TICKLEN_SCALE TICK_LENGTH_SHIFT
u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs; /* 0.64 fraction */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index fdbba4206d59..a0a9e1e0061e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -40,6 +40,40 @@
#include <asm/kdebug.h>
#include <asm/siginfo.h>
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
@@ -142,7 +176,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
- if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return 0;
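
Note: the powerpc fault handler stops routing every page fault through notify_die(); a dedicated page-fault notifier chain is provided when CONFIG_KPROBES is enabled, while a stub that simply returns NOTIFY_DONE is used otherwise, so kernels without kprobes skip the notifier call entirely. A toy illustration of that compile-time split, using a single callback in place of a real notifier chain:

	#include <stdio.h>

	#define NOTIFY_DONE 0
	#define NOTIFY_STOP 1

	#ifdef CONFIG_KPROBES
	/* Built-in case: consult the registered notifier. */
	static int (*page_fault_notifier)(unsigned long addr);

	static int notify_page_fault(unsigned long addr)
	{
		return page_fault_notifier ? page_fault_notifier(addr)
					   : NOTIFY_DONE;
	}
	#else
	/* Without kprobes the hook collapses to a constant, so the hot
	 * fault path pays nothing for it. */
	static inline int notify_page_fault(unsigned long addr)
	{
		return NOTIFY_DONE;
	}
	#endif

	static int do_page_fault(unsigned long addr)
	{
		if (notify_page_fault(addr) == NOTIFY_STOP)
			return 0;	/* handled by a debugger/probe */
		printf("normal fault handling for %#lx\n", addr);
		return 1;
	}

	int main(void)
	{
		return !do_page_fault(0xdeadbeef);
	}
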
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e30f968c184..d454caada265 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
+#include <linux/poison.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -90,7 +91,7 @@ void free_initmem(void)
addr = (unsigned long)__init_begin;
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
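
Note: free_initmem() now fills released init pages with the named POISON_FREE_INITMEM constant from the new <linux/poison.h> include instead of a bare 0xcc, keeping the poison pattern defined in one place. The value below is only a stand-in for illustration:

	#include <stdio.h>
	#include <string.h>

	/* Stand-in for the <linux/poison.h> constant; the real value is
	 * defined centrally by the kernel. */
	#define POISON_FREE_INITMEM 0xcc

	int main(void)
	{
		unsigned char page[64];

		/* Equivalent of the free_initmem() loop body: overwrite the
		 * page with a recognisable pattern before freeing it. */
		memset(page, POISON_FREE_INITMEM, sizeof page);
		printf("%02x %02x\n", page[0], page[63]);
		return 0;
	}
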
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f3b9a20beb..089d939a0b3e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,15 +114,20 @@ void online_page(struct page *page)
num_physpages++;
}
-int __devinit add_memory(u64 start, u64 size)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return hot_add_scn_to_nid(start);
+}
+#endif
+
+int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdata;
struct zone *zone;
- int nid;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- nid = hot_add_scn_to_nid(start);
pgdata = NODE_DATA(nid);
start = (unsigned long)__va(start);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa98cb3b59d8..fbe23933f731 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -334,7 +334,7 @@ out:
return nid;
}
-static int cpu_numa_callback(struct notifier_block *nfb,
+static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -609,14 +609,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
return (void *)ret;
}
+static struct notifier_block __cpuinitdata ppc64_numa_nb = {
+ .notifier_call = cpu_numa_callback,
+ .priority = 1 /* Must run before sched domains notifier. */
+};
+
void __init do_init_bootmem(void)
{
int nid;
unsigned int i;
- static struct notifier_block ppc64_numa_nb = {
- .notifier_call = cpu_numa_callback,
- .priority = 1 /* Must run before sched domains notifier. */
- };
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 27ad56bd227e..fd0bbbe7a4de 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -94,7 +94,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
- char buf[3];
+ char buf[4];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index 16f7d3b30e1d..3baceb00fefa 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -91,9 +91,10 @@ int __init add_bridge(struct device_node *dev)
mpc83xx_pci2_busno = hose->first_busno;
}
- printk(KERN_INFO "Found MPC83xx PCI host bridge at 0x%08lx. "
+ printk(KERN_INFO "Found MPC83xx PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
- rsrc.start, hose->first_busno, hose->last_busno);
+ (unsigned long long)rsrc.start, hose->first_busno,
+ hose->last_busno);
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
diff --git a/arch/powerpc/platforms/85xx/pci.c b/arch/powerpc/platforms/85xx/pci.c
index bad290110ed1..48c8849c07ca 100644
--- a/arch/powerpc/platforms/85xx/pci.c
+++ b/arch/powerpc/platforms/85xx/pci.c
@@ -79,9 +79,10 @@ int __init add_bridge(struct device_node *dev)
mpc85xx_pci2_busno = hose->first_busno;
}
- printk(KERN_INFO "Found MPC85xx PCI host bridge at 0x%08lx. "
+ printk(KERN_INFO "Found MPC85xx PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
- rsrc.start, hose->first_busno, hose->last_busno);
+ (unsigned long long)rsrc.start, hose->first_busno,
+ hose->last_busno);
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 1bbf822b4efc..7bff3cbc5723 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -307,7 +307,7 @@ static void iic_request_ipi(int ipi, const char *name)
irq = iic_ipi_to_irq(ipi);
/* IPIs are marked SA_INTERRUPT as they must run with irqs
* disabled */
- get_irq_desc(irq)->handler = &iic_pic;
+ get_irq_desc(irq)->chip = &iic_pic;
get_irq_desc(irq)->status |= IRQ_PER_CPU;
request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL);
}
@@ -330,7 +330,7 @@ static void iic_setup_spe_handlers(void)
for (be=0; be < num_present_cpus() / 2; be++) {
for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
- get_irq_desc(irq)->handler = &iic_pic;
+ get_irq_desc(irq)->chip = &iic_pic;
}
}
}
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 55cbdd77a62d..7c3a0b6d34fd 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -162,7 +162,7 @@ void spider_init_IRQ_hardcoded(void)
spider_pics[node] = ioremap(spiderpic, 0x800);
for (n = 0; n < IIC_NUM_EXT; n++) {
int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
- get_irq_desc(irq)->handler = &spider_pic;
+ get_irq_desc(irq)->chip = &spider_pic;
}
/* do not mask any interrupts because of level */
@@ -217,7 +217,7 @@ void spider_init_IRQ(void)
for (n = 0; n < IIC_NUM_EXT; n++) {
int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
- get_irq_desc(irq)->handler = &spider_pic;
+ get_irq_desc(irq)->chip = &spider_pic;
}
/* do not mask any interrupts because of level */
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 80c02660e617..7854a380dce2 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1150,7 +1150,7 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
return mask;
}
-static int spufs_mfc_flush(struct file *file)
+static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
struct spu_context *ctx = file->private_data;
int ret;
@@ -1176,7 +1176,7 @@ out:
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
int datasync)
{
- return spufs_mfc_flush(file);
+ return spufs_mfc_flush(file, NULL);
}
static int spufs_mfc_fasync(int fd, struct file *file, int on)
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index b30e55dab832..a656d810a44a 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(spu_save);
* @spu: pointer to SPU iomem structure.
*
* Perform harvest + restore, as we may not be coming
- * from a previous succesful save operation, and the
+ * from a previous successful save operation, and the
* hardware state is unknown.
*/
int spu_restore(struct spu_state *new, struct spu *spu)
@@ -2203,7 +2203,7 @@ void spu_init_csa(struct spu_state *csa)
memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;
- csa->register_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&csa->register_lock);
/* Set LS pages reserved to allow for user-space mapping. */
for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index ac224876ce59..53515daf01b1 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -143,7 +143,7 @@ hydra_init(void)
if (np == NULL || of_address_to_resource(np, 0, &r))
return 0;
Hydra = ioremap(r.start, r.end-r.start);
- printk("Hydra Mac I/O at %lx\n", r.start);
+ printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
printk("Hydra Feature_Control was %x",
in_le32(&Hydra->Feature_Control));
out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
@@ -267,7 +267,7 @@ chrp_find_bridges(void)
bus_range[0], bus_range[1]);
printk(" controlled by %s", dev->type);
if (!is_longtrail)
- printk(" at %lx", r.start);
+ printk(" at %llx", (unsigned long long)r.start);
printk("\n");
hose = pcibios_alloc_controller();
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 62bbbcf5ded3..33bb4aa0e1e8 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -242,9 +242,9 @@ void __init iSeries_activate_IRQs()
for_each_irq (irq) {
irq_desc_t *desc = get_irq_desc(irq);
- if (desc && desc->handler && desc->handler->startup) {
+ if (desc && desc->chip && desc->chip->startup) {
spin_lock_irqsave(&desc->lock, flags);
- desc->handler->startup(irq);
+ desc->chip->startup(irq);
spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -324,7 +324,7 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus,
+ function;
virtirq = virt_irq_create_mapping(realirq);
- irq_desc[virtirq].handler = &iSeries_IRQ_handler;
+ irq_desc[virtirq].chip = &iSeries_IRQ_handler;
return virtirq;
}
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 9a4efc0c3b29..f7170ff86dab 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -376,9 +376,10 @@ static void __init maple_fixup_phb_resources(void)
unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
hose->io_resource.start += offset;
hose->io_resource.end += offset;
- printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
+ printk(KERN_INFO "PCI Host %d, io start: %llx; io end: %llx\n",
hose->global_number,
- hose->io_resource.start, hose->io_resource.end);
+ (unsigned long long)hose->io_resource.start,
+ (unsigned long long)hose->io_resource.end);
}
}
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index 498b042e1837..c7a27eddca6d 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -119,7 +119,14 @@ int pmac_backlight_set_legacy_brightness(int brightness)
down(&pmac_backlight->sem);
props = pmac_backlight->props;
props->brightness = brightness *
- props->max_brightness / OLD_BACKLIGHT_MAX;
+ (props->max_brightness + 1) /
+ (OLD_BACKLIGHT_MAX + 1);
+
+ if (props->brightness > props->max_brightness)
+ props->brightness = props->max_brightness;
+ else if (props->brightness < 0)
+ props->brightness = 0;
+
props->update_status(pmac_backlight);
up(&pmac_backlight->sem);
@@ -140,8 +147,11 @@ int pmac_backlight_get_legacy_brightness()
down(&pmac_backlight->sem);
props = pmac_backlight->props;
+
result = props->brightness *
- OLD_BACKLIGHT_MAX / props->max_brightness;
+ (OLD_BACKLIGHT_MAX + 1) /
+ (props->max_brightness + 1);
+
up(&pmac_backlight->sem);
}
mutex_unlock(&pmac_backlight_mutex);
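
Note: the legacy backlight conversion changes from scaling by the two maxima (brightness * max_brightness / OLD_BACKLIGHT_MAX) to scaling by the number of steps on each side ((max_brightness + 1) / (OLD_BACKLIGHT_MAX + 1)) and then clamping the result into [0, max_brightness]. The small program below uses illustrative range sizes only and prints the two mappings side by side so the difference in rounding is visible:

	#include <stdio.h>

	/* Illustrative values; the real driver supplies both maxima. */
	#define OLD_BACKLIGHT_MAX 15

	static int clamp(int v, int lo, int hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	int main(void)
	{
		int max_brightness = 127;

		for (int legacy = 0; legacy <= OLD_BACKLIGHT_MAX; legacy++) {
			/* Old mapping: scale by the maxima directly. */
			int old = legacy * max_brightness / OLD_BACKLIGHT_MAX;

			/* New mapping: scale by step counts, then clamp,
			 * as the patched code does. */
			int new = clamp(legacy * (max_brightness + 1) /
					(OLD_BACKLIGHT_MAX + 1),
					0, max_brightness);

			printf("legacy %2d -> old %3d new %3d\n",
			       legacy, old, new);
		}
		return 0;
	}
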
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 80035853467b..d524a915aa86 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -939,9 +939,10 @@ static int __init add_bridge(struct device_node *dev)
disp_name = "Chaos";
primary = 0;
}
- printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. "
+ printk(KERN_INFO "Found %s PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
- disp_name, rsrc.start, hose->first_busno, hose->last_busno);
+ disp_name, (unsigned long long)rsrc.start, hose->first_busno,
+ hose->last_busno);
#endif /* CONFIG_PPC32 */
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 047f954a89eb..93e7505debc5 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -546,7 +546,7 @@ struct pmf_device {
};
static LIST_HEAD(pmf_devices);
-static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pmf_lock);
static DEFINE_MUTEX(pmf_irq_mutex);
static void pmf_release_device(struct kref *kref)
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 18bf3011d1e3..9f6189af6dd6 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -446,7 +446,7 @@ static void __init pmac_pic_probe_oldstyle(void)
/* Set the handler for the main PIC */
for ( i = 0; i < max_real_irqs ; i++ )
- irq_desc[i].handler = &pmac_pic;
+ irq_desc[i].chip = &pmac_pic;
/* Get addresses of first controller if we have a node for it */
BUG_ON(of_address_to_resource(master, 0, &r));
@@ -493,7 +493,7 @@ static void __init pmac_pic_probe_oldstyle(void)
/* Setup handlers for secondary controller and hook cascade irq*/
if (slave) {
for ( i = max_real_irqs ; i < max_irqs ; i++ )
- irq_desc[i].handler = &gatwick_pic;
+ irq_desc[i].chip = &gatwick_pic;
setup_irq(irq_cascade, &gatwick_cascade_action);
}
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 98c23aec85be..c37a8497c60f 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -287,7 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
* find the pci device that corresponds to a given address.
* This routine scans all pci busses to build the cache.
* Must be run late in boot process, after the pci controllers
- * have been scaned for devices (after all device resources are known).
+ * have been scanned for devices (after all device resources are known).
*/
void __init pci_addr_cache_build(void)
{
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 8f2d12935b99..45ccc687e57c 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -35,7 +35,7 @@
*/
/* EEH event workqueue setup. */
-static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index b14f9b5c114e..19c03dd43000 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -238,7 +238,7 @@ static int get_irq_server(unsigned int irq)
{
unsigned int server;
/* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t cpumask = irq_affinity[irq];
+ cpumask_t cpumask = irq_desc[irq].affinity;
cpumask_t tmp = CPU_MASK_NONE;
if (!distribute_irqs)
@@ -558,7 +558,7 @@ nextnode:
}
for (i = irq_offset_value(); i < NR_IRQS; ++i)
- get_irq_desc(i)->handler = &xics_pic;
+ get_irq_desc(i)->chip = &xics_pic;
xics_setup_cpu();
@@ -701,9 +701,9 @@ void xics_migrate_irqs_away(void)
continue;
/* We only need to migrate enabled IRQS */
- if (desc == NULL || desc->handler == NULL
+ if (desc == NULL || desc->chip == NULL
|| desc->action == NULL
- || desc->handler->set_affinity == NULL)
+ || desc->chip->set_affinity == NULL)
continue;
spin_lock_irqsave(&desc->lock, flags);
@@ -728,8 +728,8 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
- desc->handler->set_affinity(virq, CPU_MASK_ALL);
- irq_affinity[virq] = CPU_MASK_ALL;
+ desc->chip->set_affinity(virq, CPU_MASK_ALL);
+ irq_desc[irq].affinity = CPU_MASK_ALL;
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index b7ac32fdd776..2bff30f6d635 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -208,7 +208,7 @@ void __init i8259_init(unsigned long intack_addr, int offset)
spin_unlock_irqrestore(&i8259_lock, flags);
for (i = 0; i < NUM_ISA_INTERRUPTS; ++i)
- irq_desc[offset + i].handler = &i8259_pic;
+ irq_desc[offset + i].chip = &i8259_pic;
/* reserve our resources */
setup_irq(offset + 2, &i8259_irqaction);
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 8f01e0f1d847..46801f5ec03f 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -472,7 +472,7 @@ void __init ipic_init(phys_addr_t phys_addr,
ipic_write(primary_ipic->regs, IPIC_SEMSR, temp);
for (i = 0 ; i < NR_IPIC_INTS ; i++) {
- irq_desc[i+irq_offset].handler = &ipic;
+ irq_desc[i+irq_offset].chip = &ipic;
irq_desc[i+irq_offset].status = IRQ_LEVEL;
}
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 74e0d31a3559..615350d46b52 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -32,7 +32,7 @@
static void __iomem *mmio_nvram_start;
static long mmio_nvram_len;
-static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mmio_nvram_lock);
static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
{
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index bffe50d02c99..28df9c827ca6 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -379,14 +379,14 @@ static inline u32 mpic_physmask(u32 cpumask)
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
- return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
+ return container_of(irq_desc[ipi].chip, struct mpic, hc_ipi);
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
- return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
+ return container_of(irq_desc[irq].chip, struct mpic, hc_irq);
}
/* Send an EOI */
@@ -752,7 +752,7 @@ void __init mpic_init(struct mpic *mpic)
if (!(mpic->flags & MPIC_PRIMARY))
continue;
irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
- irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
+ irq_desc[mpic->ipi_offset+i].chip = &mpic->hc_ipi;
#endif /* CONFIG_SMP */
}
@@ -813,7 +813,7 @@ void __init mpic_init(struct mpic *mpic)
/* init linux descriptors */
if (i < mpic->irq_count) {
irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
- irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
+ irq_desc[mpic->irq_offset+i].chip = &mpic->hc_irq;
}
}
@@ -906,7 +906,7 @@ void mpic_setup_this_cpu(void)
/* let the mpic know we want intrs. default affinity is 0xffffffff
* until changed via /proc. That's how it's done on x86. If we want
* it differently, then we should make sure we also change the default
- * values of irq_affinity in irq.c.
+ * values of irq_desc[].affinity in irq.c.
*/
if (distribute_irqs) {
for (i = 0; i < mpic->num_sources ; i++)
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 12b84ca51327..9b3ace26280c 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -187,7 +187,7 @@ cpm_interrupt_init(void)
* interrupt vectors
*/
for ( i = CPM_IRQ_OFFSET ; i < CPM_IRQ_OFFSET + NR_CPM_INTS ; i++ )
- irq_desc[i].handler = &cpm_pic;
+ irq_desc[i].chip = &cpm_pic;
/* Set our interrupt handler with the core CPU. */
if (setup_irq(CPM_INTERRUPT, &cpm_interrupt_irqaction))
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
index 84d65a87191e..a469ba438cbe 100644
--- a/arch/ppc/kernel/machine_kexec.c
+++ b/arch/ppc/kernel/machine_kexec.c
@@ -25,8 +25,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
unsigned long reboot_code_buffer,
unsigned long start_address) ATTRIB_NORET;
-const extern unsigned char relocate_new_kernel[];
-const extern unsigned int relocate_new_kernel_size;
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
void machine_shutdown(void)
{
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index d20accf9650d..242bb052be67 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -95,8 +95,10 @@ pcibios_fixup_resources(struct pci_dev *dev)
if (!res->flags)
continue;
if (res->end == 0xffffffff) {
- DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
- pci_name(dev), i, res->start, res->end);
+ DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
+ pci_name(dev), i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
@@ -169,18 +171,18 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
-void pcibios_align_resource(void *data, struct resource *res, unsigned long size,
- unsigned long align)
+void pcibios_align_resource(void *data, struct resource *res,
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (size > 0x100) {
printk(KERN_ERR "PCI: I/O Region %s/%d too large"
- " (%ld bytes)\n", pci_name(dev),
- dev->resource - res, size);
+ " (%lld bytes)\n", pci_name(dev),
+ dev->resource - res, (unsigned long long)size);
}
if (start & 0x300) {
@@ -251,8 +253,9 @@ pcibios_allocate_bus_resources(struct list_head *bus_list)
}
}
- DBG("PCI: bridge rsrc %lx..%lx (%lx), parent %p\n",
- res->start, res->end, res->flags, pr);
+ DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end, res->flags, pr);
if (pr) {
if (request_resource(pr, res) == 0)
continue;
@@ -302,8 +305,9 @@ reparent_resources(struct resource *parent, struct resource *res)
*pp = NULL;
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
- DBG(KERN_INFO "PCI: reparented %s [%lx..%lx] under %s\n",
- p->name, p->start, p->end, res->name);
+ DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
+ p->name, (unsigned long long)p->start,
+ (unsigned long long)p->end, res->name);
}
return 0;
}
@@ -358,13 +362,15 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i)
try = conflict->start - 1;
}
if (request_resource(pr, res)) {
- DBG(KERN_ERR "PCI: huh? couldn't move to %lx..%lx\n",
- res->start, res->end);
+ DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
return -1; /* "can't happen" */
}
update_bridge_base(bus, i);
- printk(KERN_INFO "PCI: bridge %d resource %d moved to %lx..%lx\n",
- bus->number, i, res->start, res->end);
+ printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
+ bus->number, i, (unsigned long long)res->start,
+ (unsigned long long)res->end);
return 0;
}
@@ -475,15 +481,17 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
{
struct resource *pr, *r = &dev->resource[idx];
- DBG("PCI:%s: Resource %d: %08lx-%08lx (f=%lx)\n",
- pci_name(dev), idx, r->start, r->end, r->flags);
+ DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
+ pci_name(dev), idx, (unsigned long long)r->start,
+ (unsigned long long)r->end, r->flags);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d"
" of device %s\n", idx, pci_name(dev));
if (pr)
- DBG("PCI: parent is %p: %08lx-%08lx (f=%lx)\n",
- pr, pr->start, pr->end, pr->flags);
+ DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
+ pr, (unsigned long long)pr->start,
+ (unsigned long long)pr->end, pr->flags);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
r->end -= r->start;
@@ -952,8 +960,8 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
- prot);
+ printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
+ (unsigned long long)rp->start, prot);
return __pgprot(prot);
}
@@ -1122,7 +1130,7 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
- u64 *start, u64 *end)
+ resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
unsigned long offset = 0;
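
Several of the DBG/printk changes above follow one pattern: resource_size_t may be 32 or 64 bits wide depending on the configuration, so the value is cast to unsigned long long and printed with %llx, letting a single format string serve both cases. A minimal sketch (my_print_resource is a hypothetical helper):

#include <linux/ioport.h>
#include <linux/kernel.h>

static void my_print_resource(const struct resource *res)
{
        /* the casts keep the format string valid whether resource_size_t
         * is 32 or 64 bits wide */
        printk(KERN_DEBUG "resource [%016llx-%016llx] flags %lx\n",
               (unsigned long long)res->start,
               (unsigned long long)res->end,
               res->flags);
}
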
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 1f79e84ab464..4b4607d89bfa 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -475,7 +475,7 @@ int __init ppc_init(void)
/* register CPU devices */
for_each_possible_cpu(i)
- register_cpu(&cpu_devices[i], i, NULL);
+ register_cpu(&cpu_devices[i], i);
/* call platform init */
if (ppc_md.init != NULL) {
diff --git a/arch/ppc/platforms/apus_setup.c b/arch/ppc/platforms/apus_setup.c
index fe0cdc04d436..5c4118a459f3 100644
--- a/arch/ppc/platforms/apus_setup.c
+++ b/arch/ppc/platforms/apus_setup.c
@@ -734,9 +734,9 @@ void apus_init_IRQ(void)
for ( i = 0 ; i < AMI_IRQS; i++ ) {
irq_desc[i].status = IRQ_LEVEL;
if (i < IRQ_AMIGA_AUTO) {
- irq_desc[i].handler = &amiga_irqctrl;
+ irq_desc[i].chip = &amiga_irqctrl;
} else {
- irq_desc[i].handler = &amiga_sys_irqctrl;
+ irq_desc[i].chip = &amiga_sys_irqctrl;
action = &amiga_sys_irqaction[i-IRQ_AMIGA_AUTO];
if (action->name)
setup_irq(i, action);
diff --git a/arch/ppc/platforms/sbc82xx.c b/arch/ppc/platforms/sbc82xx.c
index 866807b4ad0b..41006d2b4b38 100644
--- a/arch/ppc/platforms/sbc82xx.c
+++ b/arch/ppc/platforms/sbc82xx.c
@@ -172,7 +172,7 @@ void __init sbc82xx_init_IRQ(void)
/* Set up the interrupt handlers for the i8259 IRQs */
for (i = NR_SIU_INTS; i < NR_SIU_INTS + 8; i++) {
- irq_desc[i].handler = &sbc82xx_i8259_ic;
+ irq_desc[i].chip = &sbc82xx_i8259_ic;
irq_desc[i].status |= IRQ_LEVEL;
}
diff --git a/arch/ppc/syslib/cpc700_pic.c b/arch/ppc/syslib/cpc700_pic.c
index 5add0a919ef6..172aa215fdb0 100644
--- a/arch/ppc/syslib/cpc700_pic.c
+++ b/arch/ppc/syslib/cpc700_pic.c
@@ -140,12 +140,12 @@ cpc700_init_IRQ(void)
/* IRQ 0 is highest */
for (i = 0; i < 17; i++) {
- irq_desc[i].handler = &cpc700_pic;
+ irq_desc[i].chip = &cpc700_pic;
cpc700_pic_init_irq(i);
}
for (i = 20; i < 32; i++) {
- irq_desc[i].handler = &cpc700_pic;
+ irq_desc[i].chip = &cpc700_pic;
cpc700_pic_init_irq(i);
}
diff --git a/arch/ppc/syslib/cpm2_pic.c b/arch/ppc/syslib/cpm2_pic.c
index 29d95d415ceb..c0fee0beb815 100644
--- a/arch/ppc/syslib/cpm2_pic.c
+++ b/arch/ppc/syslib/cpm2_pic.c
@@ -171,7 +171,7 @@ void cpm2_init_IRQ(void)
/* Enable chaining to OpenPIC, and make everything level
*/
for (i = 0; i < NR_CPM_INTS; i++) {
- irq_desc[i+CPM_IRQ_OFFSET].handler = &cpm2_pic;
+ irq_desc[i+CPM_IRQ_OFFSET].chip = &cpm2_pic;
irq_desc[i+CPM_IRQ_OFFSET].status |= IRQ_LEVEL;
}
}
diff --git a/arch/ppc/syslib/gt64260_pic.c b/arch/ppc/syslib/gt64260_pic.c
index dc3bd9ecbbf6..91096b38ae70 100644
--- a/arch/ppc/syslib/gt64260_pic.c
+++ b/arch/ppc/syslib/gt64260_pic.c
@@ -98,7 +98,7 @@ gt64260_init_irq(void)
/* use the gt64260 for all (possible) interrupt sources */
for (i = gt64260_irq_base; i < (gt64260_irq_base + 96); i++)
- irq_desc[i].handler = &gt64260_pic;
+ irq_desc[i].chip = &gt64260_pic;
if (ppc_md.progress)
ppc_md.progress("gt64260_init_irq: exit", 0x0);
diff --git a/arch/ppc/syslib/m82xx_pci.c b/arch/ppc/syslib/m82xx_pci.c
index 1941a8c7ca9a..63fa5b313396 100644
--- a/arch/ppc/syslib/m82xx_pci.c
+++ b/arch/ppc/syslib/m82xx_pci.c
@@ -159,7 +159,7 @@ pq2pci_init_irq(void)
immap->im_memctl.memc_or8 = 0xffff8010;
#endif
for (irq = NR_CPM_INTS; irq < NR_CPM_INTS + 4; irq++)
- irq_desc[irq].handler = &pq2pci_ic;
+ irq_desc[irq].chip = &pq2pci_ic;
/* make PCI IRQ level sensitive */
immap->im_intctl.ic_siexr &=
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index dae9af78bde1..0c4c0de7c59f 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -347,13 +347,13 @@ m8xx_init_IRQ(void)
int i;
for (i = SIU_IRQ_OFFSET ; i < SIU_IRQ_OFFSET + NR_SIU_INTS ; i++)
- irq_desc[i].handler = &ppc8xx_pic;
+ irq_desc[i].chip = &ppc8xx_pic;
cpm_interrupt_init();
#if defined(CONFIG_PCI)
for (i = I8259_IRQ_OFFSET ; i < I8259_IRQ_OFFSET + NR_8259_INTS ; i++)
- irq_desc[i].handler = &i8259_pic;
+ irq_desc[i].chip = &i8259_pic;
i8259_pic_irq_offset = I8259_IRQ_OFFSET;
i8259_init(0);
diff --git a/arch/ppc/syslib/mpc52xx_pic.c b/arch/ppc/syslib/mpc52xx_pic.c
index c4406f9dc6a3..6425b5cee7db 100644
--- a/arch/ppc/syslib/mpc52xx_pic.c
+++ b/arch/ppc/syslib/mpc52xx_pic.c
@@ -204,9 +204,9 @@ mpc52xx_init_irq(void)
out_be32(&intr->main_pri1, 0);
out_be32(&intr->main_pri2, 0);
- /* Initialize irq_desc[i].handler's with mpc52xx_ic. */
+ /* Initialize irq_desc[i].chip's with mpc52xx_ic. */
for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].handler = &mpc52xx_ic;
+ irq_desc[i].chip = &mpc52xx_ic;
irq_desc[i].status = IRQ_LEVEL;
}
diff --git a/arch/ppc/syslib/mv64360_pic.c b/arch/ppc/syslib/mv64360_pic.c
index 5a19697060f0..a4244d468381 100644
--- a/arch/ppc/syslib/mv64360_pic.c
+++ b/arch/ppc/syslib/mv64360_pic.c
@@ -119,7 +119,7 @@ mv64360_init_irq(void)
/* All interrupts are level interrupts */
for (i = mv64360_irq_base; i < (mv64360_irq_base + 96); i++) {
irq_desc[i].status |= IRQ_LEVEL;
- irq_desc[i].handler = &mv64360_pic;
+ irq_desc[i].chip = &mv64360_pic;
}
if (ppc_md.progress)
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 70456c8f998c..767a0bc95817 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -373,7 +373,7 @@ void __init openpic_init(int offset)
OPENPIC_VEC_IPI+i+offset);
/* IPIs are per-CPU */
irq_desc[OPENPIC_VEC_IPI+i+offset].status |= IRQ_PER_CPU;
- irq_desc[OPENPIC_VEC_IPI+i+offset].handler = &open_pic_ipi;
+ irq_desc[OPENPIC_VEC_IPI+i+offset].chip = &open_pic_ipi;
}
#endif
@@ -408,7 +408,7 @@ void __init openpic_init(int offset)
/* Init descriptors */
for (i = offset; i < NumSources + offset; i++)
- irq_desc[i].handler = &open_pic;
+ irq_desc[i].chip = &open_pic;
/* Initialize the spurious interrupt */
if (ppc_md.progress) ppc_md.progress("openpic: spurious",0x3bd);
@@ -615,8 +615,8 @@ void __devinit do_openpic_setup_cpu(void)
/* let the openpic know we want intrs. default affinity
* is 0xffffffff until changed via /proc
* That's how it's done on x86. If we want it differently, then
- * we should make sure we also change the default values of irq_affinity
- * in irq.c.
+ * we should make sure we also change the default values of
+ * irq_desc[].affinity in irq.c.
*/
for (i = 0; i < NumSources; i++)
openpic_mapirq(i, msk, CPU_MASK_ALL);
diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
index bcbe40de26fe..b8154efff6ed 100644
--- a/arch/ppc/syslib/open_pic2.c
+++ b/arch/ppc/syslib/open_pic2.c
@@ -290,7 +290,7 @@ void __init openpic2_init(int offset)
/* Init descriptors */
for (i = offset; i < NumSources + offset; i++)
- irq_desc[i].handler = &open_pic2;
+ irq_desc[i].chip = &open_pic2;
/* Initialize the spurious interrupt */
if (ppc_md.progress) ppc_md.progress("openpic2: spurious",0x3bd);
diff --git a/arch/ppc/syslib/ppc403_pic.c b/arch/ppc/syslib/ppc403_pic.c
index c46043c47225..1584c8b1229f 100644
--- a/arch/ppc/syslib/ppc403_pic.c
+++ b/arch/ppc/syslib/ppc403_pic.c
@@ -121,5 +121,5 @@ ppc4xx_pic_init(void)
ppc_md.get_irq = ppc403_pic_get_irq;
for (i = 0; i < NR_IRQS; i++)
- irq_desc[i].handler = &ppc403_aic;
+ irq_desc[i].chip = &ppc403_aic;
}
diff --git a/arch/ppc/syslib/ppc4xx_pic.c b/arch/ppc/syslib/ppc4xx_pic.c
index fd9af0fc0e9f..e669c1335d47 100644
--- a/arch/ppc/syslib/ppc4xx_pic.c
+++ b/arch/ppc/syslib/ppc4xx_pic.c
@@ -276,7 +276,7 @@ void __init ppc4xx_pic_init(void)
/* Attach low-level handlers */
for (i = 0; i < (NR_UICS << 5); ++i) {
- irq_desc[i].handler = &__uic[i >> 5].decl;
+ irq_desc[i].chip = &__uic[i >> 5].decl;
if (is_level_sensitive(i))
irq_desc[i].status |= IRQ_LEVEL;
}
diff --git a/arch/ppc/syslib/xilinx_pic.c b/arch/ppc/syslib/xilinx_pic.c
index e672b600f315..39a93dc6375b 100644
--- a/arch/ppc/syslib/xilinx_pic.c
+++ b/arch/ppc/syslib/xilinx_pic.c
@@ -143,7 +143,7 @@ ppc4xx_pic_init(void)
ppc_md.get_irq = xilinx_pic_get_irq;
for (i = 0; i < NR_IRQS; ++i) {
- irq_desc[i].handler = &xilinx_intc;
+ irq_desc[i].chip = &xilinx_intc;
if (XPAR_INTC_0_KIND_OF_INTR & (0x00000001 << i))
irq_desc[i].status &= ~IRQ_LEVEL;
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index e806a8922bbb..71d65eb30650 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -3,9 +3,9 @@
*
* Definitions and interface for Linux - z/VM Monitor Stream.
*
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
//#define APPLDATA_DEBUG /* Debug messages on/off */
@@ -29,6 +29,22 @@
#define CTL_APPLDATA_NET_SUM 2125
#define CTL_APPLDATA_PROC 2126
+#ifndef CONFIG_64BIT
+
+#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
+#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
+#define APPLDATA_GEN_EVENT_RECORD 0x02
+#define APPLDATA_START_CONFIG_REC 0x03
+
+#else
+
+#define APPLDATA_START_INTERVAL_REC 0x80
+#define APPLDATA_STOP_REC 0x81
+#define APPLDATA_GEN_EVENT_RECORD 0x82
+#define APPLDATA_START_CONFIG_REC 0x83
+
+#endif /* CONFIG_64BIT */
+
#define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x)
#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
@@ -53,7 +69,11 @@ struct appldata_ops {
void *data; /* record data */
unsigned int size; /* size of record */
struct module *owner; /* THIS_MODULE */
+ char mod_lvl[2]; /* modification level, EBCDIC */
};
extern int appldata_register_ops(struct appldata_ops *ops);
extern void appldata_unregister_ops(struct appldata_ops *ops);
+extern int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+ u16 length, char *mod_lvl);
+
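
With the header change above, appldata_diag() becomes callable from the individual data-gathering modules and takes the record's EBCDIC modification level explicitly. A hedged sketch of how a module might use the new interface (my_ops, my_record_data, my_start_record and the record number are hypothetical; only the field names and the appldata_diag() signature come from the patch):

#include <linux/module.h>
#include "appldata.h"

struct my_record_data {
        u64 timestamp;                          /* record layout hypothetical */
} __attribute__((packed));

static struct my_record_data my_data;

static struct appldata_ops my_ops = {
        .name      = "my",
        .record_nr = 0x7f,                      /* hypothetical record number */
        .data      = &my_data,
        .size      = sizeof(my_data),
        .mod_lvl   = { 0xF0, 0xF1 },            /* EBCDIC "01" */
        .owner     = THIS_MODULE,
};

static int my_start_record(void)
{
        /* start interval data collection via DIAG 0xDC */
        return appldata_diag(my_ops.record_nr, APPLDATA_START_INTERVAL_REC,
                             (unsigned long) my_ops.data, my_ops.size,
                             my_ops.mod_lvl);
}
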
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 9a22434a580c..61bc44626c04 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,9 +5,9 @@
* Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules.
*
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/config.h>
@@ -40,22 +40,6 @@
#define TOD_MICRO 0x01000 /* nr. of TOD clock units
for 1 microsecond */
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
-#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_RECORD 0x02
-#define APPLDATA_START_CONFIG_REC 0x03
-
-#else
-
-#define APPLDATA_START_INTERVAL_REC 0x80
-#define APPLDATA_STOP_REC 0x81
-#define APPLDATA_GEN_EVENT_RECORD 0x82
-#define APPLDATA_START_CONFIG_REC 0x83
-
-#endif /* CONFIG_64BIT */
-
/*
* Parameter list for DIAGNOSE X'DC'
@@ -195,8 +179,8 @@ static void appldata_work_fn(void *data)
*
* prepare parameter list, issue DIAG 0xDC
*/
-static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
- u16 length)
+int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+ u16 length, char *mod_lvl)
{
unsigned long ry;
struct appldata_product_id {
@@ -214,7 +198,7 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
.record_nr = record_nr,
.version_nr = {0xF2, 0xF6}, /* "26" */
.release_nr = {0xF0, 0xF1}, /* "01" */
- .mod_lvl = {0xF0, 0xF0}, /* "00" */
+ .mod_lvl = {mod_lvl[0], mod_lvl[1]},
};
struct appldata_parameter_list appldata_parameter_list = {
.diag = 0xDC,
@@ -467,24 +451,25 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
module_put(ops->owner);
return -ENODEV;
}
- ops->active = 1;
ops->callback(ops->data); // init record
rc = appldata_diag(ops->record_nr,
APPLDATA_START_INTERVAL_REC,
- (unsigned long) ops->data, ops->size);
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
if (rc != 0) {
P_ERROR("START DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc);
module_put(ops->owner);
- ops->active = 0;
} else {
P_INFO("Monitoring %s data enabled, "
"DIAG 0xDC started.\n", ops->name);
+ ops->active = 1;
}
} else if ((buf[0] == '0') && (ops->active == 1)) {
ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
- (unsigned long) ops->data, ops->size);
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
if (rc != 0) {
P_ERROR("STOP DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc);
@@ -633,7 +618,7 @@ appldata_offline_cpu(int cpu)
spin_unlock(&appldata_timer_lock);
}
-static int
+static int __cpuinit
appldata_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
@@ -652,7 +637,7 @@ appldata_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block appldata_nb = {
+static struct notifier_block __devinitdata appldata_nb = {
.notifier_call = appldata_cpu_notify,
};
@@ -710,7 +695,8 @@ static void __exit appldata_exit(void)
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
- (unsigned long) ops->data, ops->size);
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
if (rc != 0) {
P_ERROR("STOP DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc);
@@ -739,6 +725,7 @@ MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");
EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
+EXPORT_SYMBOL_GPL(appldata_diag);
#ifdef MODULE
/*
@@ -779,7 +766,6 @@ unsigned long nr_iowait(void)
#endif /* MODULE */
EXPORT_SYMBOL_GPL(si_swapinfo);
EXPORT_SYMBOL_GPL(nr_threads);
-EXPORT_SYMBOL_GPL(avenrun);
EXPORT_SYMBOL_GPL(get_full_page_state);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index f0e2fbed3d4c..7915a197d96d 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -4,9 +4,9 @@
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects data related to memory management.
*
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/config.h>
@@ -152,6 +152,7 @@ static struct appldata_ops ops = {
.callback = &appldata_get_mem_data,
.data = &appldata_mem_data,
.owner = THIS_MODULE,
+ .mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
};
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 2a4c7432db4a..39b7bdecbf05 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -5,9 +5,9 @@
* Collects accumulated network statistics (Packets received/transmitted,
* dropped, errors, ...).
*
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/config.h>
@@ -152,6 +152,7 @@ static struct appldata_ops ops = {
.callback = &appldata_get_net_sum_data,
.data = &appldata_net_sum_data,
.owner = THIS_MODULE,
+ .mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
};
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 99ddd3bf2fba..f2b44a2f1dec 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -4,9 +4,9 @@
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects misc. OS related data (CPU utilization, running processes).
*
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/config.h>
@@ -44,11 +44,14 @@ struct appldata_os_per_cpu {
u32 per_cpu_system; /* ... spent in kernel mode */
u32 per_cpu_idle; /* ... spent in idle mode */
-// New in 2.6 -->
+ /* New in 2.6 */
u32 per_cpu_irq; /* ... spent in interrupts */
u32 per_cpu_softirq; /* ... spent in softirqs */
u32 per_cpu_iowait; /* ... spent while waiting for I/O */
-// <-- New in 2.6
+
+ /* New in modification level 01 */
+ u32 per_cpu_steal; /* ... stolen by hypervisor */
+ u32 cpu_id; /* number of this CPU */
} __attribute__((packed));
struct appldata_os_data {
@@ -68,10 +71,9 @@ struct appldata_os_data {
u32 avenrun[3]; /* average nr. of running processes during */
/* the last 1, 5 and 15 minutes */
-// New in 2.6 -->
+ /* New in 2.6 */
u32 nr_iowait; /* number of blocked threads
(waiting for I/O) */
-// <-- New in 2.6
/* per cpu data */
struct appldata_os_per_cpu os_cpu[0];
@@ -79,6 +81,14 @@ struct appldata_os_data {
static struct appldata_os_data *appldata_os_data;
+static struct appldata_ops ops = {
+ .ctl_nr = CTL_APPLDATA_OS,
+ .name = "os",
+ .record_nr = APPLDATA_RECORD_OS_ID,
+ .owner = THIS_MODULE,
+ .mod_lvl = {0xF0, 0xF1}, /* EBCDIC "01" */
+};
+
static inline void appldata_print_debug(struct appldata_os_data *os_data)
{
@@ -100,15 +110,17 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
for (i = 0; i < os_data->nr_cpus; i++) {
P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
- "idle = %u, irq = %u, softirq = %u, iowait = %u\n",
- i,
+ "idle = %u, irq = %u, softirq = %u, iowait = %u, "
+ "steal = %u\n",
+ os_data->os_cpu[i].cpu_id,
os_data->os_cpu[i].per_cpu_user,
os_data->os_cpu[i].per_cpu_nice,
os_data->os_cpu[i].per_cpu_system,
os_data->os_cpu[i].per_cpu_idle,
os_data->os_cpu[i].per_cpu_irq,
os_data->os_cpu[i].per_cpu_softirq,
- os_data->os_cpu[i].per_cpu_iowait);
+ os_data->os_cpu[i].per_cpu_iowait,
+ os_data->os_cpu[i].per_cpu_steal);
}
P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
@@ -123,14 +135,13 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
*/
static void appldata_get_os_data(void *data)
{
- int i, j;
+ int i, j, rc;
struct appldata_os_data *os_data;
+ unsigned int new_size;
os_data = data;
os_data->sync_count_1++;
- os_data->nr_cpus = num_online_cpus();
-
os_data->nr_threads = nr_threads;
os_data->nr_running = nr_running();
os_data->nr_iowait = nr_iowait();
@@ -154,9 +165,44 @@ static void appldata_get_os_data(void *data)
cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
os_data->os_cpu[j].per_cpu_iowait =
cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
+ os_data->os_cpu[j].per_cpu_steal =
+ cputime_to_jiffies(kstat_cpu(i).cpustat.steal);
+ os_data->os_cpu[j].cpu_id = i;
j++;
}
+ os_data->nr_cpus = j;
+
+ new_size = sizeof(struct appldata_os_data) +
+ (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
+ if (ops.size != new_size) {
+ if (ops.active) {
+ rc = appldata_diag(APPLDATA_RECORD_OS_ID,
+ APPLDATA_START_INTERVAL_REC,
+ (unsigned long) ops.data, new_size,
+ ops.mod_lvl);
+ if (rc != 0) {
+ P_ERROR("os: START NEW DIAG 0xDC failed, "
+ "return code: %d, new size = %i\n", rc,
+ new_size);
+ P_INFO("os: stopping old record now\n");
+ } else
+ P_INFO("os: new record size = %i\n", new_size);
+
+ rc = appldata_diag(APPLDATA_RECORD_OS_ID,
+ APPLDATA_STOP_REC,
+ (unsigned long) ops.data, ops.size,
+ ops.mod_lvl);
+ if (rc != 0)
+ P_ERROR("os: STOP OLD DIAG 0xDC failed, "
+ "return code: %d, old size = %i\n", rc,
+ ops.size);
+ else
+ P_INFO("os: old record size = %i stopped\n",
+ ops.size);
+ }
+ ops.size = new_size;
+ }
os_data->timestamp = get_clock();
os_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
@@ -165,15 +211,6 @@ static void appldata_get_os_data(void *data)
}
-static struct appldata_ops ops = {
- .ctl_nr = CTL_APPLDATA_OS,
- .name = "os",
- .record_nr = APPLDATA_RECORD_OS_ID,
- .callback = &appldata_get_os_data,
- .owner = THIS_MODULE,
-};
-
-
/*
* appldata_os_init()
*
@@ -181,26 +218,25 @@ static struct appldata_ops ops = {
*/
static int __init appldata_os_init(void)
{
- int rc, size;
+ int rc, max_size;
- size = sizeof(struct appldata_os_data) +
- (NR_CPUS * sizeof(struct appldata_os_per_cpu));
- if (size > APPLDATA_MAX_REC_SIZE) {
- P_ERROR("Size of record = %i, bigger than maximum (%i)!\n",
- size, APPLDATA_MAX_REC_SIZE);
+ max_size = sizeof(struct appldata_os_data) +
+ (NR_CPUS * sizeof(struct appldata_os_per_cpu));
+ if (max_size > APPLDATA_MAX_REC_SIZE) {
+ P_ERROR("Max. size of OS record = %i, bigger than maximum "
+ "record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE);
rc = -ENOMEM;
goto out;
}
- P_DEBUG("sizeof(os) = %i, sizeof(os_cpu) = %lu\n", size,
+ P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size,
sizeof(struct appldata_os_per_cpu));
- appldata_os_data = kmalloc(size, GFP_DMA);
+ appldata_os_data = kzalloc(max_size, GFP_DMA);
if (appldata_os_data == NULL) {
P_ERROR("No memory for %s!\n", ops.name);
rc = -ENOMEM;
goto out;
}
- memset(appldata_os_data, 0, size);
appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
@@ -208,7 +244,7 @@ static int __init appldata_os_init(void)
P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
ops.data = appldata_os_data;
- ops.size = size;
+ ops.callback = &appldata_get_os_data;
rc = appldata_register_ops(&ops);
if (rc != 0) {
P_ERROR("Error registering ops, rc = %i\n", rc);
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c5ca2dc5d428..5713c7e5bd16 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -37,10 +37,10 @@ struct s390_aes_ctx {
int key_len;
};
-static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len,
- u32 *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct s390_aes_ctx *sctx = ctx;
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
switch (key_len) {
case 16:
@@ -70,9 +70,9 @@ fail:
return -EINVAL;
}
-static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = ctx;
+ const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
switch (sctx->key_len) {
case 16:
@@ -90,9 +90,9 @@ static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
}
}
-static void aes_decrypt(void *ctx, u8 *out, const u8 *in)
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = ctx;
+ const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
switch (sctx->key_len) {
case 16:
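
The cipher callback changes in this and the following s390 crypto files share one pattern: the setkey/encrypt/decrypt hooks now receive a struct crypto_tfm pointer and fetch their private context with crypto_tfm_ctx() rather than being handed an opaque void pointer. A minimal sketch under that assumption (my_ctx and my_setkey are hypothetical):

#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_ctx {
        u8 key[16];
        int key_len;
};

static int my_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                     unsigned int key_len, u32 *flags)
{
        struct my_ctx *ctx = crypto_tfm_ctx(tfm);   /* was: void *ctx argument */

        if (key_len != 16) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }
        memcpy(ctx->key, in_key, key_len);
        ctx->key_len = key_len;
        return 0;
}
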
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index e3c37aa0a199..b3f7496a79b4 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -44,10 +44,10 @@ struct crypt_s390_des3_192_ctx {
u8 key[DES3_192_KEY_SIZE];
};
-static int des_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
+static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
- struct crypt_s390_des_ctx *dctx = ctx;
+ struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
int ret;
/* test if key is valid (not a weak key) */
@@ -57,16 +57,16 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen,
return ret;
}
-static void des_encrypt(void *ctx, u8 *out, const u8 *in)
+static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct crypt_s390_des_ctx *dctx = ctx;
+ struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
}
-static void des_decrypt(void *ctx, u8 *out, const u8 *in)
+static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct crypt_s390_des_ctx *dctx = ctx;
+ struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
}
@@ -166,11 +166,11 @@ static struct crypto_alg des_alg = {
* Implementers MUST reject keys that exhibit this property.
*
*/
-static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
+static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
int i, ret;
- struct crypt_s390_des3_128_ctx *dctx = ctx;
+ struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
const u8* temp_key = key;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
@@ -186,17 +186,17 @@ static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen,
return 0;
}
-static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct crypt_s390_des3_128_ctx *dctx = ctx;
+ struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
DES3_128_BLOCK_SIZE);
}
-static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct crypt_s390_des3_128_ctx *dctx = ctx;
+ struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
DES3_128_BLOCK_SIZE);
@@ -302,11 +302,11 @@ static struct crypto_alg des3_128_alg = {
* property.
*
*/
-static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
+static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
int i, ret;
- struct crypt_s390_des3_192_ctx *dctx = ctx;
+ struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
const u8* temp_key = key;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -325,17 +325,17 @@ static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen,
return 0;
}
-static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct crypt_s390_des3_192_ctx *dctx = ctx;
+ struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
DES3_192_BLOCK_SIZE);
}
-static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct crypt_s390_des3_192_ctx *dctx = ctx;
+ struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
DES3_192_BLOCK_SIZE);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 98c896b86dcd..9d34a35b1aa5 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -40,28 +40,29 @@ struct crypt_s390_sha1_ctx {
u8 buffer[2 * SHA1_BLOCK_SIZE];
};
-static void
-sha1_init(void *ctx)
+static void sha1_init(struct crypto_tfm *tfm)
{
- static const struct crypt_s390_sha1_ctx initstate = {
- .state = {
- 0x67452301,
- 0xEFCDAB89,
- 0x98BADCFE,
- 0x10325476,
- 0xC3D2E1F0
- },
+ struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
+ static const u32 initstate[5] = {
+ 0x67452301,
+ 0xEFCDAB89,
+ 0x98BADCFE,
+ 0x10325476,
+ 0xC3D2E1F0
};
- memcpy(ctx, &initstate, sizeof(initstate));
+
+ ctx->count = 0;
+ memcpy(ctx->state, &initstate, sizeof(initstate));
+ ctx->buf_len = 0;
}
-static void
-sha1_update(void *ctx, const u8 *data, unsigned int len)
+static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len)
{
struct crypt_s390_sha1_ctx *sctx;
long imd_len;
- sctx = ctx;
+ sctx = crypto_tfm_ctx(tfm);
sctx->count += len * 8; //message bit length
//anything in buffer yet? -> must be completed
@@ -110,10 +111,9 @@ pad_message(struct crypt_s390_sha1_ctx* sctx)
}
/* Add padding and return the message digest. */
-static void
-sha1_final(void* ctx, u8 *out)
+static void sha1_final(struct crypto_tfm *tfm, u8 *out)
{
- struct crypt_s390_sha1_ctx *sctx = ctx;
+ struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
//must perform manual padding
pad_message(sctx);
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 1ec5e92b3454..f573df30f31d 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -31,9 +31,9 @@ struct s390_sha256_ctx {
u8 buf[2 * SHA256_BLOCK_SIZE];
};
-static void sha256_init(void *ctx)
+static void sha256_init(struct crypto_tfm *tfm)
{
- struct s390_sha256_ctx *sctx = ctx;
+ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = 0x6a09e667;
sctx->state[1] = 0xbb67ae85;
@@ -44,12 +44,12 @@ static void sha256_init(void *ctx)
sctx->state[6] = 0x1f83d9ab;
sctx->state[7] = 0x5be0cd19;
sctx->count = 0;
- memset(sctx->buf, 0, sizeof(sctx->buf));
}
-static void sha256_update(void *ctx, const u8 *data, unsigned int len)
+static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len)
{
- struct s390_sha256_ctx *sctx = ctx;
+ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int index;
int ret;
@@ -108,9 +108,9 @@ static void pad_message(struct s390_sha256_ctx* sctx)
}
/* Add padding and return the message digest */
-static void sha256_final(void* ctx, u8 *out)
+static void sha256_final(struct crypto_tfm *tfm, u8 *out)
{
- struct s390_sha256_ctx *sctx = ctx;
+ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
/* must perform manual padding */
pad_message(sctx);
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 1f451c2cb071..12a6311e9838 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -177,11 +177,6 @@ struct elf_prpsinfo32
#include <linux/highuid.h>
-#undef NEW_TO_OLD_UID
-#undef NEW_TO_OLD_GID
-#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
-#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
-
#define elf_addr_t u32
/*
#define init_elf_binfmt init_elf32_binfmt
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b2448487854c..aa8b52c2140f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -93,13 +93,22 @@ STACK_SIZE = 1 << STACK_SHIFT
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm
- .macro SAVE_ALL psworg,savearea,sync
+ .macro SAVE_ALL_SYNC psworg,savearea
la %r12,\psworg
- .if \sync
tm \psworg+1,0x01 # test problem state bit
bz BASED(2f) # skip stack setup save
l %r15,__LC_KERNEL_STACK # problem state -> load ksp
- .else
+#ifdef CONFIG_CHECK_STACK
+ b BASED(3f)
+2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+ bz BASED(stack_overflow)
+3:
+#endif
+2:
+ .endm
+
+ .macro SAVE_ALL_ASYNC psworg,savearea
+ la %r12,\psworg
tm \psworg+1,0x01 # test problem state bit
bnz BASED(1f) # from user -> load async stack
clc \psworg+4(4),BASED(.Lcritical_end)
@@ -115,7 +124,6 @@ STACK_SIZE = 1 << STACK_SHIFT
sra %r14,STACK_SHIFT
be BASED(2f)
1: l %r15,__LC_ASYNC_STACK
- .endif
#ifdef CONFIG_CHECK_STACK
b BASED(3f)
2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -196,7 +204,7 @@ system_call:
STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
lh %r7,0x8a # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -425,7 +433,7 @@ pgm_check_handler:
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
- SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -464,7 +472,7 @@ pgm_per:
# Normal per exception
#
pgm_per_std:
- SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -490,7 +498,7 @@ pgm_no_vtime2:
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -519,7 +527,7 @@ io_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16
- SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+ SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -631,7 +639,7 @@ ext_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16
- SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+ SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -657,21 +665,31 @@ __critical_end:
.globl mcck_int_handler
mcck_int_handler:
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
- tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- bo BASED(0f)
- spt __LC_LAST_UPDATE_TIMER # revalidate cpu timer
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
- mvc __LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
+ mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
+ mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+ tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
+ bo BASED(1f)
+ la %r14,__LC_SYNC_ENTER_TIMER
+ clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
+ bl BASED(0f)
+ la %r14,__LC_ASYNC_ENTER_TIMER
+0: clc 0(8,%r14),__LC_EXIT_TIMER
+ bl BASED(0f)
+ la %r14,__LC_EXIT_TIMER
+0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+ bl BASED(0f)
+ la %r14,__LC_LAST_UPDATE_TIMER
+0: spt 0(%r14)
+ mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
+1:
#endif
-0: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+ tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
bno BASED(mcck_int_main) # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
bnz BASED(mcck_int_main) # from user -> load async stack
@@ -691,7 +709,7 @@ mcck_int_main:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
- tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(mcck_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
@@ -715,6 +733,20 @@ mcck_no_vtime:
l %r1,BASED(.Ls390_handle_mcck)
basr %r14,%r1 # call machine check handler
mcck_return:
+ mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
+ ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
+ tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+ bno BASED(0f)
+ lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
+ stpt __LC_EXIT_TIMER
+ lpsw __LC_RETURN_MCCK_PSW # back to caller
+0:
+#endif
+ lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
+ lpsw __LC_RETURN_MCCK_PSW # back to caller
+
RESTORE_ALL __LC_RETURN_MCCK_PSW,0
#ifdef CONFIG_SMP
@@ -781,6 +813,8 @@ cleanup_table_sysc_leave:
.long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
cleanup_table_sysc_work_loop:
.long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+cleanup_table_io_return:
+ .long io_return + 0x80000000, io_leave + 0x80000000
cleanup_table_io_leave:
.long io_leave + 0x80000000, io_done + 0x80000000
cleanup_table_io_work_loop:
@@ -807,6 +841,11 @@ cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
bl BASED(cleanup_sysc_return)
0:
+ clc 4(4,%r12),BASED(cleanup_table_io_return)
+ bl BASED(0f)
+ clc 4(4,%r12),BASED(cleanup_table_io_return+4)
+ bl BASED(cleanup_io_return)
+0:
clc 4(4,%r12),BASED(cleanup_table_io_leave)
bl BASED(0f)
clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
@@ -839,7 +878,7 @@ cleanup_system_call:
mvc __LC_SAVE_AREA(16),0(%r12)
0: st %r13,4(%r12)
st %r12,__LC_SAVE_AREA+48 # argh
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
l %r12,__LC_SAVE_AREA+48 # argh
st %r15,12(%r12)
@@ -980,7 +1019,6 @@ cleanup_io_leave_insn:
.long cleanup_critical
#define SYSCALL(esa,esame,emu) .long esa
- .globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2ac095bc0e25..f3222a1b2861 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -87,13 +87,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
larl %r13,system_call
.endm
- .macro SAVE_ALL psworg,savearea,sync
+ .macro SAVE_ALL_SYNC psworg,savearea
la %r12,\psworg
- .if \sync
tm \psworg+1,0x01 # test problem state bit
jz 2f # skip stack setup save
lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
- .else
+#ifdef CONFIG_CHECK_STACK
+ j 3f
+2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+ jz stack_overflow
+3:
+#endif
+2:
+ .endm
+
+ .macro SAVE_ALL_ASYNC psworg,savearea
+ la %r12,\psworg
tm \psworg+1,0x01 # test problem state bit
jnz 1f # from user -> load kernel stack
clc \psworg+8(8),BASED(.Lcritical_end)
@@ -108,7 +117,6 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
srag %r14,%r14,STACK_SHIFT
jz 2f
1: lg %r15,__LC_ASYNC_STACK # load async stack
- .endif
#ifdef CONFIG_CHECK_STACK
j 3f
2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -187,7 +195,7 @@ system_call:
STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +454,7 @@ pgm_check_handler:
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
- SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -485,7 +493,7 @@ pgm_per:
# Normal per exception
#
pgm_per_std:
- SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -511,7 +519,7 @@ pgm_no_vtime2:
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -539,7 +547,7 @@ io_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32
- SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+ SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -647,7 +655,7 @@ ext_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32
- SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+ SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -672,21 +680,32 @@ __critical_end:
mcck_int_handler:
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r1)
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+64
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid
- tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- jo 0f
- spt __LC_LAST_UPDATE_TIMER
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
- mvc __LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
+ la %r14,4095
+ mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
+ mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+ tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
+ jo 1f
+ la %r14,__LC_SYNC_ENTER_TIMER
+ clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
+ jl 0f
+ la %r14,__LC_ASYNC_ENTER_TIMER
+0: clc 0(8,%r14),__LC_EXIT_TIMER
+ jl 0f
+ la %r14,__LC_EXIT_TIMER
+0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+ jl 0f
+ la %r14,__LC_LAST_UPDATE_TIMER
+0: spt 0(%r14)
+ mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
+1:
#endif
-0: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+ tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
jnz mcck_int_main # from user -> load kernel stack
@@ -705,7 +724,7 @@ mcck_int_main:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
- tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
+ tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz mcck_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
@@ -727,7 +746,17 @@ mcck_no_vtime:
jno mcck_return
brasl %r14,s390_handle_mcck
mcck_return:
- RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+ mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
+ ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
+ lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
+ tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+ jno 0f
+ stpt __LC_EXIT_TIMER
+0:
+#endif
+ lpswe __LC_RETURN_MCCK_PSW # back to caller
#ifdef CONFIG_SMP
/*
@@ -789,6 +818,8 @@ cleanup_table_sysc_leave:
.quad sysc_leave, sysc_work_loop
cleanup_table_sysc_work_loop:
.quad sysc_work_loop, sysc_reschedule
+cleanup_table_io_return:
+ .quad io_return, io_leave
cleanup_table_io_leave:
.quad io_leave, io_done
cleanup_table_io_work_loop:
@@ -815,6 +846,11 @@ cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
jl cleanup_sysc_return
0:
+ clc 8(8,%r12),BASED(cleanup_table_io_return)
+ jl 0f
+ clc 8(8,%r12),BASED(cleanup_table_io_return+8)
+ jl cleanup_io_return
+0:
clc 8(8,%r12),BASED(cleanup_table_io_leave)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
@@ -847,7 +883,7 @@ cleanup_system_call:
mvc __LC_SAVE_AREA(32),0(%r12)
0: stg %r13,8(%r12)
stg %r12,__LC_SAVE_AREA+96 # argh
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
lg %r12,__LC_SAVE_AREA+96 # argh
stg %r15,24(%r12)
@@ -957,7 +993,6 @@ cleanup_io_leave_insn:
.quad __critical_end
#define SYSCALL(esa,esame,emu) .long esame
- .globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
@@ -965,7 +1000,6 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
- .globl sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ea88d066bf04..538c82da49b1 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/head.S
*
- * (C) Copyright IBM Corp. 1999, 2005
+ * Copyright (C) IBM Corp. 1999,2006
*
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -482,24 +482,23 @@ start:
.macro GET_IPL_DEVICE
.Lget_ipl_device:
- basr %r12,0
-.LGID: l %r1,0xb8 # get sid
+ l %r1,0xb8 # get sid
sll %r1,15 # test if subchannel is enabled
srl %r1,31
ltr %r1,%r1
- bz 0(%r14) # subchannel disabled
+ bz 2f-.LPG1(%r13) # subchannel disabled
l %r1,0xb8
- la %r5,.Lipl_schib-.LGID(%r12)
+ la %r5,.Lipl_schib-.LPG1(%r13)
stsch 0(%r5) # get schib of subchannel
- bnz 0(%r14) # schib not available
+ bnz 2f-.LPG1(%r13) # schib not available
tm 5(%r5),0x01 # devno valid?
- bno 0(%r14)
- la %r6,ipl_parameter_flags-.LGID(%r12)
+ bno 2f-.LPG1(%r13)
+ la %r6,ipl_parameter_flags-.LPG1(%r13)
oi 3(%r6),0x01 # set flag
- la %r2,ipl_devno-.LGID(%r12)
+ la %r2,ipl_devno-.LPG1(%r13)
mvc 0(2,%r2),6(%r5) # store devno
tm 4(%r5),0x80 # qdio capable device?
- bno 0(%r14)
+ bno 2f-.LPG1(%r13)
oi 3(%r6),0x02 # set flag
# copy ipl parameters
@@ -523,7 +522,7 @@ start:
ar %r2,%r1
sr %r0,%r4
jne 1b
- b 0(%r14)
+ b 2f-.LPG1(%r13)
.align 4
.Lipl_schib:
@@ -537,6 +536,7 @@ ipl_parameter_flags:
.globl ipl_devno
ipl_devno:
.word 0
+2:
.endm
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 2d3b089bfb83..d00de17b3778 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/head31.S
*
- * (C) Copyright IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005,2006
*
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -16,12 +16,31 @@
# or linload or SALIPL
#
.org 0x10000
-startup:basr %r13,0 # get base
-.LPG1: l %r1, .Lget_ipl_device_addr-.LPG1(%r13)
- basr %r14, %r1
+startup:basr %r13,0 # get base
+.LPG0: l %r13,0f-.LPG0(%r13)
+ b 0(%r13)
+0: .long startup_continue
+
+#
+# params at 10400 (setup.h)
+#
+ .org PARMAREA
+ .long 0,0 # IPL_DEVICE
+ .long 0,RAMDISK_ORIGIN # INITRD_START
+ .long 0,RAMDISK_SIZE # INITRD_SIZE
+
+ .org COMMAND_LINE
+ .byte "root=/dev/ram0 ro"
+ .byte 0
+
+ .org 0x11000
+
+startup_continue:
+ basr %r13,0 # get base
+.LPG1: GET_IPL_DEVICE
lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- la %r12,_pstart-.LPG1(%r13) # pointer to parameter area
- # move IPL device to lowcore
+ l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
+ # move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
#
@@ -51,8 +70,8 @@ startup:basr %r13,0 # get base
a %r1,__LC_EXT_NEW_PSW+4 # set handler
st %r1,__LC_EXT_NEW_PSW+4
- la %r4,_pstart-.LPG1(%r13) # %r4 is our index for sccb stuff
- la %r1, .Lsccb-PARMAREA(%r4) # our sccb
+ l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
+ lr %r1,%r4 # our sccb
.insn rre,0xb2200000,%r2,%r1 # service call
ipm %r1
srl %r1,28 # get cc code
@@ -63,7 +82,7 @@ startup:basr %r13,0 # get base
be .Lservicecall-.LPG1(%r13)
lpsw .Lwaitsclp-.LPG1(%r13)
.Lsclph:
- lh %r1,.Lsccbr-PARMAREA(%r4)
+ lh %r1,.Lsccbr-.Lsccb(%r4)
chi %r1,0x10 # 0x0010 is the sucess code
je .Lprocsccb # let's process the sccb
chi %r1,0x1f0
@@ -74,7 +93,7 @@ startup:basr %r13,0 # get base
b .Lservicecall-.LPG1(%r13)
.Lprocsccb:
lhi %r1,0
- icm %r1,3,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+ icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
jnz .Lscnd
lhi %r1,0x800 # otherwise report 2GB
.Lscnd:
@@ -84,10 +103,10 @@ startup:basr %r13,0 # get base
lr %r1,%r3
.Lno2gb:
xr %r3,%r3 # same logic
- ic %r3,.Lscpa1-PARMAREA(%r4)
+ ic %r3,.Lscpa1-.Lsccb(%r4)
chi %r3,0x00
jne .Lcompmem
- l %r3,.Lscpa2-PARMAREA(%r13)
+ l %r3,.Lscpa2-.Lsccb(%r4)
.Lcompmem:
mr %r2,%r1 # mem in MB on 128-bit
l %r1,.Lonemb-.LPG1(%r13)
@@ -95,8 +114,6 @@ startup:basr %r13,0 # get base
b .Lfchunk-.LPG1(%r13)
.align 4
-.Lget_ipl_device_addr:
- .long .Lget_ipl_device
.Lpmask:
.byte 0
.align 8
@@ -242,6 +259,8 @@ startup:basr %r13,0 # get base
.long 0 # cr13: home space segment table
.long 0xc0000000 # cr14: machine check handling off
.long 0 # cr15: linkage stack operations
+.Lduct: .long 0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0
.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
@@ -252,25 +271,9 @@ startup:basr %r13,0 # get base
.Lmflags:.long machine_flags
.Lbss_bgn: .long __bss_start
.Lbss_end: .long _end
-
- .org PARMAREA-64
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
-
-#
-# params at 10400 (setup.h)
-#
- .org PARMAREA
- .global _pstart
-_pstart:
- .long 0,0 # IPL_DEVICE
- .long 0,RAMDISK_ORIGIN # INITRD_START
- .long 0,RAMDISK_SIZE # INITRD_SIZE
-
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
- .org 0x11000
+.Lparmaddr: .long PARMAREA
+.Lsccbaddr: .long .Lsccb
+ .align 4096
.Lsccb:
.hword 0x1000 # length, one page
.byte 0x00,0x00,0x00
@@ -287,18 +290,14 @@ _pstart:
.Lscpincr2:
.quad 0x00
.fill 3984,1,0
- .org 0x12000
- .global _pend
-_pend:
-
- GET_IPL_DEVICE
+ .align 4096
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000
#endif
#
-# startup-code, running in virtual mode
+# startup-code, running in absolute addressing mode
#
.globl _stext
_stext: basr %r13,0 # get base
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index f08c06f45d5c..47744fcca930 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/head64.S
*
- * (C) Copyright IBM Corp. 1999,2005
+ * Copyright (C) IBM Corp. 1999,2006
*
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -15,18 +15,37 @@
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
- .org 0x10000
-startup:basr %r13,0 # get base
+ .org 0x10000
+startup:basr %r13,0 # get base
+.LPG0: l %r13,0f-.LPG0(%r13)
+ b 0(%r13)
+0: .long startup_continue
+
+#
+# params at 10400 (setup.h)
+#
+ .org PARMAREA
+ .quad 0 # IPL_DEVICE
+ .quad RAMDISK_ORIGIN # INITRD_START
+ .quad RAMDISK_SIZE # INITRD_SIZE
+
+ .org COMMAND_LINE
+ .byte "root=/dev/ram0 ro"
+ .byte 0
+
+ .org 0x11000
+
+startup_continue:
+ basr %r13,0 # get base
.LPG1: sll %r13,1 # remove high order bit
srl %r13,1
- l %r1,.Lget_ipl_device_addr-.LPG1(%r13)
- basr %r14,%r1
+ GET_IPL_DEVICE
lhi %r1,1 # mode 1 = esame
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esame mode
sam64 # switch to 64 bit mode
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- larl %r12,_pstart # pointer to parameter area
+ lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
# move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
@@ -55,8 +74,8 @@ startup:basr %r13,0 # get base
larl %r1,.Lsclph
stg %r1,__LC_EXT_NEW_PSW+8 # set handler
- larl %r4,_pstart # %r4 is our index for sccb stuff
- la %r1,.Lsccb-PARMAREA(%r4) # our sccb
+ larl %r4,.Lsccb # %r4 is our index for sccb stuff
+ lgr %r1,%r4 # our sccb
.insn rre,0xb2200000,%r2,%r1 # service call
ipm %r1
srl %r1,28 # get cc code
@@ -67,7 +86,7 @@ startup:basr %r13,0 # get base
be .Lservicecall-.LPG1(%r13)
lpswe .Lwaitsclp-.LPG1(%r13)
.Lsclph:
- lh %r1,.Lsccbr-PARMAREA(%r4)
+ lh %r1,.Lsccbr-.Lsccb(%r4)
chi %r1,0x10 # 0x0010 is the sucess code
je .Lprocsccb # let's process the sccb
chi %r1,0x1f0
@@ -78,15 +97,15 @@ startup:basr %r13,0 # get base
b .Lservicecall-.LPG1(%r13)
.Lprocsccb:
lghi %r1,0
- icm %r1,3,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+ icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
jnz .Lscnd
- lg %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
+ lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
.Lscnd:
xr %r3,%r3 # same logic
- ic %r3,.Lscpa1-PARMAREA(%r4)
+ ic %r3,.Lscpa1-.Lsccb(%r4)
chi %r3,0x00
jne .Lcompmem
- l %r3,.Lscpa2-PARMAREA(%r13)
+ l %r3,.Lscpa2-.Lsccb(%r4)
.Lcompmem:
mlgr %r2,%r1 # mem in MB on 128-bit
l %r1,.Lonemb-.LPG1(%r13)
@@ -94,8 +113,6 @@ startup:basr %r13,0 # get base
b .Lfchunk-.LPG1(%r13)
.align 4
-.Lget_ipl_device_addr:
- .long .Lget_ipl_device
.Lpmask:
.byte 0
.align 8
@@ -242,29 +259,16 @@ startup:basr %r13,0 # get base
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad 0 # cr15: linkage stack operations
+.Lduct: .long 0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700
+.Lparmaddr:
+ .quad PARMAREA
- .org PARMAREA-64
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
-
-#
-# params at 10400 (setup.h)
-#
- .org PARMAREA
- .global _pstart
-_pstart:
- .quad 0 # IPL_DEVICE
- .quad RAMDISK_ORIGIN # INITRD_START
- .quad RAMDISK_SIZE # INITRD_SIZE
-
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
- .org 0x11000
+ .align 4096
.Lsccb:
.hword 0x1000 # length, one page
.byte 0x00,0x00,0x00
@@ -281,18 +285,14 @@ _pstart:
.Lscpincr2:
.quad 0x00
.fill 3984,1,0
- .org 0x12000
- .global _pend
-_pend:
-
- GET_IPL_DEVICE
+ .align 4096
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000
#endif
#
-# startup-code, running in virtual mode
+# startup-code, running in absolute addressing mode
#
.globl _stext
_stext: basr %r13,0 # get base
@@ -326,4 +326,3 @@ _stext: basr %r13,0 # get base
.align 8
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index bad81b5832db..fbde6a915264 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,8 +27,8 @@ static void kexec_halt_all_cpus(void *);
typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
-const extern unsigned char relocate_kernel[];
-const extern unsigned long long relocate_kernel_len;
+extern const unsigned char relocate_kernel[];
+extern const unsigned long long relocate_kernel_len;
int
machine_kexec_prepare(struct kimage *image)
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 4176c77670c4..0886e739d122 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -46,8 +46,6 @@ EXPORT_SYMBOL(__down_interruptible);
*/
extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(overflowuid);
-EXPORT_SYMBOL(overflowgid);
EXPORT_SYMBOL(empty_zero_page);
/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b282034452a4..2b2551e3510b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -37,6 +37,7 @@
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -115,6 +116,7 @@ void __devinit cpu_init (void)
*/
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";
+char vmpanic_cmd[128] = "";
static inline void strncpy_skip_quote(char *dst, char *src, int n)
{
@@ -146,6 +148,38 @@ static int __init vmpoff_setup(char *str)
__setup("vmpoff=", vmpoff_setup);
+static int vmpanic_notify(struct notifier_block *self, unsigned long event,
+ void *data)
+{
+ if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
+ cpcmd(vmpanic_cmd, NULL, 0, NULL);
+
+ return NOTIFY_OK;
+}
+
+#define PANIC_PRI_VMPANIC 0
+
+static struct notifier_block vmpanic_nb = {
+ .notifier_call = vmpanic_notify,
+ .priority = PANIC_PRI_VMPANIC
+};
+
+static int __init vmpanic_setup(char *str)
+{
+ static int register_done __initdata = 0;
+
+ strncpy_skip_quote(vmpanic_cmd, str, 127);
+ vmpanic_cmd[127] = 0;
+ if (!register_done) {
+ register_done = 1;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &vmpanic_nb);
+ }
+ return 1;
+}
+
+__setup("vmpanic=", vmpanic_setup);
+
/*
* condev= and conmode= setup parameter.
*/
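For illustration only (the command string is an assumption, not part of this patch), a z/VM guest could now be booted with a parameter such as

	vmpanic="IPL 190"

so that the notifier registered above issues that CP command via cpcmd() when the kernel panics, re-IPLing the guest from device 190.
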
@@ -289,19 +323,34 @@ void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
void machine_restart(char *command)
{
- console_unblank();
+ if (!in_interrupt() || oops_in_progress)
+ /*
+ * Only unblank the console if we are called in enabled
+ * context or a bust_spinlocks cleared the way for us.
+ */
+ console_unblank();
_machine_restart(command);
}
void machine_halt(void)
{
- console_unblank();
+ if (!in_interrupt() || oops_in_progress)
+ /*
+ * Only unblank the console if we are called in enabled
+ * context or a bust_spinlocks cleared the way for us.
+ */
+ console_unblank();
_machine_halt();
}
void machine_power_off(void)
{
- console_unblank();
+ if (!in_interrupt() || oops_in_progress)
+ /*
+ * Only unblank the console if we are called in enabled
+ * context or a bust_spinlocks cleared the way for us.
+ */
+ console_unblank();
_machine_power_off();
}
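
The same guard is now repeated in all three shutdown paths above. Purely as a sketch (not part of this patch), it could be factored into a helper:

static void console_unblank_if_safe(void)
{
	/*
	 * Only unblank the console if we are called in enabled
	 * context or a bust_spinlocks cleared the way for us.
	 */
	if (!in_interrupt() || oops_in_progress)
		console_unblank();
}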
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 343120c9223d..8e03219eea76 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -869,7 +869,7 @@ static int __init topology_init(void)
int ret;
for_each_possible_cpu(cpu) {
- ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", cpu, ret);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a46793beeddd..b7630436f693 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -150,13 +150,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
unsigned long *stack;
int i;
- // debugging aid: "show_stack(NULL);" prints the
- // back trace for this cpu.
-
if (!sp)
- sp = task ? (unsigned long *) task->thread.ksp : __r15;
+ stack = task ? (unsigned long *) task->thread.ksp : __r15;
+ else
+ stack = sp;
- stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index dfe6f0856617..1f0439dc245a 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -356,7 +356,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
set_vtimer(event->expires);
spin_unlock_irqrestore(&vt_list->lock, flags);
- /* release CPU aquired in prepare_vtimer or mod_virt_timer() */
+ /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index c72e17a96eed..e467a450662b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -147,7 +147,7 @@ endif
# them changed. We use .arch and .mach to indicate when they were
# updated last, otherwise make uses the target directory mtime.
-include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/MARKER
+include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/auto.conf
@echo ' SYMLINK include/asm-sh/cpu -> include/asm-sh/$(cpuincdir-y)'
$(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
$(Q)ln -fsn $(incdir-prefix)$(cpuincdir-y) include/asm-sh/cpu
@@ -157,7 +157,7 @@ include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/MARKER
# don't, just reference the parent directory so the semantics are
# kept roughly the same.
-include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/MARKER
+include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/auto.conf
@echo -n ' SYMLINK include/asm-sh/mach -> '
$(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
$(Q)if [ -d $(incdir-prefix)$(incdir-y) ]; then \
diff --git a/arch/sh/boards/adx/irq_maskreg.c b/arch/sh/boards/adx/irq_maskreg.c
index c0973f8d57ba..357fab1bac2b 100644
--- a/arch/sh/boards/adx/irq_maskreg.c
+++ b/arch/sh/boards/adx/irq_maskreg.c
@@ -102,6 +102,6 @@ static void end_maskreg_irq(unsigned int irq)
void make_maskreg_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &maskreg_irq_type;
+ irq_desc[irq].chip = &maskreg_irq_type;
disable_maskreg_irq(irq);
}
diff --git a/arch/sh/boards/bigsur/irq.c b/arch/sh/boards/bigsur/irq.c
index 6ddbcc77244d..1d32425782c0 100644
--- a/arch/sh/boards/bigsur/irq.c
+++ b/arch/sh/boards/bigsur/irq.c
@@ -253,7 +253,7 @@ static void make_bigsur_l1isr(unsigned int irq) {
/* sanity check first */
if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH) {
/* save the handler in the main description table */
- irq_desc[irq].handler = &bigsur_l1irq_type;
+ irq_desc[irq].chip = &bigsur_l1irq_type;
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 1;
@@ -270,7 +270,7 @@ static void make_bigsur_l2isr(unsigned int irq) {
/* sanity check first */
if(irq >= BIGSUR_2NDLVL_IRQ_LOW && irq < BIGSUR_2NDLVL_IRQ_HIGH) {
/* save the handler in the main description table */
- irq_desc[irq].handler = &bigsur_l2irq_type;
+ irq_desc[irq].chip = &bigsur_l2irq_type;
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 1;
diff --git a/arch/sh/boards/cqreek/irq.c b/arch/sh/boards/cqreek/irq.c
index d1da0d844567..2955adc52310 100644
--- a/arch/sh/boards/cqreek/irq.c
+++ b/arch/sh/boards/cqreek/irq.c
@@ -103,7 +103,7 @@ void __init init_cqreek_IRQ(void)
cqreek_irq_data[14].stat_port = BRIDGE_IDE_INTR_STAT;
cqreek_irq_data[14].bit = 1;
- irq_desc[14].handler = &cqreek_irq_type;
+ irq_desc[14].chip = &cqreek_irq_type;
irq_desc[14].status = IRQ_DISABLED;
irq_desc[14].action = 0;
irq_desc[14].depth = 1;
@@ -117,7 +117,7 @@ void __init init_cqreek_IRQ(void)
cqreek_irq_data[10].bit = (1 << 10);
/* XXX: Err... we may need demultiplexer for ISA irq... */
- irq_desc[10].handler = &cqreek_irq_type;
+ irq_desc[10].chip = &cqreek_irq_type;
irq_desc[10].status = IRQ_DISABLED;
irq_desc[10].action = 0;
irq_desc[10].depth = 1;
diff --git a/arch/sh/boards/dreamcast/setup.c b/arch/sh/boards/dreamcast/setup.c
index 55dece35cde5..0027b80a2343 100644
--- a/arch/sh/boards/dreamcast/setup.c
+++ b/arch/sh/boards/dreamcast/setup.c
@@ -70,7 +70,7 @@ int __init platform_setup(void)
/* Assign all virtual IRQs to the System ASIC int. handler */
for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++)
- irq_desc[i].handler = &systemasic_int;
+ irq_desc[i].chip = &systemasic_int;
board_time_init = aica_time_init;
diff --git a/arch/sh/boards/ec3104/setup.c b/arch/sh/boards/ec3104/setup.c
index 5130ba2b6ff1..4b3ef16a0e96 100644
--- a/arch/sh/boards/ec3104/setup.c
+++ b/arch/sh/boards/ec3104/setup.c
@@ -63,7 +63,7 @@ int __init platform_setup(void)
str[i] = ctrl_readb(EC3104_BASE + i);
for (i = EC3104_IRQBASE; i < EC3104_IRQBASE + 32; i++)
- irq_desc[i].handler = &ec3104_int;
+ irq_desc[i].chip = &ec3104_int;
printk("initializing EC3104 \"%.8s\" at %08x, IRQ %d, IRQ base %d\n",
str, EC3104_BASE, EC3104_IRQ, EC3104_IRQBASE);
diff --git a/arch/sh/boards/harp/irq.c b/arch/sh/boards/harp/irq.c
index 52d0ba39031b..701fa55d5297 100644
--- a/arch/sh/boards/harp/irq.c
+++ b/arch/sh/boards/harp/irq.c
@@ -114,7 +114,7 @@ static void enable_harp_irq(unsigned int irq)
static void __init make_harp_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &harp_irq_type;
+ irq_desc[irq].chip = &harp_irq_type;
disable_harp_irq(irq);
}
diff --git a/arch/sh/boards/mpc1211/pci.c b/arch/sh/boards/mpc1211/pci.c
index ba3a65439752..9f7ccd33ffb6 100644
--- a/arch/sh/boards/mpc1211/pci.c
+++ b/arch/sh/boards/mpc1211/pci.c
@@ -273,9 +273,9 @@ void __init pcibios_fixup_irqs(void)
}
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
if (start >= 0x10000UL) {
diff --git a/arch/sh/boards/mpc1211/setup.c b/arch/sh/boards/mpc1211/setup.c
index 2bb581b91683..b72f009c52c2 100644
--- a/arch/sh/boards/mpc1211/setup.c
+++ b/arch/sh/boards/mpc1211/setup.c
@@ -194,7 +194,7 @@ static struct hw_interrupt_type mpc1211_irq_type = {
static void make_mpc1211_irq(unsigned int irq)
{
- irq_desc[irq].handler = &mpc1211_irq_type;
+ irq_desc[irq].chip = &mpc1211_irq_type;
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 1;
diff --git a/arch/sh/boards/overdrive/galileo.c b/arch/sh/boards/overdrive/galileo.c
index 276fa11ee4ce..b055809d2ac1 100644
--- a/arch/sh/boards/overdrive/galileo.c
+++ b/arch/sh/boards/overdrive/galileo.c
@@ -536,7 +536,7 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
}
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size)
+ resource_size_t size)
{
}
diff --git a/arch/sh/boards/overdrive/irq.c b/arch/sh/boards/overdrive/irq.c
index 715e8feb3a68..2c13a7de6b22 100644
--- a/arch/sh/boards/overdrive/irq.c
+++ b/arch/sh/boards/overdrive/irq.c
@@ -150,7 +150,7 @@ static void enable_od_irq(unsigned int irq)
static void __init make_od_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &od_irq_type;
+ irq_desc[irq].chip = &od_irq_type;
disable_od_irq(irq);
}
diff --git a/arch/sh/boards/renesas/hs7751rvoip/irq.c b/arch/sh/boards/renesas/hs7751rvoip/irq.c
index ed4c5b50ea45..52a98b524e1f 100644
--- a/arch/sh/boards/renesas/hs7751rvoip/irq.c
+++ b/arch/sh/boards/renesas/hs7751rvoip/irq.c
@@ -86,7 +86,7 @@ static struct hw_interrupt_type hs7751rvoip_irq_type = {
static void make_hs7751rvoip_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &hs7751rvoip_irq_type;
+ irq_desc[irq].chip = &hs7751rvoip_irq_type;
disable_hs7751rvoip_irq(irq);
}
diff --git a/arch/sh/boards/renesas/rts7751r2d/irq.c b/arch/sh/boards/renesas/rts7751r2d/irq.c
index d36c9374aed1..e16915d9cda4 100644
--- a/arch/sh/boards/renesas/rts7751r2d/irq.c
+++ b/arch/sh/boards/renesas/rts7751r2d/irq.c
@@ -100,7 +100,7 @@ static struct hw_interrupt_type rts7751r2d_irq_type = {
static void make_rts7751r2d_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &rts7751r2d_irq_type;
+ irq_desc[irq].chip = &rts7751r2d_irq_type;
disable_rts7751r2d_irq(irq);
}
diff --git a/arch/sh/boards/renesas/systemh/irq.c b/arch/sh/boards/renesas/systemh/irq.c
index 7a2eb10edb56..845979181059 100644
--- a/arch/sh/boards/renesas/systemh/irq.c
+++ b/arch/sh/boards/renesas/systemh/irq.c
@@ -105,7 +105,7 @@ static void end_systemh_irq(unsigned int irq)
void make_systemh_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &systemh_irq_type;
+ irq_desc[irq].chip = &systemh_irq_type;
disable_systemh_irq(irq);
}
diff --git a/arch/sh/boards/se/73180/irq.c b/arch/sh/boards/se/73180/irq.c
index 70f04caad9a4..402735c7c898 100644
--- a/arch/sh/boards/se/73180/irq.c
+++ b/arch/sh/boards/se/73180/irq.c
@@ -85,7 +85,7 @@ void
make_intreq_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &intreq_irq_type;
+ irq_desc[irq].chip = &intreq_irq_type;
disable_intreq_irq(irq);
}
diff --git a/arch/sh/boards/superh/microdev/irq.c b/arch/sh/boards/superh/microdev/irq.c
index efcbd86b7cd2..cb5999425d16 100644
--- a/arch/sh/boards/superh/microdev/irq.c
+++ b/arch/sh/boards/superh/microdev/irq.c
@@ -147,7 +147,7 @@ static void enable_microdev_irq(unsigned int irq)
static void __init make_microdev_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &microdev_irq_type;
+ irq_desc[irq].chip = &microdev_irq_type;
disable_microdev_irq(irq);
}
diff --git a/arch/sh/cchips/hd6446x/hd64461/setup.c b/arch/sh/cchips/hd6446x/hd64461/setup.c
index f014b9bf6922..724db04cb392 100644
--- a/arch/sh/cchips/hd6446x/hd64461/setup.c
+++ b/arch/sh/cchips/hd6446x/hd64461/setup.c
@@ -154,7 +154,7 @@ int __init setup_hd64461(void)
outw(0xffff, HD64461_NIMR);
for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++) {
- irq_desc[i].handler = &hd64461_irq_type;
+ irq_desc[i].chip = &hd64461_irq_type;
}
setup_irq(CONFIG_HD64461_IRQ, &irq0);
diff --git a/arch/sh/cchips/hd6446x/hd64465/setup.c b/arch/sh/cchips/hd6446x/hd64465/setup.c
index 68e4c4e4283d..cf9142c620b7 100644
--- a/arch/sh/cchips/hd6446x/hd64465/setup.c
+++ b/arch/sh/cchips/hd6446x/hd64465/setup.c
@@ -182,7 +182,7 @@ static int __init setup_hd64465(void)
outw(0xffff, HD64465_REG_NIMR); /* mask all interrupts */
for (i = 0; i < HD64465_IRQ_NUM ; i++) {
- irq_desc[HD64465_IRQ_BASE + i].handler = &hd64465_irq_type;
+ irq_desc[HD64465_IRQ_BASE + i].chip = &hd64465_irq_type;
}
setup_irq(CONFIG_HD64465_IRQ, &irq0);
diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c
index 2ee330b3c38f..892214bade19 100644
--- a/arch/sh/cchips/voyagergx/irq.c
+++ b/arch/sh/cchips/voyagergx/irq.c
@@ -191,7 +191,7 @@ void __init setup_voyagergx_irq(void)
flag = 1;
}
if (flag == 1)
- irq_desc[VOYAGER_IRQ_BASE + i].handler = &voyagergx_irq_type;
+ irq_desc[VOYAGER_IRQ_BASE + i].chip = &voyagergx_irq_type;
}
setup_irq(IRQ_VOYAGER, &irq0);
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index c1669905abe4..3d546ba329cf 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -75,7 +75,7 @@ pcibios_update_resource(struct pci_dev *dev, struct resource *root,
}
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
__attribute__ ((weak));
/*
@@ -85,10 +85,10 @@ void pcibios_align_resource(void *data, struct resource *res,
* modulo 0x400.
*/
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index baed9a550d39..a33ae3e0a5a5 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -105,6 +105,6 @@ static void shutdown_imask_irq(unsigned int irq)
void make_imask_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &imask_irq_type;
+ irq_desc[irq].chip = &imask_irq_type;
enable_irq(irq);
}
diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c
index 06e8afab32e4..30064bf6e154 100644
--- a/arch/sh/kernel/cpu/irq/intc2.c
+++ b/arch/sh/kernel/cpu/irq/intc2.c
@@ -137,7 +137,7 @@ void make_intc2_irq(unsigned int irq,
local_irq_restore(flags);
- irq_desc[irq].handler = &intc2_irq_type;
+ irq_desc[irq].chip = &intc2_irq_type;
disable_intc2_irq(irq);
}
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index e55150ed0856..0373b65c77f9 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -115,7 +115,7 @@ void make_ipr_irq(unsigned int irq, unsigned int addr, int pos, int priority)
ipr_data[irq].shift = pos*4; /* POSition (0-3) x 4 means shift */
ipr_data[irq].priority = priority;
- irq_desc[irq].handler = &ipr_irq_type;
+ irq_desc[irq].chip = &ipr_irq_type;
disable_ipr_irq(irq);
}
diff --git a/arch/sh/kernel/cpu/irq/pint.c b/arch/sh/kernel/cpu/irq/pint.c
index 95d6024fe1ae..714963a25bba 100644
--- a/arch/sh/kernel/cpu/irq/pint.c
+++ b/arch/sh/kernel/cpu/irq/pint.c
@@ -85,7 +85,7 @@ static void end_pint_irq(unsigned int irq)
void make_pint_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &pint_irq_type;
+ irq_desc[irq].chip = &pint_irq_type;
disable_pint_irq(irq);
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index b56e79632f24..c2e07f7f3496 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -47,7 +47,7 @@ int show_interrupts(struct seq_file *p, void *v)
goto unlock;
seq_printf(p, "%3d: ",i);
seq_printf(p, "%10u ", kstat_irqs(i));
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 43546525f28f..6bcd8d92399f 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -25,8 +25,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
unsigned long start_address,
unsigned long vbr_reg) ATTRIB_NORET;
-const extern unsigned char relocate_new_kernel[];
-const extern unsigned int relocate_new_kernel_size;
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
extern void *gdb_vbr_vector;
/*
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index bb229ef030f3..9af22116c9a2 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -402,7 +402,7 @@ static int __init topology_init(void)
int cpu_id;
for_each_possible_cpu(cpu_id)
- register_cpu(&cpu[cpu_id], cpu_id, NULL);
+ register_cpu(&cpu[cpu_id], cpu_id);
return 0;
}
diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c
index 5ec9ddcc4b0b..c265185b22a7 100644
--- a/arch/sh/oprofile/op_model_sh7750.c
+++ b/arch/sh/oprofile/op_model_sh7750.c
@@ -198,7 +198,7 @@ static int sh7750_perf_counter_create_files(struct super_block *sb, struct dentr
for (i = 0; i < NR_CNTRS; i++) {
struct dentry *dir;
- char buf[3];
+ char buf[4];
snprintf(buf, sizeof(buf), "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
index d69879c0e063..675776a5477e 100644
--- a/arch/sh64/kernel/irq.c
+++ b/arch/sh64/kernel/irq.c
@@ -65,7 +65,7 @@ int show_interrupts(struct seq_file *p, void *v)
goto unlock;
seq_printf(p, "%3d: ",i);
seq_printf(p, "%10u ", kstat_irqs(i));
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c
index fc99bf4e362c..fa730f5fe2e6 100644
--- a/arch/sh64/kernel/irq_intc.c
+++ b/arch/sh64/kernel/irq_intc.c
@@ -178,7 +178,7 @@ static void end_intc_irq(unsigned int irq)
void make_intc_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].handler = &intc_irq_type;
+ irq_desc[irq].chip = &intc_irq_type;
disable_intc_irq(irq);
}
@@ -208,7 +208,7 @@ void __init init_IRQ(void)
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++) {
if (platform_int_priority[i] != NO_PRIORITY) {
- irq_desc[i].handler = &intc_irq_type;
+ irq_desc[i].chip = &intc_irq_type;
}
}
diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c
index 50c61dcb9fae..945920bc24db 100644
--- a/arch/sh64/kernel/pcibios.c
+++ b/arch/sh64/kernel/pcibios.c
@@ -69,10 +69,10 @@ pcibios_update_resource(struct pci_dev *dev, struct resource *root,
* modulo 0x400.
*/
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index d2711c9c9d13..da98d8dbcf95 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -309,7 +309,7 @@ static struct cpu cpu[1];
static int __init topology_init(void)
{
- return register_cpu(cpu, 0, NULL);
+ return register_cpu(cpu, 0);
}
subsys_initcall(topology_init);
diff --git a/arch/sh64/mach-cayman/irq.c b/arch/sh64/mach-cayman/irq.c
index f797c84bfdd1..05eb7cdc26f0 100644
--- a/arch/sh64/mach-cayman/irq.c
+++ b/arch/sh64/mach-cayman/irq.c
@@ -187,7 +187,7 @@ void init_cayman_irq(void)
}
for (i=0; i<NR_EXT_IRQS; i++) {
- irq_desc[START_EXT_IRQS + i].handler = &cayman_irq_type;
+ irq_desc[START_EXT_IRQS + i].chip = &cayman_irq_type;
}
/* Setup the SMSC interrupt */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index ae4c667c906f..79d177149fdb 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -208,7 +208,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
pa &= PAGE_MASK;
sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);
- return (void __iomem *) (res->start + offset);
+ return (void __iomem *)(unsigned long)(res->start + offset);
}
/*
@@ -325,7 +325,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
res->name = sdev->prom_name;
}
- return (void *)res->start;
+ return (void *)(unsigned long)res->start;
err_noiommu:
release_resource(res);
@@ -819,7 +819,9 @@ _sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
if (p + 32 >= e) /* Better than nothing */
break;
if ((nm = r->name) == 0) nm = "???";
- p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
+ p += sprintf(p, "%016llx-%016llx: %s\n",
+ (unsigned long long)r->start,
+ (unsigned long long)r->end, nm);
}
return p-buf;
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 001b8673b4bd..80a809478781 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -138,6 +138,7 @@ struct bus_type ebus_bus_type = {
.suspend = of_device_suspend,
.resume = of_device_resume,
};
+EXPORT_SYMBOL(ebus_bus_type);
#endif
#ifdef CONFIG_SBUS
@@ -149,6 +150,7 @@ struct bus_type sbus_bus_type = {
.suspend = of_device_suspend,
.resume = of_device_resume,
};
+EXPORT_SYMBOL(sbus_bus_type);
#endif
static int __init of_bus_driver_init(void)
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index bcfdddd0418a..5df3ebdc0ab1 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -860,7 +860,7 @@ char * __init pcibios_setup(char *str)
}
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
}
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 63b2b9bd778e..946ce6d15819 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -27,6 +27,11 @@
static struct device_node *allnodes;
+/* use when traversing tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
+ */
+static DEFINE_RWLOCK(devtree_lock);
+
int of_device_is_compatible(struct device_node *device, const char *compat)
{
const char* cp;
@@ -185,6 +190,54 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
}
EXPORT_SYMBOL(of_getintprop_default);
+int of_set_property(struct device_node *dp, const char *name, void *val, int len)
+{
+ struct property **prevp;
+ void *new_val;
+ int err;
+
+ new_val = kmalloc(len, GFP_KERNEL);
+ if (!new_val)
+ return -ENOMEM;
+
+ memcpy(new_val, val, len);
+
+ err = -ENODEV;
+
+ write_lock(&devtree_lock);
+ prevp = &dp->properties;
+ while (*prevp) {
+ struct property *prop = *prevp;
+
+ if (!strcmp(prop->name, name)) {
+ void *old_val = prop->value;
+ int ret;
+
+ ret = prom_setprop(dp->node, name, val, len);
+ err = -EINVAL;
+ if (ret >= 0) {
+ prop->value = new_val;
+ prop->length = len;
+
+ if (OF_IS_DYNAMIC(prop))
+ kfree(old_val);
+
+ OF_MARK_DYNAMIC(prop);
+
+ err = 0;
+ }
+ break;
+ }
+ prevp = &(*prevp)->next;
+ }
+ write_unlock(&devtree_lock);
+
+	/* XXX Update procfs if necessary... */
+
+ return err;
+}
+EXPORT_SYMBOL(of_set_property);
+
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
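
A sketch of how a caller might use the new of_set_property() (the property name and value are assumptions for illustration, not taken from this patch):

static int example_set_prop(struct device_node *dp)
{
	u32 val = 42;

	/*
	 * Updates both the in-kernel copy and the firmware property;
	 * returns 0 on success, -ENODEV if the property does not exist,
	 * and -EINVAL if prom_setprop() rejects the update.
	 */
	return of_set_property(dp, "example-property", &val, sizeof(val));
}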
@@ -354,7 +407,9 @@ static char * __init build_full_name(struct device_node *dp)
return n;
}
-static struct property * __init build_one_prop(phandle node, char *prev)
+static unsigned int unique_id;
+
+static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
{
static struct property *tmp = NULL;
struct property *p;
@@ -364,25 +419,34 @@ static struct property * __init build_one_prop(phandle node, char *prev)
p = tmp;
memset(p, 0, sizeof(*p) + 32);
tmp = NULL;
- } else
+ } else {
p = prom_early_alloc(sizeof(struct property) + 32);
+ p->unique_id = unique_id++;
+ }
p->name = (char *) (p + 1);
- if (prev == NULL) {
- prom_firstprop(node, p->name);
+ if (special_name) {
+ p->length = special_len;
+ p->value = prom_early_alloc(special_len);
+ memcpy(p->value, special_val, special_len);
} else {
- prom_nextprop(node, prev, p->name);
- }
- if (strlen(p->name) == 0) {
- tmp = p;
- return NULL;
- }
- p->length = prom_getproplen(node, p->name);
- if (p->length <= 0) {
- p->length = 0;
- } else {
- p->value = prom_early_alloc(p->length);
- len = prom_getproperty(node, p->name, p->value, p->length);
+ if (prev == NULL) {
+ prom_firstprop(node, p->name);
+ } else {
+ prom_nextprop(node, prev, p->name);
+ }
+ if (strlen(p->name) == 0) {
+ tmp = p;
+ return NULL;
+ }
+ p->length = prom_getproplen(node, p->name);
+ if (p->length <= 0) {
+ p->length = 0;
+ } else {
+ p->value = prom_early_alloc(p->length + 1);
+ prom_getproperty(node, p->name, p->value, p->length);
+ ((unsigned char *)p->value)[p->length] = '\0';
+ }
}
return p;
}
@@ -391,9 +455,14 @@ static struct property * __init build_prop_list(phandle node)
{
struct property *head, *tail;
- head = tail = build_one_prop(node, NULL);
+ head = tail = build_one_prop(node, NULL,
+ ".node", &node, sizeof(node));
+
+ tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
+ tail = tail->next;
while(tail) {
- tail->next = build_one_prop(node, tail->name);
+ tail->next = build_one_prop(node, tail->name,
+ NULL, NULL, 0);
tail = tail->next;
}
@@ -422,6 +491,7 @@ static struct device_node * __init create_node(phandle node)
return NULL;
dp = prom_early_alloc(sizeof(*dp));
+ dp->unique_id = unique_id++;
kref_init(&dp->kref);
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index a893a9cc9534..2e5d08ce217b 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -496,7 +496,7 @@ static int __init topology_init(void)
if (!p)
err = -ENOMEM;
else
- register_cpu(p, i, NULL);
+ register_cpu(p, i);
}
return err;
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index fa5006946062..5db7e1d85385 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -9,3 +9,5 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
copy_user.o locks.o atomic.o atomic32.o bitops.o \
lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
+
+obj-y += iomap.o
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
new file mode 100644
index 000000000000..54501c1ca785
--- /dev/null
+++ b/arch/sparc/lib/iomap.c
@@ -0,0 +1,48 @@
+/*
+ * Implement the sparc iomap interfaces
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *) (unsigned long) port;
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+ /* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap(start, len);
+ return ioremap_nocache(start, len);
+ }
+ /* What? */
+ return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+ /* nothing to do */
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
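
For illustration, a sparc PCI driver could use the new helpers roughly as below (the device, BAR number and register offset are assumptions; BAR 0 is assumed to be a memory BAR so that writel() is the right accessor):

static int example_map_bar(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0);	/* map all of BAR 0 */
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x10);		/* hypothetical register write */

	pci_iounmap(pdev, regs);
	return 0;
}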
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 2c42894b188f..c2c69c167d18 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -6,6 +6,7 @@
*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -16,8 +17,8 @@
#include <asm/ebus.h>
#include <asm/auxio.h>
-/* This cannot be static, as it is referenced in irq.c */
void __iomem *auxio_register = NULL;
+EXPORT_SYMBOL(auxio_register);
enum auxio_type {
AUXIO_TYPE_NODEV,
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 31e0fbb0d82c..ab9e640df228 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -151,7 +151,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %9s", irq_desc[i].handler->typename);
+ seq_printf(p, " %9s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -224,7 +224,7 @@ static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
- cpumask_t mask = irq_affinity[virt_irq];
+ cpumask_t mask = irq_desc[virt_irq].affinity;
int cpuid;
if (cpus_equal(mask, CPU_MASK_ALL)) {
@@ -414,8 +414,8 @@ void irq_install_pre_handler(int virt_irq,
data->pre_handler_arg1 = arg1;
data->pre_handler_arg2 = arg2;
- desc->handler = (desc->handler == &sun4u_irq ?
- &sun4u_irq_ack : &sun4v_irq_ack);
+ desc->chip = (desc->chip == &sun4u_irq ?
+ &sun4u_irq_ack : &sun4v_irq_ack);
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
@@ -431,7 +431,7 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
bucket = &ivector_table[ino];
if (!bucket->virt_irq) {
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
- irq_desc[bucket->virt_irq].handler = &sun4u_irq;
+ irq_desc[bucket->virt_irq].chip = &sun4u_irq;
}
desc = irq_desc + bucket->virt_irq;
@@ -465,7 +465,7 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
bucket = &ivector_table[sysino];
if (!bucket->virt_irq) {
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
- irq_desc[bucket->virt_irq].handler = &sun4v_irq;
+ irq_desc[bucket->virt_irq].chip = &sun4v_irq;
}
desc = irq_desc + bucket->virt_irq;
@@ -563,67 +563,6 @@ void handler_irq(int irq, struct pt_regs *regs)
irq_exit();
}
-#ifdef CONFIG_BLK_DEV_FD
-extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
-
-/* XXX No easy way to include asm/floppy.h XXX */
-extern unsigned char *pdma_vaddr;
-extern unsigned long pdma_size;
-extern volatile int doing_pdma;
-extern unsigned long fdc_status;
-
-irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
-{
- if (likely(doing_pdma)) {
- void __iomem *stat = (void __iomem *) fdc_status;
- unsigned char *vaddr = pdma_vaddr;
- unsigned long size = pdma_size;
- u8 val;
-
- while (size) {
- val = readb(stat);
- if (unlikely(!(val & 0x80))) {
- pdma_vaddr = vaddr;
- pdma_size = size;
- return IRQ_HANDLED;
- }
- if (unlikely(!(val & 0x20))) {
- pdma_vaddr = vaddr;
- pdma_size = size;
- doing_pdma = 0;
- goto main_interrupt;
- }
- if (val & 0x40) {
- /* read */
- *vaddr++ = readb(stat + 1);
- } else {
- unsigned char data = *vaddr++;
-
- /* write */
- writeb(data, stat + 1);
- }
- size--;
- }
-
- pdma_vaddr = vaddr;
- pdma_size = size;
-
- /* Send Terminal Count pulse to floppy controller. */
- val = readb(auxio_register);
- val |= AUXIO_AUX1_FTCNT;
- writeb(val, auxio_register);
- val &= ~AUXIO_AUX1_FTCNT;
- writeb(val, auxio_register);
-
- doing_pdma = 0;
- }
-
-main_interrupt:
- return floppy_interrupt(irq, dev_cookie, regs);
-}
-EXPORT_SYMBOL(sparc_floppy_irq);
-#endif
-
struct sun5_timer {
u64 count0;
u64 limit0;
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 566aa343aa62..768475bbce82 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -138,6 +138,7 @@ struct bus_type isa_bus_type = {
.suspend = of_device_suspend,
.resume = of_device_resume,
};
+EXPORT_SYMBOL(isa_bus_type);
struct bus_type ebus_bus_type = {
.name = "ebus",
@@ -147,6 +148,7 @@ struct bus_type ebus_bus_type = {
.suspend = of_device_suspend,
.resume = of_device_resume,
};
+EXPORT_SYMBOL(ebus_bus_type);
#endif
#ifdef CONFIG_SBUS
@@ -158,6 +160,7 @@ struct bus_type sbus_bus_type = {
.suspend = of_device_suspend,
.resume = of_device_resume,
};
+EXPORT_SYMBOL(sbus_bus_type);
#endif
static int __init of_bus_driver_init(void)
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 6c9e3e94abaa..20ca9ec8fd3b 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -357,7 +357,7 @@ void pcibios_update_irq(struct pci_dev *pdev, int irq)
}
void pcibios_align_resource(void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
}
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index e9d703eea806..8e87e7ea0325 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -27,6 +27,11 @@
static struct device_node *allnodes;
+/* use when traversing tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
+ */
+static DEFINE_RWLOCK(devtree_lock);
+
int of_device_is_compatible(struct device_node *device, const char *compat)
{
const char* cp;
@@ -185,6 +190,54 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
}
EXPORT_SYMBOL(of_getintprop_default);
+int of_set_property(struct device_node *dp, const char *name, void *val, int len)
+{
+ struct property **prevp;
+ void *new_val;
+ int err;
+
+ new_val = kmalloc(len, GFP_KERNEL);
+ if (!new_val)
+ return -ENOMEM;
+
+ memcpy(new_val, val, len);
+
+ err = -ENODEV;
+
+ write_lock(&devtree_lock);
+ prevp = &dp->properties;
+ while (*prevp) {
+ struct property *prop = *prevp;
+
+ if (!strcmp(prop->name, name)) {
+ void *old_val = prop->value;
+ int ret;
+
+ ret = prom_setprop(dp->node, name, val, len);
+ err = -EINVAL;
+ if (ret >= 0) {
+ prop->value = new_val;
+ prop->length = len;
+
+ if (OF_IS_DYNAMIC(prop))
+ kfree(old_val);
+
+ OF_MARK_DYNAMIC(prop);
+
+ err = 0;
+ }
+ break;
+ }
+ prevp = &(*prevp)->next;
+ }
+ write_unlock(&devtree_lock);
+
+	/* XXX Update procfs if necessary... */
+
+ return err;
+}
+EXPORT_SYMBOL(of_set_property);
+
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
@@ -531,7 +584,9 @@ static char * __init build_full_name(struct device_node *dp)
return n;
}
-static struct property * __init build_one_prop(phandle node, char *prev)
+static unsigned int unique_id;
+
+static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
{
static struct property *tmp = NULL;
struct property *p;
@@ -540,25 +595,35 @@ static struct property * __init build_one_prop(phandle node, char *prev)
p = tmp;
memset(p, 0, sizeof(*p) + 32);
tmp = NULL;
- } else
+ } else {
p = prom_early_alloc(sizeof(struct property) + 32);
+ p->unique_id = unique_id++;
+ }
p->name = (char *) (p + 1);
- if (prev == NULL) {
- prom_firstprop(node, p->name);
+ if (special_name) {
+ strcpy(p->name, special_name);
+ p->length = special_len;
+ p->value = prom_early_alloc(special_len);
+ memcpy(p->value, special_val, special_len);
} else {
- prom_nextprop(node, prev, p->name);
- }
- if (strlen(p->name) == 0) {
- tmp = p;
- return NULL;
- }
- p->length = prom_getproplen(node, p->name);
- if (p->length <= 0) {
- p->length = 0;
- } else {
- p->value = prom_early_alloc(p->length);
- prom_getproperty(node, p->name, p->value, p->length);
+ if (prev == NULL) {
+ prom_firstprop(node, p->name);
+ } else {
+ prom_nextprop(node, prev, p->name);
+ }
+ if (strlen(p->name) == 0) {
+ tmp = p;
+ return NULL;
+ }
+ p->length = prom_getproplen(node, p->name);
+ if (p->length <= 0) {
+ p->length = 0;
+ } else {
+ p->value = prom_early_alloc(p->length + 1);
+ prom_getproperty(node, p->name, p->value, p->length);
+ ((unsigned char *)p->value)[p->length] = '\0';
+ }
}
return p;
}
@@ -567,9 +632,14 @@ static struct property * __init build_prop_list(phandle node)
{
struct property *head, *tail;
- head = tail = build_one_prop(node, NULL);
+ head = tail = build_one_prop(node, NULL,
+ ".node", &node, sizeof(node));
+
+ tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
+ tail = tail->next;
while(tail) {
- tail->next = build_one_prop(node, tail->name);
+ tail->next = build_one_prop(node, tail->name,
+ NULL, NULL, 0);
tail = tail->next;
}
@@ -598,6 +668,7 @@ static struct device_node * __init create_node(phandle node)
return NULL;
dp = prom_early_alloc(sizeof(*dp));
+ dp->unique_id = unique_id++;
kref_init(&dp->kref);
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index a6a7d8168346..116d9632002d 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -537,7 +537,7 @@ static int __init topology_init(void)
for_each_possible_cpu(i) {
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
- register_cpu(p, i, NULL);
+ register_cpu(p, i);
err = 0;
}
}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6e002aacb961..1605967cce91 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -31,6 +31,40 @@
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
/*
* To debug kernel to catch accesses to certain virtual/physical addresses.
* Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
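
A sketch of a consumer of the new notifier hooks (the handler body is an illustrative assumption, not part of this patch):

static int example_pf_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		pr_debug("page fault at pc %lx\n", args->regs->tpc);
	return NOTIFY_DONE;	/* never blocks normal fault handling */
}

static struct notifier_block example_pf_nb = {
	.notifier_call	= example_pf_notify,
};

A caller would then register it with register_page_fault_notifier(&example_pf_nb) and remove it again with unregister_page_fault_notifier().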
@@ -263,7 +297,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
fault_code = get_thread_fault_code();
- if (notify_die(DIE_PAGE_FAULT, "page_fault", regs,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs,
fault_code, 0, SIGSEGV) == NOTIFY_STOP)
return;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 513993414747..cb75a27adb51 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
+#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
@@ -1520,7 +1521,7 @@ void free_initmem(void)
page = (addr +
((unsigned long) __va(kern_base)) -
((unsigned long) KERNBASE));
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
p = virt_to_page(page);
ClearPageReserved(p);
@@ -1568,6 +1569,7 @@ pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;
unsigned long _PAGE_IE __read_mostly;
+EXPORT_SYMBOL(_PAGE_IE);
unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0897852b09a3..290cec6d69e2 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1222,7 +1222,7 @@ int open_ubd_file(char *file, struct openflags *openflags, int shared,
}
}
- /* Succesful return case! */
+ /* Successful return case! */
if(backing_file_out == NULL)
return(fd);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 2ffda012385e..fae43a3054a0 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -63,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -451,13 +451,13 @@ void __init init_IRQ(void)
irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
irq_desc[TIMER_IRQ].action = NULL;
irq_desc[TIMER_IRQ].depth = 1;
- irq_desc[TIMER_IRQ].handler = &SIGVTALRM_irq_type;
+ irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
enable_irq(TIMER_IRQ);
for (i = 1; i < NR_IRQS; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
- irq_desc[i].handler = &normal_irq_type;
+ irq_desc[i].chip = &normal_irq_type;
enable_irq(i);
}
}
diff --git a/arch/v850/kernel/irq.c b/arch/v850/kernel/irq.c
index 7a151c26f82e..858c45819aab 100644
--- a/arch/v850/kernel/irq.c
+++ b/arch/v850/kernel/irq.c
@@ -65,10 +65,10 @@ int show_interrupts(struct seq_file *p, void *v)
int j;
int count = 0;
int num = -1;
- const char *type_name = irq_desc[irq].handler->typename;
+ const char *type_name = irq_desc[irq].chip->typename;
for (j = 0; j < NR_IRQS; j++)
- if (irq_desc[j].handler->typename == type_name){
+ if (irq_desc[j].chip->typename == type_name){
if (irq == j)
num = count;
count++;
@@ -117,7 +117,7 @@ init_irq_handlers (int base_irq, int num, int interval,
irq_desc[base_irq].status = IRQ_DISABLED;
irq_desc[base_irq].action = NULL;
irq_desc[base_irq].depth = 1;
- irq_desc[base_irq].handler = irq_type;
+ irq_desc[base_irq].chip = irq_type;
base_irq += interval;
}
}
diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c
index ffbb6d073bf2..3a7c5c9c3ac6 100644
--- a/arch/v850/kernel/rte_mb_a_pci.c
+++ b/arch/v850/kernel/rte_mb_a_pci.c
@@ -329,7 +329,7 @@ void pcibios_fixup_bus(struct pci_bus *b)
void
pcibios_align_resource (void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
}
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index af44130f0d65..91039844820c 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -370,6 +370,8 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
config HPET_TIMER
bool
@@ -386,24 +388,45 @@ config HPET_EMULATE_RTC
bool "Provide RTC interrupt"
depends on HPET_TIMER && RTC=y
-config GART_IOMMU
- bool "K8 GART IOMMU support"
+# Mark as embedded because too many people got it wrong.
+# The code disables itself when not needed.
+config IOMMU
+ bool "IOMMU support" if EMBEDDED
default y
select SWIOTLB
select AGP
depends on PCI
help
- Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors
- and for the bounce buffering software IOMMU.
- Needed to run systems with more than 3GB of memory properly with
- 32-bit PCI devices that do not support DAC (Double Address Cycle).
- The IOMMU can be turned off at runtime with the iommu=off parameter.
- Normally the kernel will take the right choice by itself.
- This option includes a driver for the AMD Opteron/Athlon64 IOMMU
- northbridge and a software emulation used on other systems without
- hardware IOMMU. If unsure, say Y.
-
-# need this always selected by GART_IOMMU for the VIA workaround
+ Support for full DMA access of devices with 32bit memory access only
+ on systems with more than 3GB. This is usually needed for USB,
+ sound, many IDE/SATA chipsets and some other devices.
+ Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
+ based IOMMU and a software bounce buffer based IOMMU used on Intel
+ systems and as fallback.
+ The code is only active when needed (enough memory and limited
+ device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
+ too.
+
+config CALGARY_IOMMU
+ bool "IBM Calgary IOMMU support"
+ default y
+ select SWIOTLB
+ depends on PCI && EXPERIMENTAL
+ help
+ Support for hardware IOMMUs in IBM's xSeries x366 and x460
+ systems. Needed to run systems with more than 3GB of memory
+ properly with 32-bit PCI devices that do not support DAC
+ (Double Address Cycle). Calgary also supports bus level
+ isolation, where all DMAs pass through the IOMMU. This
+ prevents them from going anywhere except their intended
+ destination. This catches hard-to-find kernel bugs and
+ mis-behaving drivers and devices that do not use the DMA-API
+ properly to set up their DMA buffers. The IOMMU can be
+ turned off at boot time with the iommu=off parameter.
+ Normally the kernel will make the right choice by itself.
+ If unsure, say Y.
+
+# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
bool
@@ -501,6 +524,10 @@ config REORDER
optimal TLB usage. If you have pretty much any version of binutils,
this can increase your kernel build time by roughly one minute.
+config K8_NB
+ def_bool y
+ depends on AGP_AMD64 || IOMMU || (PCI && NUMA)
+
endmenu
#
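
As a usage note (taken from the help texts above, not new behaviour): the GART/software IOMMU code stays inactive on machines that do not need it, can be forced on with the iommu=force boot parameter, and can be turned off entirely with iommu=off.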
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index ea31b4c62105..1d92ab56c0f9 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -13,7 +13,7 @@ config DEBUG_RODATA
If in doubt, say "N".
config IOMMU_DEBUG
- depends on GART_IOMMU && DEBUG_KERNEL
+ depends on IOMMU && DEBUG_KERNEL
bool "Enable IOMMU debugging"
help
Force the IOMMU to on even when you have less than 4GB of
@@ -35,6 +35,22 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+ This option will cause messages to be printed if free stack space
+ drops below a certain limit.
+
+config DEBUG_STACK_USAGE
+ bool "Stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T and sysrq-P debug output.
+
+ This option will slow down process creation somewhat.
+
#config X86_REMOTE_DEBUG
# bool "kgdb debugging stub"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index e573e2ab5510..431bb4bc36cd 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -27,6 +27,7 @@ LDFLAGS_vmlinux :=
CHECKFLAGS += -D__x86_64__ -m64
cflags-y :=
+cflags-kernel-y :=
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
@@ -35,7 +36,7 @@ cflags-y += -m64
cflags-y += -mno-red-zone
cflags-y += -mcmodel=kernel
cflags-y += -pipe
-cflags-$(CONFIG_REORDER) += -ffunction-sections
+cflags-kernel-$(CONFIG_REORDER) += -ffunction-sections
# this makes reading assembly source easier, but produces worse code
# actually it makes the kernel smaller too.
cflags-y += -fno-reorder-blocks
@@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time)
cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
CFLAGS += $(cflags-y)
+CFLAGS_KERNEL += $(cflags-kernel-y)
AFLAGS += -m64
head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 43ee6c50c277..deb063e7762d 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
isoimage: $(BOOTIMAGE)
-rm -rf $(obj)/isoimage
mkdir $(obj)/isoimage
- cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
- $(obj)/isoimage
+ for i in lib lib64 share end ; do \
+ if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ break ; \
+ fi ; \
+ if [ $$i = end ] ; then exit 1 ; fi ; \
+ done
cp $(BOOTIMAGE) $(obj)/isoimage/linux
echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c
index cf4b88c416dc..3755b2e394d0 100644
--- a/arch/x86_64/boot/compressed/misc.c
+++ b/arch/x86_64/boot/compressed/misc.c
@@ -77,11 +77,11 @@ static void gzip_release(void **);
*/
static unsigned char *real_mode; /* Pointer to real-mode data */
-#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
+#define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
#ifndef STANDARD_MEMORY_BIOS_CALL
-#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
+#define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
#endif
-#define SCREEN_INFO (*(struct screen_info *)(real_mode+0))
+#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
extern unsigned char input_data[];
extern int input_len;
@@ -92,9 +92,9 @@ static unsigned long output_ptr = 0;
static void *malloc(int size);
static void free(void *where);
-
-void* memset(void* s, int c, unsigned n);
-void* memcpy(void* dest, const void* src, unsigned n);
+
+static void *memset(void *s, int c, unsigned n);
+static void *memcpy(void *dest, const void *src, unsigned n);
static void putstr(const char *);
@@ -162,8 +162,8 @@ static void putstr(const char *s)
int x,y,pos;
char c;
- x = SCREEN_INFO.orig_x;
- y = SCREEN_INFO.orig_y;
+ x = RM_SCREEN_INFO.orig_x;
+ y = RM_SCREEN_INFO.orig_y;
while ( ( c = *s++ ) != '\0' ) {
if ( c == '\n' ) {
@@ -184,8 +184,8 @@ static void putstr(const char *s)
}
}
- SCREEN_INFO.orig_x = x;
- SCREEN_INFO.orig_y = y;
+ RM_SCREEN_INFO.orig_x = x;
+ RM_SCREEN_INFO.orig_y = y;
pos = (x + cols * y) * 2; /* Update cursor position */
outb_p(14, vidport);
@@ -194,7 +194,7 @@ static void putstr(const char *s)
outb_p(0xff & (pos >> 1), vidport+1);
}
-void* memset(void* s, int c, unsigned n)
+static void* memset(void* s, int c, unsigned n)
{
int i;
char *ss = (char*)s;
@@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n)
return s;
}
-void* memcpy(void* dest, const void* src, unsigned n)
+static void* memcpy(void* dest, const void* src, unsigned n)
{
int i;
char *d = (char *)dest, *s = (char *)src;
@@ -278,15 +278,15 @@ static void error(char *x)
putstr(x);
putstr("\n\n -- System halted");
- while(1);
+ while(1); /* Halt */
}
-void setup_normal_output_buffer(void)
+static void setup_normal_output_buffer(void)
{
#ifdef STANDARD_MEMORY_BIOS_CALL
- if (EXT_MEM_K < 1024) error("Less than 2MB of memory");
+ if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
#else
- if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory");
+ if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif
output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
free_mem_end_ptr = (long)real_mode;
@@ -297,13 +297,13 @@ struct moveparams {
uch *high_buffer_start; int hcount;
};
-void setup_output_buffer_if_we_run_high(struct moveparams *mv)
+static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
{
high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
#ifdef STANDARD_MEMORY_BIOS_CALL
- if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
+ if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
#else
- if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
+ if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
#endif
mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
@@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
mv->high_buffer_start = high_buffer_start;
}
-void close_output_buffer_if_we_run_high(struct moveparams *mv)
+static void close_output_buffer_if_we_run_high(struct moveparams *mv)
{
if (bytes_out > low_buffer_size) {
mv->lcount = low_buffer_size;
@@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
{
real_mode = rmode;
- if (SCREEN_INFO.orig_video_mode == 7) {
+ if (RM_SCREEN_INFO.orig_video_mode == 7) {
vidmem = (char *) 0xb0000;
vidport = 0x3b4;
} else {
@@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
vidport = 0x3d4;
}
- lines = SCREEN_INFO.orig_video_lines;
- cols = SCREEN_INFO.orig_video_cols;
+ lines = RM_SCREEN_INFO.orig_video_lines;
+ cols = RM_SCREEN_INFO.orig_video_cols;
if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
else setup_output_buffer_if_we_run_high(mv);
diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c
index c44f5e2ec100..eae86691709a 100644
--- a/arch/x86_64/boot/tools/build.c
+++ b/arch/x86_64/boot/tools/build.c
@@ -149,10 +149,8 @@ int main(int argc, char ** argv)
sz = sb.st_size;
fprintf (stderr, "System is %d kB\n", sz/1024);
sys_size = (sz + 15) / 16;
- /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */
- if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE))
- die("System is too big. Try using %smodules.",
- is_big_kernel ? "" : "bzImage or ");
+ if (!is_big_kernel && sys_size > DEF_SYSSIZE)
+ die("System is too big. Try using bzImage or modules.");
while (sz > 0) {
int l, n;
diff --git a/arch/x86_64/boot/video.S b/arch/x86_64/boot/video.S
index 32327bb37aff..2aa565c136e5 100644
--- a/arch/x86_64/boot/video.S
+++ b/arch/x86_64/boot/video.S
@@ -1929,6 +1929,7 @@ skip10: movb %ah, %al
ret
store_edid:
+#ifdef CONFIG_FIRMWARE_EDID
pushw %es # just save all registers
pushw %ax
pushw %bx
@@ -1946,6 +1947,22 @@ store_edid:
rep
stosl
+ pushw %es # save ES
+ xorw %di, %di # Report Capability
+ pushw %di
+ popw %es # ES:DI must be 0:0
+ movw $0x4f15, %ax
+ xorw %bx, %bx
+ xorw %cx, %cx
+ int $0x10
+ popw %es # restore ES
+
+ cmpb $0x00, %ah # call successful
+ jne no_edid
+
+ cmpb $0x4f, %al # function supported
+ jne no_edid
+
movw $0x4f15, %ax # do VBE/DDC
movw $0x01, %bx
movw $0x00, %cx
@@ -1953,12 +1970,14 @@ store_edid:
movw $0x140, %di
int $0x10
+no_edid:
popw %di # restore all registers
popw %dx
popw %cx
popw %bx
popw %ax
popw %es
+#endif
ret
# VIDEO_SELECT-only variables
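store_edid is now compiled only under CONFIG_FIRMWARE_EDID, and before reading the EDID block it probes VBE/DDC function 0x4F15 sub-function 0 ("report DDC capabilities"), falling through to the read (sub-function 1) only when AH = 0x00 (call succeeded) and AL = 0x4F (function supported). The same logic in C-like pseudocode, with a hypothetical intcall() helper standing in for the register setup and int $0x10:

	regs.ax = 0x4f15; regs.bx = 0; regs.cx = 0;	/* report capability, ES:DI = 0:0 */
	intcall(0x10, &regs);
	if (regs.ah == 0x00 && regs.al == 0x4f) {	/* safe to ask for EDID */
		regs.ax = 0x4f15; regs.bx = 1;		/* read EDID block */
		regs.cx = 0; regs.di = 0x140;		/* ES:DI -> 128-byte buffer */
		intcall(0x10, &regs);
	}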
diff --git a/arch/x86_64/crypto/aes-x86_64-asm.S b/arch/x86_64/crypto/aes-x86_64-asm.S
index 483cbb23ab8d..26b40de4d0b0 100644
--- a/arch/x86_64/crypto/aes-x86_64-asm.S
+++ b/arch/x86_64/crypto/aes-x86_64-asm.S
@@ -15,6 +15,10 @@
.text
+#include <asm/asm-offsets.h>
+
+#define BASE crypto_tfm_ctx_offset
+
#define R1 %rax
#define R1E %eax
#define R1X %ax
@@ -46,19 +50,19 @@
#define R10 %r10
#define R11 %r11
-#define prologue(FUNC,BASE,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
+#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
.global FUNC; \
.type FUNC,@function; \
.align 8; \
FUNC: movq r1,r2; \
movq r3,r4; \
- leaq BASE+52(r8),r9; \
+ leaq BASE+KEY+52(r8),r9; \
movq r10,r11; \
movl (r7),r5 ## E; \
movl 4(r7),r1 ## E; \
movl 8(r7),r6 ## E; \
movl 12(r7),r7 ## E; \
- movl (r8),r10 ## E; \
+ movl BASE(r8),r10 ## E; \
xorl -48(r9),r5 ## E; \
xorl -44(r9),r1 ## E; \
xorl -40(r9),r6 ## E; \
@@ -128,8 +132,8 @@ FUNC: movq r1,r2; \
movl r3 ## E,r1 ## E; \
movl r4 ## E,r2 ## E;
-#define entry(FUNC,BASE,B128,B192) \
- prologue(FUNC,BASE,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
+#define entry(FUNC,KEY,B128,B192) \
+ prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11)
@@ -147,9 +151,9 @@ FUNC: movq r1,r2; \
#define decrypt_final(TAB,OFFSET) \
round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4)
-/* void aes_encrypt(void *ctx, u8 *out, const u8 *in) */
+/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- entry(aes_encrypt,0,enc128,enc192)
+ entry(aes_enc_blk,0,enc128,enc192)
encrypt_round(aes_ft_tab,-96)
encrypt_round(aes_ft_tab,-80)
enc192: encrypt_round(aes_ft_tab,-64)
@@ -166,9 +170,9 @@ enc128: encrypt_round(aes_ft_tab,-32)
encrypt_final(aes_fl_tab,112)
return
-/* void aes_decrypt(void *ctx, u8 *out, const u8 *in) */
+/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- entry(aes_decrypt,240,dec128,dec192)
+ entry(aes_dec_blk,240,dec128,dec192)
decrypt_round(aes_it_tab,-96)
decrypt_round(aes_it_tab,-80)
dec192: decrypt_round(aes_it_tab,-64)
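The new BASE define rebases every context access by crypto_tfm_ctx_offset, because the entry points now receive the enclosing struct crypto_tfm rather than the raw AES context (the offset itself is exported through asm-offsets.c later in this patch). The C-side equivalent of what BASE(r8) and BASE+KEY+52(r8) address is simply:

	/* what crypto_tfm_ctx(tfm) does: step over the tfm header to the context */
	struct aes_ctx *ctx = (struct aes_ctx *)((char *)tfm + crypto_tfm_ctx_offset);
	u32 key_len = ctx->key_length;	/* first context word (field name assumed), loaded by movl BASE(r8) */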
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index 6f77e7700d32..68866fab37aa 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -227,10 +227,10 @@ static void __init gen_tabs(void)
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
}
-static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
- u32 *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct aes_ctx *ctx = ctx_arg;
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 i, j, t, u, v, w;
@@ -283,8 +283,18 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
return 0;
}
-extern void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in);
-extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in);
+asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ aes_enc_blk(tfm, dst, src);
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ aes_dec_blk(tfm, dst, src);
+}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
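The aes_alg initializer is cut off by the hunk context; presumably it goes on to wire the wrappers into the cipher union of the 2.6.17-era crypto API, along these lines (the exact fields and sizes below are an assumption, not part of the visible diff):

	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}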
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 69db0c0721d1..e69d403949c8 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1-git11
-# Sun Apr 16 07:22:36 2006
+# Linux kernel version: 2.6.17-git6
+# Sat Jun 24 00:52:28 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_UID16=y
-CONFIG_VM86=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
@@ -57,7 +56,6 @@ CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_SHMEM=y
CONFIG_SLAB=y
-CONFIG_DOUBLEFAULT=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
# CONFIG_SLOB is not set
@@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
CONFIG_HPET_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
-CONFIG_GART_IOMMU=y
+CONFIG_IOMMU=y
+# CONFIG_CALGARY_IOMMU is not set
CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y
@@ -158,6 +157,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
# CONFIG_REORDER is not set
+CONFIG_K8_NB=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_ISA_DMA_API=y
@@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -305,7 +307,10 @@ CONFIG_IPV6=y
# CONFIG_INET6_IPCOMP is not set
# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
#
@@ -344,6 +349,7 @@ CONFIG_IPV6=y
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
@@ -360,6 +366,7 @@ CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
#
# Connector - unified userspace <-> kernelspace linker
@@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
CONFIG_SCSI_SATA_NV=y
# CONFIG_SCSI_PDC_ADMA is not set
+# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
@@ -591,10 +599,7 @@ CONFIG_IEEE1394=y
#
# Device Drivers
#
-
-#
-# Texas Instruments PCILynx requires I2C
-#
+# CONFIG_IEEE1394_PCILYNX is not set
CONFIG_IEEE1394_OHCI1394=y
#
@@ -645,7 +650,16 @@ CONFIG_VORTEX=y
#
# Tulip family network device support
#
-# CONFIG_NET_TULIP is not set
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_ULI526X is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
@@ -697,6 +711,7 @@ CONFIG_TIGON3=y
# CONFIG_IXGB is not set
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
+# CONFIG_MYRI10GE is not set
#
# Token Ring devices
@@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y
#
# I2C support
#
-# CONFIG_I2C is not set
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_ISA=m
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
#
# SPI support
@@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y
#
# Dallas's 1-wire bus
#
-# CONFIG_W1 is not set
#
# Hardware Monitoring support
#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+CONFIG_SENSORS_SMSC47B397=m
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -918,6 +1019,7 @@ CONFIG_HWMON=y
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
@@ -953,28 +1055,17 @@ CONFIG_SOUND=y
# Open Sound System
#
CONFIG_SOUND_PRIME=y
-CONFIG_OBSOLETE_OSS_DRIVER=y
# CONFIG_SOUND_BT878 is not set
-# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set
-# CONFIG_SOUND_CS4281 is not set
-# CONFIG_SOUND_ES1370 is not set
# CONFIG_SOUND_ES1371 is not set
-# CONFIG_SOUND_ESSSOLO1 is not set
-# CONFIG_SOUND_MAESTRO is not set
-# CONFIG_SOUND_MAESTRO3 is not set
CONFIG_SOUND_ICH=y
-# CONFIG_SOUND_SONICVIBES is not set
# CONFIG_SOUND_TRIDENT is not set
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
# CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_ALI5455 is not set
-# CONFIG_SOUND_FORTE is not set
-# CONFIG_SOUND_RME96XX is not set
-# CONFIG_SOUND_AD1980 is not set
+# CONFIG_SOUND_TVMIXER is not set
#
# USB support
@@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
@@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
+# CONFIG_USB_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_APPLEDISPLAY is not set
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TEST is not set
@@ -1141,6 +1235,19 @@ CONFIG_USB_MON=y
# CONFIG_RTC_CLASS is not set
#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
# Firmware Drivers
#
# CONFIG_EDD is not set
@@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
@@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set
-# CONFIG_UNWIND_INFO is not set
+CONFIG_UNWIND_INFO=y
+CONFIG_STACK_UNWIND=y
# CONFIG_FORCED_INLINING is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/fpu32.c b/arch/x86_64/ia32/fpu32.c
index 1c23095f1813..2c8209a3605a 100644
--- a/arch/x86_64/ia32/fpu32.c
+++ b/arch/x86_64/ia32/fpu32.c
@@ -2,7 +2,6 @@
* Copyright 2002 Andi Kleen, SuSE Labs.
* FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
* This is used for ptrace, signals and coredumps in 32bit emulation.
- * $Id: fpu32.c,v 1.1 2002/03/21 14:16:32 ak Exp $
*/
#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index e0a92439f634..25e5ca22204c 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -6,8 +6,6 @@
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen
- *
- * $Id: ia32_signal.c,v 1.22 2002/07/29 10:34:03 ak Exp $
*/
#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 4ec594ab1a98..c536fa98ea37 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -155,6 +155,7 @@ sysenter_tracesys:
.previous
jmp sysenter_do_call
CFI_ENDPROC
+ENDPROC(ia32_sysenter_target)
/*
* 32bit SYSCALL instruction entry.
@@ -178,7 +179,7 @@ sysenter_tracesys:
*/
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
- CFI_DEF_CFA rsp,0
+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
swapgs
@@ -249,6 +250,7 @@ cstar_tracesys:
.quad 1b,ia32_badarg
.previous
jmp cstar_do_call
+END(ia32_cstar_target)
ia32_badarg:
movq $-EFAULT,%rax
@@ -314,16 +316,13 @@ ia32_tracesys:
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
jmp ia32_do_syscall
+END(ia32_syscall)
ia32_badsys:
movq $0,ORIG_RAX-ARGOFFSET(%rsp)
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
jmp int_ret_from_sys_call
-ni_syscall:
- movq %rax,%rdi
- jmp sys32_ni_syscall
-
quiet_ni_syscall:
movq $-ENOSYS,%rax
ret
@@ -370,10 +369,10 @@ ENTRY(ia32_ptregs_common)
RESTORE_REST
jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC
+END(ia32_ptregs_common)
.section .rodata,"a"
.align 8
- .globl ia32_sys_call_table
ia32_sys_call_table:
.quad sys_restart_syscall
.quad sys_exit
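The END()/ENDPROC() markers added throughout the 32-bit entry stubs (and in entry.S below) come from <linux/linkage.h>; they emit size/type directives so each entry point becomes a properly bounded symbol for objdump and the new unwinder. Roughly (exact definitions may differ):

	#define END(name)	.size name, .-name
	#define ENDPROC(name)	.type name, @function; END(name)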
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 23a4515a73b4..a590b7a0d92d 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -7,8 +7,6 @@
*
* This allows to access 64bit processes too; but there is no way to see the extended
* register contents.
- *
- * $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $
*/
#include <linux/kernel.h>
@@ -27,6 +25,7 @@
#include <asm/debugreg.h>
#include <asm/i387.h>
#include <asm/fpu32.h>
+#include <asm/ia32.h>
/*
* Determines which flags the user has access to [1 = access, 0 = no access].
@@ -199,6 +198,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
#undef R32
+static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
+{
+ int ret;
+ compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data);
+ siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t));
+ if (request == PTRACE_SETSIGINFO) {
+ ret = copy_siginfo_from_user32(si, si32);
+ if (ret)
+ return ret;
+ }
+ ret = sys_ptrace(request, pid, addr, (unsigned long)si);
+ if (ret)
+ return ret;
+ if (request == PTRACE_GETSIGINFO)
+ ret = copy_siginfo_to_user32(si32, si);
+ return ret;
+}
+
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
struct task_struct *child;
@@ -208,9 +225,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
__u32 val;
switch (request) {
- default:
+ case PTRACE_TRACEME:
+ case PTRACE_ATTACH:
+ case PTRACE_KILL:
+ case PTRACE_CONT:
+ case PTRACE_SINGLESTEP:
+ case PTRACE_DETACH:
+ case PTRACE_SYSCALL:
+ case PTRACE_SETOPTIONS:
return sys_ptrace(request, pid, addr, data);
+ default:
+ return -EINVAL;
+
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
case PTRACE_POKEDATA:
@@ -225,10 +252,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_GETFPXREGS:
case PTRACE_GETEVENTMSG:
break;
- }
- if (request == PTRACE_TRACEME)
- return ptrace_traceme();
+ case PTRACE_SETSIGINFO:
+ case PTRACE_GETSIGINFO:
+ return ptrace32_siginfo(request, pid, addr, data);
+ }
child = ptrace_get_task_struct(pid);
if (IS_ERR(child))
@@ -349,8 +377,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break;
default:
- ret = -EINVAL;
- break;
+ BUG();
}
out:
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index f182b20858e2..dc88154c412b 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -508,19 +508,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
return compat_sys_wait4(pid, stat_addr, options, NULL);
}
-int sys32_ni_syscall(int call)
-{
- struct task_struct *me = current;
- static char lastcomm[sizeof(me->comm)];
-
- if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
- printk(KERN_INFO "IA32 syscall %d from %s not implemented\n",
- call, me->comm);
- strncpy(lastcomm, me->comm, sizeof(lastcomm));
- }
- return -ENOSYS;
-}
-
/* 32-bit timeval and related flotsam. */
asmlinkage long
@@ -916,7 +903,7 @@ long sys32_vm86_warning(void)
struct task_struct *me = current;
static char lastcomm[sizeof(me->comm)];
if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
- printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
+ compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
me->comm);
strncpy(lastcomm, me->comm, sizeof(lastcomm));
}
@@ -929,13 +916,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
}
-static int __init ia32_init (void)
-{
- printk("IA32 emulation $Id: sys_ia32.c,v 1.32 2002/03/24 13:02:28 ak Exp $\n");
- return 0;
-}
-
-__initcall(ia32_init);
-
-extern unsigned long ia32_sys_call_table[];
-EXPORT_SYMBOL(ia32_sys_call_table);
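The vm86 warning now goes through compat_printk(), presumably the sysctl-gated wrapper added elsewhere in this series so the ia32 compatibility chatter can be turned off; a sketch of what such a wrapper looks like (the compat_log knob name is an assumption):

	int compat_printk(const char *fmt, ...)
	{
		va_list ap;
		int ret;

		if (!compat_log)
			return 0;
		va_start(ap, fmt);
		ret = vprintk(fmt, ap);
		va_end(ap);
		return ret;
	}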
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 059c88313f4e..aeb9c560be88 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
x8664_ksyms.o i387.o syscall.o vsyscall.o \
setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
- pci-dma.o pci-nommu.o
+ pci-dma.o pci-nommu.o alternative.o
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -28,11 +28,13 @@ obj-$(CONFIG_PM) += suspend.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
+obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
+obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
obj-$(CONFIG_X86_VSMP) += vsmp.o
+obj-$(CONFIG_K8_NB) += k8.o
obj-$(CONFIG_MODULES) += module.o
@@ -49,3 +51,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
+alternative-y += ../../i386/kernel/alternative.o
+
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 70b9d21ed675..a195ef06ec55 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -8,7 +8,6 @@
* because only the bootmem allocator can allocate 32+MB.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
- * $Id: aperture.c,v 1.7 2003/08/01 03:36:18 ak Exp $
*/
#include <linux/config.h>
#include <linux/kernel.h>
@@ -24,6 +23,7 @@
#include <asm/proto.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
+#include <asm/k8.h>
int iommu_aperture;
int iommu_aperture_disabled __initdata = 0;
@@ -37,8 +37,6 @@ int fix_aperture __initdata = 1;
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
-#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
-
static u32 __init allocate_aperture(void)
{
pg_data_t *nd0 = NODE_DATA(0);
@@ -68,20 +66,20 @@ static u32 __init allocate_aperture(void)
return (u32)__pa(p);
}
-static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size)
+static int __init aperture_valid(u64 aper_base, u32 aper_size)
{
if (!aper_base)
return 0;
if (aper_size < 64*1024*1024) {
- printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20);
+ printk("Aperture too small (%d MB)\n", aper_size>>20);
return 0;
}
if (aper_base + aper_size >= 0xffffffff) {
- printk("Aperture from %s beyond 4GB. Ignoring.\n",name);
+ printk("Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
- printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name);
+ printk("Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
return 1;
@@ -140,7 +138,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
aper, 32 << *order, apsizereg);
- if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order))
+ if (!aperture_valid(aper, (32*1024*1024) << *order))
return 0;
return (u32)aper;
}
@@ -208,10 +206,10 @@ void __init iommu_hole_init(void)
fix = 0;
for (num = 24; num < 32; num++) {
- char name[30];
- if (read_pci_config(0, num, 3, 0x00) != NB_ID_3)
- continue;
+ if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
+ continue;
+ iommu_detected = 1;
iommu_aperture = 1;
aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
@@ -222,9 +220,7 @@ void __init iommu_hole_init(void)
printk("CPU %d: aperture @ %Lx size %u MB\n", num-24,
aper_base, aper_size>>20);
- sprintf(name, "northbridge cpu %d", num-24);
-
- if (!aperture_valid(name, aper_base, aper_size)) {
+ if (!aperture_valid(aper_base, aper_size)) {
fix = 1;
break;
}
@@ -273,7 +269,7 @@ void __init iommu_hole_init(void)
/* Fix up the north bridges */
for (num = 24; num < 32; num++) {
- if (read_pci_config(0, num, 3, 0x00) != NB_ID_3)
+ if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
continue;
/* Don't enable translation yet. That is done later.
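The open-coded NB_ID_3 compare is gone in favour of early_is_k8_nb(), introduced in arch/x86_64/kernel/k8.c further down. read_pci_config(bus, slot, func, 0) returns the vendor ID in the low 16 bits and the device ID in the high 16 bits, so the callers stay one-liners:

	u32 id = read_pci_config(0, num, 3, 0x00);
	if (!early_is_k8_nb(id))	/* matches any vendor/device pair in k8_nb_ids[] */
		continue;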
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 29ef99001e05..b2ead91df218 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -100,7 +100,7 @@ void clear_local_APIC(void)
maxlvt = get_maxlvt();
/*
- * Masking an LVT entry on a P6 can trigger a local APIC error
+ * Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
if (maxlvt >= 3) {
@@ -851,7 +851,18 @@ void disable_APIC_timer(void)
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
+ /*
+ * When an illegal vector value (0-15) is written to an LVT
+ * entry and delivery mode is Fixed, the APIC may signal an
+ * illegal vector error, without regard to whether the mask
+ * bit is set or whether an interrupt is actually seen on input.
+ *
+ * Boot sequence might call this function when the LVTT has
+ * '0' vector value. So make sure vector field is set to
+ * valid value.
+ */
+ v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write(APIC_LVTT, v);
}
}
@@ -909,15 +920,13 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
}
-#ifdef CONFIG_X86_MCE_AMD
-void setup_threshold_lvt(unsigned long lvt_off)
+void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
+ unsigned char msg_type, unsigned char mask)
{
- unsigned int v = 0;
- unsigned long reg = (lvt_off << 4) + 0x500;
- v |= THRESHOLD_APIC_VECTOR;
+ unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
+ unsigned int v = (mask << 16) | (msg_type << 8) | vector;
apic_write(reg, v);
}
-#endif /* CONFIG_X86_MCE_AMD */
#undef APIC_DIVISOR
@@ -983,7 +992,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
}
/*
- * oem_force_hpet_timer -- force HPET mode for some boxes.
+ * apic_is_clustered_box() -- Check if we can expect good TSC
*
* Thus far, the major user of this is IBM's Summit2 series:
*
@@ -991,7 +1000,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* multi-chassis. Use available data to take a good guess.
* If in doubt, go HPET.
*/
-__cpuinit int oem_force_hpet_timer(void)
+__cpuinit int apic_is_clustered_box(void)
{
int i, clusters, zeros;
unsigned id;
@@ -1022,8 +1031,7 @@ __cpuinit int oem_force_hpet_timer(void)
}
/*
- * If clusters > 2, then should be multi-chassis. Return 1 for HPET.
- * Else return 0 to use TSC.
+ * If clusters > 2, then should be multi-chassis.
* May have to revisit this when multi-core + hyperthreaded CPUs come
* out, but AFAIK this will work even for them.
*/
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index 38834bbbae11..96687e2beb2c 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
* and format the required data.
*/
+#include <linux/crypto.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/errno.h>
@@ -68,5 +69,7 @@ int main(void)
DEFINE(pbe_next, offsetof(struct pbe, next));
BLANK();
DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
return 0;
}
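This DEFINE() is how crypto_tfm_ctx_offset reaches aes-x86_64-asm.S: asm-offsets.c is compiled to assembly, Kbuild scrapes the markers into <asm/asm-offsets.h>, and the .S file includes that header. The macro is roughly:

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))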
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 4e6c3b729e39..d8d5750d6106 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -111,14 +111,14 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
for(;;)
- asm("hlt");
+ halt();
return 1;
}
static void smp_send_nmi_allbutself(void)
{
- send_IPI_allbutself(APIC_DM_NMI);
+ send_IPI_allbutself(NMI_VECTOR);
}
/*
@@ -161,7 +161,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
{
/*
* This function is only called after the system
- * has paniced or is otherwise in a critical state.
+ * has panicked or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
*
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 1ef6028f721e..9e94d834624b 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -1,7 +1,6 @@
/*
* Handle the memory map.
* The functions here do the job until bootmem takes over.
- * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
*
* Getting sanitize_e820_map() in sync with i386 version by applying change:
* - Provisions for empty E820 memory regions (reported by certain BIOSes).
@@ -621,6 +620,7 @@ void __init parse_memmapopt(char *p, char **from)
}
unsigned long pci_mem_start = 0xaeedbabe;
+EXPORT_SYMBOL(pci_mem_start);
/*
* Search for the biggest gap in the low 32 bits of the e820
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 586b34c00c48..22cac4487b57 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -154,6 +154,7 @@ rff_trace:
GET_THREAD_INFO(%rcx)
jmp rff_action
CFI_ENDPROC
+END(ret_from_fork)
/*
* System call entry. Upto 6 arguments in registers are supported.
@@ -188,7 +189,7 @@ rff_trace:
ENTRY(system_call)
CFI_STARTPROC simple
- CFI_DEF_CFA rsp,0
+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
swapgs
@@ -285,6 +286,7 @@ tracesys:
/* Use IRET because user could have changed frame */
jmp int_ret_from_sys_call
CFI_ENDPROC
+END(system_call)
/*
* Syscall return path ending with IRET.
@@ -364,6 +366,7 @@ int_restore_rest:
cli
jmp int_with_check
CFI_ENDPROC
+END(int_ret_from_sys_call)
/*
* Certain special system calls that need to save a complete full stack frame.
@@ -375,6 +378,7 @@ int_restore_rest:
leaq \func(%rip),%rax
leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
jmp ptregscall_common
+END(\label)
.endm
CFI_STARTPROC
@@ -404,6 +408,7 @@ ENTRY(ptregscall_common)
CFI_REL_OFFSET rip, 0
ret
CFI_ENDPROC
+END(ptregscall_common)
ENTRY(stub_execve)
CFI_STARTPROC
@@ -418,6 +423,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
+END(stub_execve)
/*
* sigreturn is special because it needs to restore all registers on return.
@@ -435,6 +441,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
+END(stub_rt_sigreturn)
/*
* initial frame state for interrupts and exceptions
@@ -466,29 +473,18 @@ ENTRY(stub_rt_sigreturn)
/* 0(%rsp): interrupt number */
.macro interrupt func
cld
-#ifdef CONFIG_DEBUG_INFO
- SAVE_ALL
- movq %rsp,%rdi
- /*
- * Setup a stack frame pointer. This allows gdb to trace
- * back to the original stack.
- */
- movq %rsp,%rbp
- CFI_DEF_CFA_REGISTER rbp
-#else
SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
-#endif
+ pushq %rbp
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rbp, 0
+ movq %rsp,%rbp
+ CFI_DEF_CFA_REGISTER rbp
testl $3,CS(%rdi)
je 1f
swapgs
1: incl %gs:pda_irqcount # RED-PEN should check preempt count
- movq %gs:pda_irqstackptr,%rax
- cmoveq %rax,%rsp /*todo This needs CFI annotation! */
- pushq %rdi # save old stack
-#ifndef CONFIG_DEBUG_INFO
- CFI_ADJUST_CFA_OFFSET 8
-#endif
+ cmoveq %gs:pda_irqstackptr,%rsp
call \func
.endm
@@ -497,17 +493,11 @@ ENTRY(common_interrupt)
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
- popq %rdi
-#ifndef CONFIG_DEBUG_INFO
- CFI_ADJUST_CFA_OFFSET -8
-#endif
cli
decl %gs:pda_irqcount
-#ifdef CONFIG_DEBUG_INFO
- movq RBP(%rdi),%rbp
+ leaveq
CFI_DEF_CFA_REGISTER rsp
-#endif
- leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
+ CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
@@ -589,14 +579,16 @@ retint_kernel:
call preempt_schedule_irq
jmp exit_intr
#endif
+
CFI_ENDPROC
+END(common_interrupt)
/*
* APIC interrupts.
*/
.macro apicinterrupt num,func
INTR_FRAME
- pushq $\num-256
+ pushq $~(\num)
CFI_ADJUST_CFA_OFFSET 8
interrupt \func
jmp ret_from_intr
@@ -605,17 +597,21 @@ retint_kernel:
ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
+END(thermal_interrupt)
ENTRY(threshold_interrupt)
apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
+END(threshold_interrupt)
#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
+END(reschedule_interrupt)
.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
+END(invalidate_interrupt\num)
.endm
INVALIDATE_ENTRY 0
@@ -629,17 +625,21 @@ ENTRY(invalidate_interrupt\num)
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
+END(call_function_interrupt)
#endif
#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
+END(apic_timer_interrupt)
ENTRY(error_interrupt)
apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
+END(error_interrupt)
ENTRY(spurious_interrupt)
apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
+END(spurious_interrupt)
#endif
/*
@@ -777,6 +777,7 @@ error_kernelspace:
cmpq $gs_change,RIP(%rsp)
je error_swapgs
jmp error_sti
+END(error_entry)
/* Reload gs selector with exception handling */
/* edi: new selector */
@@ -794,6 +795,7 @@ gs_change:
CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
+ENDPROC(load_gs_index)
.section __ex_table,"a"
.align 8
@@ -847,7 +849,7 @@ ENTRY(kernel_thread)
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
-
+ENDPROC(kernel_thread)
child_rip:
/*
@@ -860,6 +862,7 @@ child_rip:
# exit
xorl %edi, %edi
call do_exit
+ENDPROC(child_rip)
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
@@ -889,19 +892,24 @@ ENTRY(execve)
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
+ENDPROC(execve)
KPROBE_ENTRY(page_fault)
errorentry do_page_fault
+END(page_fault)
.previous .text
ENTRY(coprocessor_error)
zeroentry do_coprocessor_error
+END(coprocessor_error)
ENTRY(simd_coprocessor_error)
zeroentry do_simd_coprocessor_error
+END(simd_coprocessor_error)
ENTRY(device_not_available)
zeroentry math_state_restore
+END(device_not_available)
/* runs on exception stack */
KPROBE_ENTRY(debug)
@@ -911,6 +919,7 @@ KPROBE_ENTRY(debug)
paranoidentry do_debug, DEBUG_STACK
jmp paranoid_exit
CFI_ENDPROC
+END(debug)
.previous .text
/* runs on exception stack */
@@ -961,6 +970,7 @@ paranoid_schedule:
cli
jmp paranoid_userspace
CFI_ENDPROC
+END(nmi)
.previous .text
KPROBE_ENTRY(int3)
@@ -970,22 +980,28 @@ KPROBE_ENTRY(int3)
paranoidentry do_int3, DEBUG_STACK
jmp paranoid_exit
CFI_ENDPROC
+END(int3)
.previous .text
ENTRY(overflow)
zeroentry do_overflow
+END(overflow)
ENTRY(bounds)
zeroentry do_bounds
+END(bounds)
ENTRY(invalid_op)
zeroentry do_invalid_op
+END(invalid_op)
ENTRY(coprocessor_segment_overrun)
zeroentry do_coprocessor_segment_overrun
+END(coprocessor_segment_overrun)
ENTRY(reserved)
zeroentry do_reserved
+END(reserved)
/* runs on exception stack */
ENTRY(double_fault)
@@ -993,12 +1009,15 @@ ENTRY(double_fault)
paranoidentry do_double_fault
jmp paranoid_exit
CFI_ENDPROC
+END(double_fault)
ENTRY(invalid_TSS)
errorentry do_invalid_TSS
+END(invalid_TSS)
ENTRY(segment_not_present)
errorentry do_segment_not_present
+END(segment_not_present)
/* runs on exception stack */
ENTRY(stack_segment)
@@ -1006,19 +1025,24 @@ ENTRY(stack_segment)
paranoidentry do_stack_segment
jmp paranoid_exit
CFI_ENDPROC
+END(stack_segment)
KPROBE_ENTRY(general_protection)
errorentry do_general_protection
+END(general_protection)
.previous .text
ENTRY(alignment_check)
errorentry do_alignment_check
+END(alignment_check)
ENTRY(divide_error)
zeroentry do_divide_error
+END(divide_error)
ENTRY(spurious_interrupt_bug)
zeroentry do_spurious_interrupt_bug
+END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
/* runs on exception stack */
@@ -1029,6 +1053,7 @@ ENTRY(machine_check)
paranoidentry do_machine_check
jmp paranoid_exit
CFI_ENDPROC
+END(machine_check)
#endif
ENTRY(call_softirq)
@@ -1046,3 +1071,37 @@ ENTRY(call_softirq)
decl %gs:pda_irqcount
ret
CFI_ENDPROC
+ENDPROC(call_softirq)
+
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+ CFI_STARTPROC
+ movq %r15, R15(%rdi)
+ movq %r14, R14(%rdi)
+ xchgq %rsi, %rdx
+ movq %r13, R13(%rdi)
+ movq %r12, R12(%rdi)
+ xorl %eax, %eax
+ movq %rbp, RBP(%rdi)
+ movq %rbx, RBX(%rdi)
+ movq (%rsp), %rcx
+ movq %rax, R11(%rdi)
+ movq %rax, R10(%rdi)
+ movq %rax, R9(%rdi)
+ movq %rax, R8(%rdi)
+ movq %rax, RAX(%rdi)
+ movq %rax, RCX(%rdi)
+ movq %rax, RDX(%rdi)
+ movq %rax, RSI(%rdi)
+ movq %rax, RDI(%rdi)
+ movq %rax, ORIG_RAX(%rdi)
+ movq %rcx, RIP(%rdi)
+ leaq 8(%rsp), %rcx
+ movq $__KERNEL_CS, CS(%rdi)
+ movq %rax, EFLAGS(%rdi)
+ movq %rcx, RSP(%rdi)
+ movq $__KERNEL_DS, SS(%rdi)
+ jmpq *%rdx
+ CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 1a2ab825be98..21c7066e236a 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -78,22 +78,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
static void flat_send_IPI_allbutself(int vector)
{
-#ifndef CONFIG_HOTPLUG_CPU
- if (((num_online_cpus()) - 1) >= 1)
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+#ifdef CONFIG_HOTPLUG_CPU
+ int hotplug = 1;
#else
- cpumask_t allbutme = cpu_online_map;
+ int hotplug = 0;
+#endif
+ if (hotplug || vector == NMI_VECTOR) {
+ cpumask_t allbutme = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutme);
+ cpu_clear(smp_processor_id(), allbutme);
- if (!cpus_empty(allbutme))
- flat_send_IPI_mask(allbutme, vector);
-#endif
+ if (!cpus_empty(allbutme))
+ flat_send_IPI_mask(allbutme, vector);
+ } else if (num_online_cpus() > 1) {
+ __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+ }
}
static void flat_send_IPI_all(int vector)
{
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
+ if (vector == NMI_VECTOR)
+ flat_send_IPI_mask(cpu_online_map, vector);
+ else
+ __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}
static int flat_apic_id_registered(void)
@@ -108,10 +115,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
static unsigned int phys_pkg_id(int index_msb)
{
- u32 ebx;
-
- ebx = cpuid_ebx(1);
- return ((ebx >> 24) & 0xFF) >> index_msb;
+ return hard_smp_processor_id() >> index_msb;
}
struct genapic apic_flat = {
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cea20a66c150..e6a71c9556d9 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -2,8 +2,6 @@
* linux/arch/x86_64/kernel/head64.c -- prepare to run common code
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
- *
- * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
*/
#include <linux/init.h>
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 5ecd34ab8c2b..3dd1659427dc 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -44,11 +44,11 @@
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
BI(x,c) BI(x,d) BI(x,e) BI(x,f)
-#define BUILD_14_IRQS(x) \
+#define BUILD_15_IRQS(x) \
BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
- BI(x,c) BI(x,d)
+ BI(x,c) BI(x,d) BI(x,e)
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
@@ -73,13 +73,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
#ifdef CONFIG_PCI_MSI
- BUILD_14_IRQS(0xe)
+ BUILD_15_IRQS(0xe)
#endif
#endif
#undef BUILD_16_IRQS
-#undef BUILD_14_IRQS
+#undef BUILD_15_IRQS
#undef BI
@@ -92,11 +92,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-#define IRQLIST_14(x) \
+#define IRQLIST_15(x) \
IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
- IRQ(x,c), IRQ(x,d)
+ IRQ(x,c), IRQ(x,d), IRQ(x,e)
void (*interrupt[NR_IRQS])(void) = {
IRQLIST_16(0x0),
@@ -108,7 +108,7 @@ void (*interrupt[NR_IRQS])(void) = {
IRQLIST_16(0xc), IRQLIST_16(0xd)
#ifdef CONFIG_PCI_MSI
- , IRQLIST_14(0xe)
+ , IRQLIST_15(0xe)
#endif
#endif
@@ -235,7 +235,7 @@ void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
- irq_desc[irq].handler = &i8259A_irq_type;
+ irq_desc[irq].chip = &i8259A_irq_type;
enable_irq(irq);
}
@@ -278,7 +278,7 @@ static void mask_and_ack_8259A(unsigned int irq)
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
* of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs
* usually resulting from the 8259A-1|2 PICs) occur
@@ -468,12 +468,12 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- irq_desc[i].handler = &i8259A_irq_type;
+ irq_desc[i].chip = &i8259A_irq_type;
} else {
/*
* 'high' PCI IRQs filled in on demand
*/
- irq_desc[i].handler = &no_irq_type;
+ irq_desc[i].chip = &no_irq_type;
}
}
}
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 9cc7031b7151..401b687fef21 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -41,6 +41,7 @@
#include <asm/mach_apic.h>
#include <asm/acpi.h>
#include <asm/dma.h>
+#include <asm/nmi.h>
#define __apicdebuginit __init
@@ -56,6 +57,7 @@ int timer_over_8254 __initdata = 0;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
+static DEFINE_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -317,7 +319,7 @@ void __init check_ioapic(void)
vendor &= 0xffff;
switch (vendor) {
case PCI_VENDOR_ID_VIA:
-#ifdef CONFIG_GART_IOMMU
+#ifdef CONFIG_IOMMU
if ((end_pfn > MAX_DMA32_PFN ||
force_iommu) &&
!iommu_aperture_allowed) {
@@ -834,10 +836,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
+ unsigned long flags;
+ int vector;
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
+
+ spin_lock_irqsave(&vector_lock, flags);
+
+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
+ spin_unlock_irqrestore(&vector_lock, flags);
return IO_APIC_VECTOR(irq);
+ }
next:
current_vector += 8;
if (current_vector == IA32_SYSCALL_VECTOR)
@@ -849,11 +858,14 @@ next:
current_vector = FIRST_DEVICE_VECTOR + offset;
}
- vector_irq[current_vector] = irq;
+ vector = current_vector;
+ vector_irq[vector] = irq;
if (irq != AUTO_ASSIGN)
- IO_APIC_VECTOR(irq) = current_vector;
+ IO_APIC_VECTOR(irq) = vector;
- return current_vector;
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return vector;
}
extern void (*interrupt[NR_IRQS])(void);
@@ -864,23 +876,18 @@ static struct hw_interrupt_type ioapic_edge_type;
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
-static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
- if (use_pci_vector() && !platform_legacy_irq(irq)) {
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
- irq_desc[vector].handler = &ioapic_level_type;
- else
- irq_desc[vector].handler = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[vector]);
- } else {
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
- irq_desc[irq].handler = &ioapic_level_type;
- else
- irq_desc[irq].handler = &ioapic_edge_type;
- set_intr_gate(vector, interrupt[irq]);
- }
+ unsigned idx;
+
+ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+ irq_desc[idx].chip = &ioapic_level_type;
+ else
+ irq_desc[idx].chip = &ioapic_edge_type;
+ set_intr_gate(vector, interrupt[idx]);
}
static void __init setup_IO_APIC_irqs(void)
@@ -981,7 +988,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- irq_desc[0].handler = &ioapic_edge_type;
+ irq_desc[0].chip = &ioapic_edge_type;
/*
* Add it to the IO-APIC irq-routing table:
@@ -1611,6 +1618,13 @@ static void set_ioapic_affinity_vector (unsigned int vector,
#endif // CONFIG_SMP
#endif // CONFIG_PCI_MSI
+static int ioapic_retrigger(unsigned int irq)
+{
+ send_IPI_self(IO_APIC_VECTOR(irq));
+
+ return 1;
+}
+
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -1631,6 +1645,7 @@ static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
#endif
+ .retrigger = ioapic_retrigger,
};
static struct hw_interrupt_type ioapic_level_type __read_mostly = {
@@ -1644,6 +1659,7 @@ static struct hw_interrupt_type ioapic_level_type __read_mostly = {
#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
#endif
+ .retrigger = ioapic_retrigger,
};
static inline void init_IO_APIC_traps(void)
@@ -1678,7 +1694,7 @@ static inline void init_IO_APIC_traps(void)
make_8259A_irq(irq);
else
/* Strange. Oh, well.. */
- irq_desc[irq].handler = &no_irq_type;
+ irq_desc[irq].chip = &no_irq_type;
}
}
}
@@ -1895,7 +1911,7 @@ static inline void check_timer(void)
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
disable_8259A_irq(0);
- irq_desc[0].handler = &lapic_irq_type;
+ irq_desc[0].chip = &lapic_irq_type;
apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d8bd0b345b1e..a1f1df5f7bfc 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -26,6 +26,30 @@ atomic_t irq_mis_count;
#endif
#endif
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/*
+ * Probabilistic stack overflow check:
+ *
+ * Only check the stack in process context, because everything else
+ * runs on the big interrupt stacks. Checking reliably is too expensive,
+ * so we just check from interrupts.
+ */
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+ u64 curbase = (u64) current->thread_info;
+ static unsigned long warned = -60*HZ;
+
+ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
+ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
+ time_after(jiffies, warned + 60*HZ)) {
+ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
+ current->comm, curbase, regs->rsp);
+ show_stack(NULL,NULL);
+ warned = jiffies;
+ }
+}
+#endif
+
/*
* Generic, controller-independent functions:
*/
@@ -39,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n');
}
@@ -55,7 +79,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -91,12 +115,20 @@ skip:
*/
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
- /* high bits used in ret_from_ code */
- unsigned irq = regs->orig_rax & 0xff;
+ /* high bit used in ret_from_ code */
+ unsigned irq = ~regs->orig_rax;
+
+ if (unlikely(irq >= NR_IRQS)) {
+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+ __FUNCTION__, irq);
+ BUG();
+ }
exit_idle();
irq_enter();
-
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ stack_overflow_check(regs);
+#endif
__do_IRQ(irq, regs);
irq_exit();
@@ -114,13 +146,13 @@ void fixup_irqs(cpumask_t map)
if (irq == 2)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].handler->set_affinity)
- irq_desc[irq].handler->set_affinity(irq, mask);
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq, mask);
else if (irq_desc[irq].action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
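The ~regs->orig_rax decode pairs with the pushq $~(\num) change in entry.S's apicinterrupt macro earlier in the patch: storing the one's complement keeps orig_rax negative (so it can never be mistaken for a syscall number) while preserving every bit of the vector instead of only the low byte, and do_IRQ() can bounds-check the recovered value. Schematically:

	regs->orig_rax = ~vector;	/* what the interrupt stub pushes */
	irq = ~regs->orig_rax;		/* recovered in do_IRQ(), then checked against NR_IRQS */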
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
new file mode 100644
index 000000000000..6416682d33d0
--- /dev/null
+++ b/arch/x86_64/kernel/k8.c
@@ -0,0 +1,118 @@
+/*
+ * Shared support code for AMD K8 northbridges and derivatives.
+ * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
+ */
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/k8.h>
+
+int num_k8_northbridges;
+EXPORT_SYMBOL(num_k8_northbridges);
+
+static u32 *flush_words;
+
+struct pci_device_id k8_nb_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
+ {}
+};
+EXPORT_SYMBOL(k8_nb_ids);
+
+struct pci_dev **k8_northbridges;
+EXPORT_SYMBOL(k8_northbridges);
+
+static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+{
+ do {
+ dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+ if (!dev)
+ break;
+ } while (!pci_match_id(&k8_nb_ids[0], dev));
+ return dev;
+}
+
+int cache_k8_northbridges(void)
+{
+ int i;
+ struct pci_dev *dev;
+ if (num_k8_northbridges)
+ return 0;
+
+ num_k8_northbridges = 0;
+ dev = NULL;
+ while ((dev = next_k8_northbridge(dev)) != NULL)
+ num_k8_northbridges++;
+
+ k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
+ GFP_KERNEL);
+ if (!k8_northbridges)
+ return -ENOMEM;
+
+ flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
+ if (!flush_words) {
+ kfree(k8_northbridges);
+ return -ENOMEM;
+ }
+
+ dev = NULL;
+ i = 0;
+ while ((dev = next_k8_northbridge(dev)) != NULL) {
+ k8_northbridges[i] = dev;
+ pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
+ }
+ k8_northbridges[i] = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+
+/* Ignores subdevice/subvendor but as far as I can figure out
+ they're useless anyways */
+int __init early_is_k8_nb(u32 device)
+{
+ struct pci_device_id *id;
+ u32 vendor = device & 0xffff;
+ device >>= 16;
+ for (id = k8_nb_ids; id->vendor; id++)
+ if (vendor == id->vendor && device == id->device)
+ return 1;
+ return 0;
+}
+
+void k8_flush_garts(void)
+{
+ int flushed, i;
+ unsigned long flags;
+ static DEFINE_SPINLOCK(gart_lock);
+
+ /* Avoid races between AGP and IOMMU. In theory it's not needed
+ but I'm not sure if the hardware won't lose flush requests
+ when another is pending. This whole thing is so expensive anyways
+ that it doesn't matter to serialize more. -AK */
+ spin_lock_irqsave(&gart_lock, flags);
+ flushed = 0;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ pci_write_config_dword(k8_northbridges[i], 0x9c,
+ flush_words[i]|1);
+ flushed++;
+ }
+ for (i = 0; i < num_k8_northbridges; i++) {
+ u32 w;
+ /* Make sure the hardware actually executed the flush*/
+ for (;;) {
+ pci_read_config_dword(k8_northbridges[i],
+ 0x9c, &w);
+ if (!(w & 1))
+ break;
+ cpu_relax();
+ }
+ }
+ spin_unlock_irqrestore(&gart_lock, flags);
+ if (!flushed)
+ printk("nothing to flush?\n");
+}
+EXPORT_SYMBOL_GPL(k8_flush_garts);
+
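A consumer of the new k8.c helpers (the GART IOMMU code is the obvious candidate) would use them roughly as follows; this is a usage sketch under that assumption, not a hunk from the patch:

	if (cache_k8_northbridges() < 0)
		panic("Cannot enumerate AMD northbridges");

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *nb = k8_northbridges[i];
		/* program per-northbridge GART registers, e.g. the 0x90 aperture control */
		pci_write_config_dword(nb, 0x90, aper_order << 1);
	}
	k8_flush_garts();	/* flush after changing GART mappings */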
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 25ac8a3faae6..83fb24a02821 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -149,8 +149,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
unsigned long start_address,
unsigned long pgtable) ATTRIB_NORET;
-const extern unsigned char relocate_new_kernel[];
-const extern unsigned long relocate_new_kernel_size;
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned long relocate_new_kernel_size;
int machine_kexec_prepare(struct kimage *image)
{
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index c69fc43cee7b..88845674c661 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = {
set_kset_name("machinecheck"),
};
-static DEFINE_PER_CPU(struct sys_device, device_mce);
+DEFINE_PER_CPU(struct sys_device, device_mce);
/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
#endif
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
+static __cpuinit int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -647,7 +647,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block mce_cpu_notifier = {
+static struct notifier_block __cpuinitdata mce_cpu_notifier = {
.notifier_call = mce_cpu_callback,
};
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d13b241ad094..335200aa2737 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -1,5 +1,5 @@
/*
- * (c) 2005 Advanced Micro Devices, Inc.
+ * (c) 2005, 2006 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
@@ -8,9 +8,10 @@
*
* Support : jacob.shin@amd.com
*
- * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F.
- * MC4_MISC0 exists per physical processor.
+ * April 2006
+ * - added support for AMD Family 0x10 processors
*
+ * All MC4_MISCi registers are shared between multi-cores
*/
#include <linux/cpu.h>
@@ -29,32 +30,45 @@
#include <asm/percpu.h>
#include <asm/idle.h>
-#define PFX "mce_threshold: "
-#define VERSION "version 1.00.9"
-#define NR_BANKS 5
-#define THRESHOLD_MAX 0xFFF
-#define INT_TYPE_APIC 0x00020000
-#define MASK_VALID_HI 0x80000000
-#define MASK_LVTOFF_HI 0x00F00000
-#define MASK_COUNT_EN_HI 0x00080000
-#define MASK_INT_TYPE_HI 0x00060000
-#define MASK_OVERFLOW_HI 0x00010000
+#define PFX "mce_threshold: "
+#define VERSION "version 1.1.1"
+#define NR_BANKS 6
+#define NR_BLOCKS 9
+#define THRESHOLD_MAX 0xFFF
+#define INT_TYPE_APIC 0x00020000
+#define MASK_VALID_HI 0x80000000
+#define MASK_LVTOFF_HI 0x00F00000
+#define MASK_COUNT_EN_HI 0x00080000
+#define MASK_INT_TYPE_HI 0x00060000
+#define MASK_OVERFLOW_HI 0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
-#define MASK_OVERFLOW 0x0001000000000000L
+#define MASK_BLKPTR_LO 0xFF000000
+#define MCG_XBLK_ADDR 0xC0000400
-struct threshold_bank {
+struct threshold_block {
+ unsigned int block;
+ unsigned int bank;
unsigned int cpu;
- u8 bank;
- u8 interrupt_enable;
+ u32 address;
+ u16 interrupt_enable;
u16 threshold_limit;
struct kobject kobj;
+ struct list_head miscj;
};
-static struct threshold_bank threshold_defaults = {
+/* defaults used early on boot */
+static struct threshold_block threshold_defaults = {
.interrupt_enable = 0,
.threshold_limit = THRESHOLD_MAX,
};
+struct threshold_bank {
+ struct kobject kobj;
+ struct threshold_block *blocks;
+ cpumask_t cpus;
+};
+static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
0, 0, 0, 0, 1
@@ -68,12 +82,12 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
*/
/* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_bank *b,
+static void threshold_restart_bank(struct threshold_block *b,
int reset, u16 old_limit)
{
u32 mci_misc_hi, mci_misc_lo;
- rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+ rdmsr(b->address, mci_misc_lo, mci_misc_hi);
if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
reset = 1; /* limit cannot be lower than err count */
@@ -94,35 +108,57 @@ static void threshold_restart_bank(struct threshold_bank *b,
(mci_misc_hi &= ~MASK_INT_TYPE_HI);
mci_misc_hi |= MASK_COUNT_EN_HI;
- wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
+ wrmsr(b->address, mci_misc_lo, mci_misc_hi);
}
+/* cpu init entry point, called from mce.c with preempt off */
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
- int bank;
- u32 mci_misc_lo, mci_misc_hi;
+ unsigned int bank, block;
unsigned int cpu = smp_processor_id();
+ u32 low = 0, high = 0, address = 0;
for (bank = 0; bank < NR_BANKS; ++bank) {
- rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);
+ for (block = 0; block < NR_BLOCKS; ++block) {
+ if (block == 0)
+ address = MSR_IA32_MC0_MISC + bank * 4;
+ else if (block == 1)
+ address = MCG_XBLK_ADDR
+ + ((low & MASK_BLKPTR_LO) >> 21);
+ else
+ ++address;
+
+ if (rdmsr_safe(address, &low, &high))
+ continue;
- /* !valid, !counter present, bios locked */
- if (!(mci_misc_hi & MASK_VALID_HI) ||
- !(mci_misc_hi & MASK_VALID_HI >> 1) ||
- (mci_misc_hi & MASK_VALID_HI >> 2))
- continue;
+ if (!(high & MASK_VALID_HI)) {
+ if (block)
+ continue;
+ else
+ break;
+ }
- per_cpu(bank_map, cpu) |= (1 << bank);
+ if (!(high & MASK_VALID_HI >> 1) ||
+ (high & MASK_VALID_HI >> 2))
+ continue;
+ if (!block)
+ per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
- if (shared_bank[bank] && cpu_core_id[cpu])
- continue;
+ if (shared_bank[bank] && c->cpu_core_id)
+ break;
#endif
+ high &= ~MASK_LVTOFF_HI;
+ high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
+ wrmsr(address, low, high);
- setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
- threshold_defaults.cpu = cpu;
- threshold_defaults.bank = bank;
- threshold_restart_bank(&threshold_defaults, 0, 0);
+ setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
+ THRESHOLD_APIC_VECTOR,
+ K8_APIC_EXT_INT_MSG_FIX, 0);
+
+ threshold_defaults.address = address;
+ threshold_restart_bank(&threshold_defaults, 0, 0);
+ }
}
}
@@ -137,8 +173,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
*/
asmlinkage void mce_threshold_interrupt(void)
{
- int bank;
+ unsigned int bank, block;
struct mce m;
+ u32 low = 0, high = 0, address = 0;
ack_APIC_irq();
exit_idle();
@@ -150,15 +187,42 @@ asmlinkage void mce_threshold_interrupt(void)
/* assume first bank caused it */
for (bank = 0; bank < NR_BANKS; ++bank) {
- m.bank = MCE_THRESHOLD_BASE + bank;
- rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);
+ for (block = 0; block < NR_BLOCKS; ++block) {
+ if (block == 0)
+ address = MSR_IA32_MC0_MISC + bank * 4;
+ else if (block == 1)
+ address = MCG_XBLK_ADDR
+ + ((low & MASK_BLKPTR_LO) >> 21);
+ else
+ ++address;
+
+ if (rdmsr_safe(address, &low, &high))
+ continue;
- if (m.misc & MASK_OVERFLOW) {
- mce_log(&m);
- goto out;
+ if (!(high & MASK_VALID_HI)) {
+ if (block)
+ continue;
+ else
+ break;
+ }
+
+ if (!(high & MASK_VALID_HI >> 1) ||
+ (high & MASK_VALID_HI >> 2))
+ continue;
+
+ if (high & MASK_OVERFLOW_HI) {
+ rdmsrl(address, m.misc);
+ rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
+ m.status);
+ m.bank = K8_MCE_THRESHOLD_BASE
+ + bank * NR_BLOCKS
+ + block;
+ mce_log(&m);
+ goto out;
+ }
}
}
- out:
+out:
irq_exit();
}
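The interrupt handler now folds both the bank and the block into the bank number it logs. A small sketch of the encoding and its inverse; K8_MCE_THRESHOLD_BASE and NR_BLOCKS are assumed values chosen for illustration, not taken from this hunk.

/* Illustrative only: encode/decode the synthetic bank number logged above. */
#include <stdio.h>

#define K8_MCE_THRESHOLD_BASE 129   /* assumed */
#define NR_BLOCKS             9     /* assumed */

int main(void)
{
	unsigned int bank = 4, block = 2;
	unsigned int logged = K8_MCE_THRESHOLD_BASE + bank * NR_BLOCKS + block;

	printf("logged %u -> bank %u, block %u\n", logged,
	       (logged - K8_MCE_THRESHOLD_BASE) / NR_BLOCKS,
	       (logged - K8_MCE_THRESHOLD_BASE) % NR_BLOCKS);
	return 0;
}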
@@ -166,20 +230,12 @@ asmlinkage void mce_threshold_interrupt(void)
* Sysfs Interface
*/
-static struct sysdev_class threshold_sysclass = {
- set_kset_name("threshold"),
-};
-
-static DEFINE_PER_CPU(struct sys_device, device_threshold);
-
struct threshold_attr {
- struct attribute attr;
- ssize_t(*show) (struct threshold_bank *, char *);
- ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
+ struct attribute attr;
+ ssize_t(*show) (struct threshold_block *, char *);
+ ssize_t(*store) (struct threshold_block *, const char *, size_t count);
};
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
-
static cpumask_t affinity_set(unsigned int cpu)
{
cpumask_t oldmask = current->cpus_allowed;
@@ -194,15 +250,15 @@ static void affinity_restore(cpumask_t oldmask)
set_cpus_allowed(current, oldmask);
}
-#define SHOW_FIELDS(name) \
- static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \
- { \
- return sprintf(buf, "%lx\n", (unsigned long) b->name); \
- }
+#define SHOW_FIELDS(name) \
+static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
+{ \
+ return sprintf(buf, "%lx\n", (unsigned long) b->name); \
+}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
-static ssize_t store_interrupt_enable(struct threshold_bank *b,
+static ssize_t store_interrupt_enable(struct threshold_block *b,
const char *buf, size_t count)
{
char *end;
@@ -219,7 +275,7 @@ static ssize_t store_interrupt_enable(struct threshold_bank *b,
return end - buf;
}
-static ssize_t store_threshold_limit(struct threshold_bank *b,
+static ssize_t store_threshold_limit(struct threshold_block *b,
const char *buf, size_t count)
{
char *end;
@@ -242,18 +298,18 @@ static ssize_t store_threshold_limit(struct threshold_bank *b,
return end - buf;
}
-static ssize_t show_error_count(struct threshold_bank *b, char *buf)
+static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
u32 high, low;
cpumask_t oldmask;
oldmask = affinity_set(b->cpu);
- rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
+ rdmsr(b->address, low, high);
affinity_restore(oldmask);
return sprintf(buf, "%x\n",
(high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}
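show_error_count() reports errors relative to the programmed limit: the hardware counter is preloaded to THRESHOLD_MAX - threshold_limit and counts up, so subtracting that bias recovers the number of errors seen so far. A worked example, assuming THRESHOLD_MAX is 0xFFF to match MASK_ERR_COUNT_HI:

/* Illustrative only: recover the error count from the raw counter field. */
#include <stdio.h>

#define THRESHOLD_MAX 0xFFF

int main(void)
{
	unsigned int threshold_limit = 10;                         /* sysfs value */
	unsigned int raw = (THRESHOLD_MAX - threshold_limit) + 3;  /* 3 errors so far */

	printf("error_count = %u\n",
	       (raw & 0xFFF) - (THRESHOLD_MAX - threshold_limit));
	return 0;
}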
-static ssize_t store_error_count(struct threshold_bank *b,
+static ssize_t store_error_count(struct threshold_block *b,
const char *buf, size_t count)
{
cpumask_t oldmask;
@@ -269,13 +325,13 @@ static ssize_t store_error_count(struct threshold_bank *b,
.store = _store, \
};
-#define ATTR_FIELDS(name) \
- static struct threshold_attr name = \
+#define RW_ATTR(name) \
+static struct threshold_attr name = \
THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
-ATTR_FIELDS(interrupt_enable);
-ATTR_FIELDS(threshold_limit);
-ATTR_FIELDS(error_count);
+RW_ATTR(interrupt_enable);
+RW_ATTR(threshold_limit);
+RW_ATTR(error_count);
static struct attribute *default_attrs[] = {
&interrupt_enable.attr,
@@ -284,12 +340,12 @@ static struct attribute *default_attrs[] = {
NULL
};
-#define to_bank(k) container_of(k,struct threshold_bank,kobj)
-#define to_attr(a) container_of(a,struct threshold_attr,attr)
+#define to_block(k) container_of(k, struct threshold_block, kobj)
+#define to_attr(a) container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
- struct threshold_bank *b = to_bank(kobj);
+ struct threshold_block *b = to_block(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->show ? a->show(b, buf) : -EIO;
@@ -299,7 +355,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
static ssize_t store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
- struct threshold_bank *b = to_bank(kobj);
+ struct threshold_block *b = to_block(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->store ? a->store(b, buf, count) : -EIO;
@@ -316,69 +372,174 @@ static struct kobj_type threshold_ktype = {
.default_attrs = default_attrs,
};
+static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
+ unsigned int bank,
+ unsigned int block,
+ u32 address)
+{
+ int err;
+ u32 low, high;
+ struct threshold_block *b = NULL;
+
+ if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
+ return 0;
+
+ if (rdmsr_safe(address, &low, &high))
+ goto recurse;
+
+ if (!(high & MASK_VALID_HI)) {
+ if (block)
+ goto recurse;
+ else
+ return 0;
+ }
+
+ if (!(high & MASK_VALID_HI >> 1) ||
+ (high & MASK_VALID_HI >> 2))
+ goto recurse;
+
+ b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+ memset(b, 0, sizeof(struct threshold_block));
+
+ b->block = block;
+ b->bank = bank;
+ b->cpu = cpu;
+ b->address = address;
+ b->interrupt_enable = 0;
+ b->threshold_limit = THRESHOLD_MAX;
+
+ INIT_LIST_HEAD(&b->miscj);
+
+ if (per_cpu(threshold_banks, cpu)[bank]->blocks)
+ list_add(&b->miscj,
+ &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+ else
+ per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+
+ kobject_set_name(&b->kobj, "misc%i", block);
+ b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
+ b->kobj.ktype = &threshold_ktype;
+ err = kobject_register(&b->kobj);
+ if (err)
+ goto out_free;
+recurse:
+ if (!block) {
+ address = (low & MASK_BLKPTR_LO) >> 21;
+ if (!address)
+ return 0;
+ address += MCG_XBLK_ADDR;
+ } else
+ ++address;
+
+ err = allocate_threshold_blocks(cpu, bank, ++block, address);
+ if (err)
+ goto out_free;
+
+ return err;
+
+out_free:
+ if (b) {
+ kobject_unregister(&b->kobj);
+ kfree(b);
+ }
+ return err;
+}
+
/* symlinks sibling shared banks to first core. first core owns dir/files. */
-static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
+static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
- int err = 0;
+ int i, err = 0;
struct threshold_bank *b = NULL;
+ cpumask_t oldmask = CPU_MASK_NONE;
+ char name[32];
+
+ sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
- if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
- char name[16];
- unsigned lcpu = first_cpu(cpu_core_map[cpu]);
- if (cpu_core_id[lcpu])
- goto out; /* first core not up yet */
+ if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
+ i = first_cpu(cpu_core_map[cpu]);
+
+ /* first core not up yet */
+ if (cpu_data[i].cpu_core_id)
+ goto out;
+
+ /* already linked */
+ if (per_cpu(threshold_banks, cpu)[bank])
+ goto out;
+
+ b = per_cpu(threshold_banks, i)[bank];
- b = per_cpu(threshold_banks, lcpu)[bank];
if (!b)
goto out;
- sprintf(name, "bank%i", bank);
- err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj,
+
+ err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
&b->kobj, name);
if (err)
goto out;
+
+ b->cpus = cpu_core_map[cpu];
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
#endif
- b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
+ b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
if (!b) {
err = -ENOMEM;
goto out;
}
memset(b, 0, sizeof(struct threshold_bank));
- b->cpu = cpu;
- b->bank = bank;
- b->interrupt_enable = 0;
- b->threshold_limit = THRESHOLD_MAX;
- kobject_set_name(&b->kobj, "bank%i", bank);
- b->kobj.parent = &per_cpu(device_threshold, cpu).kobj;
- b->kobj.ktype = &threshold_ktype;
-
+ kobject_set_name(&b->kobj, "threshold_bank%i", bank);
+ b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
+#ifndef CONFIG_SMP
+ b->cpus = CPU_MASK_ALL;
+#else
+ b->cpus = cpu_core_map[cpu];
+#endif
err = kobject_register(&b->kobj);
- if (err) {
- kfree(b);
- goto out;
- }
+ if (err)
+ goto out_free;
+
per_cpu(threshold_banks, cpu)[bank] = b;
- out:
+
+ oldmask = affinity_set(cpu);
+ err = allocate_threshold_blocks(cpu, bank, 0,
+ MSR_IA32_MC0_MISC + bank * 4);
+ affinity_restore(oldmask);
+
+ if (err)
+ goto out_free;
+
+ for_each_cpu_mask(i, b->cpus) {
+ if (i == cpu)
+ continue;
+
+ err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
+ &b->kobj, name);
+ if (err)
+ goto out;
+
+ per_cpu(threshold_banks, i)[bank] = b;
+ }
+
+ goto out;
+
+out_free:
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
+ kfree(b);
+out:
return err;
}
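On a shared bank, only the first core of a package gets real directories; its siblings receive symlinks named after the bank. Assuming the per-CPU mce sysdev shows up as machinecheckN (that naming is not part of this hunk), the resulting layout looks roughly like:

/sys/devices/system/machinecheck/machinecheck0/threshold_bank4/
        misc0/{interrupt_enable,threshold_limit,error_count}
        misc1/...
/sys/devices/system/machinecheck/machinecheck1/threshold_bank4
        -> symlink to the machinecheck0 directory above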
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
- int bank;
+ unsigned int bank;
int err = 0;
- per_cpu(device_threshold, cpu).id = cpu;
- per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
- err = sysdev_register(&per_cpu(device_threshold, cpu));
- if (err)
- goto out;
-
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
@@ -386,7 +547,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
if (err)
goto out;
}
- out:
+out:
return err;
}
@@ -397,92 +558,85 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
* of shared sysfs dir/files, and rest of the cores will be symlinked to it.
*/
-/* cpu hotplug call removes all symlinks before first core dies */
+static __cpuinit void deallocate_threshold_block(unsigned int cpu,
+ unsigned int bank)
+{
+ struct threshold_block *pos = NULL;
+ struct threshold_block *tmp = NULL;
+ struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
+
+ if (!head)
+ return;
+
+ list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+ kobject_unregister(&pos->kobj);
+ list_del(&pos->miscj);
+ kfree(pos);
+ }
+
+ kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+ per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+}
+
static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
{
+ int i = 0;
struct threshold_bank *b;
- char name[16];
+ char name[32];
b = per_cpu(threshold_banks, cpu)[bank];
+
if (!b)
return;
- if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
- sprintf(name, "bank%i", bank);
- sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- } else {
- kobject_unregister(&b->kobj);
- kfree(per_cpu(threshold_banks, cpu)[bank]);
+
+ if (!b->blocks)
+ goto free_out;
+
+ sprintf(name, "threshold_bank%i", bank);
+
+ /* sibling symlink */
+ if (shared_bank[bank] && b->blocks->cpu != cpu) {
+ sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
+ per_cpu(threshold_banks, i)[bank] = NULL;
+ return;
+ }
+
+ /* remove all sibling symlinks before unregistering */
+ for_each_cpu_mask(i, b->cpus) {
+ if (i == cpu)
+ continue;
+
+ sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
+ per_cpu(threshold_banks, i)[bank] = NULL;
}
+
+ deallocate_threshold_block(cpu, bank);
+
+free_out:
+ kobject_unregister(&b->kobj);
+ kfree(b);
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static __cpuinit void threshold_remove_device(unsigned int cpu)
{
- int bank;
+ unsigned int bank;
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
threshold_remove_bank(cpu, bank);
}
- sysdev_unregister(&per_cpu(device_threshold, cpu));
}
-/* link all existing siblings when first core comes up */
-static __cpuinit int threshold_create_symlinks(unsigned int cpu)
-{
- int bank, err = 0;
- unsigned int lcpu = 0;
-
- if (cpu_core_id[cpu])
- return 0;
- for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
- if (lcpu == cpu)
- continue;
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & 1 << bank))
- continue;
- if (!shared_bank[bank])
- continue;
- err = threshold_create_bank(lcpu, bank);
- }
- }
- return err;
-}
-
-/* remove all symlinks before first core dies. */
-static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
-{
- int bank;
- unsigned int lcpu = 0;
- if (cpu_core_id[cpu])
- return;
- for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
- if (lcpu == cpu)
- continue;
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & 1 << bank))
- continue;
- if (!shared_bank[bank])
- continue;
- threshold_remove_bank(lcpu, bank);
- }
- }
-}
#else /* !CONFIG_HOTPLUG_CPU */
-static __cpuinit void threshold_create_symlinks(unsigned int cpu)
-{
-}
-static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
-{
-}
static void threshold_remove_device(unsigned int cpu)
{
}
#endif
/* get notified when a cpu comes on/off */
-static int threshold_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
/* cpu was unsigned int to begin with */
@@ -494,13 +648,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
threshold_create_device(cpu);
- threshold_create_symlinks(cpu);
- break;
- case CPU_DOWN_PREPARE:
- threshold_remove_symlinks(cpu);
- break;
- case CPU_DOWN_FAILED:
- threshold_create_symlinks(cpu);
break;
case CPU_DEAD:
threshold_remove_device(cpu);
@@ -512,29 +659,22 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block threshold_cpu_notifier = {
+static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
.notifier_call = threshold_cpu_callback,
};
static __init int threshold_init_device(void)
{
- int err;
- int lcpu = 0;
-
- err = sysdev_class_register(&threshold_sysclass);
- if (err)
- goto out;
+ unsigned lcpu = 0;
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
- err = threshold_create_device(lcpu);
+ int err = threshold_create_device(lcpu);
if (err)
- goto out;
+ return err;
}
register_cpu_notifier(&threshold_cpu_notifier);
-
- out:
- return err;
+ return 0;
}
device_initcall(threshold_init_device);
diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c
index bac195c74bcc..9d0958ff547f 100644
--- a/arch/x86_64/kernel/module.c
+++ b/arch/x86_64/kernel/module.c
@@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs,
return -ENOSYS;
}
-extern void apply_alternatives(void *start, void *end);
-
int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
+ const Elf_Shdr *sechdrs,
+ struct module *me)
{
- const Elf_Shdr *s;
+ const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- /* look for .altinstructions to patch */
- for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
- void *seg;
- if (strcmp(".altinstructions", secstrings + s->sh_name))
- continue;
- seg = (void *)s->sh_addr;
- apply_alternatives(seg, seg + s->sh_size);
- }
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (!strcmp(".text", secstrings + s->sh_name))
+ text = s;
+ if (!strcmp(".altinstructions", secstrings + s->sh_name))
+ alt = s;
+ if (!strcmp(".smp_locks", secstrings + s->sh_name))
+			locks = s;
+ }
+
+ if (alt) {
+ /* patch .altinstructions */
+ void *aseg = (void *)alt->sh_addr;
+ apply_alternatives(aseg, aseg + alt->sh_size);
+ }
+ if (locks && text) {
+ void *lseg = (void *)locks->sh_addr;
+ void *tseg = (void *)text->sh_addr;
+ alternatives_smp_module_add(me, me->name,
+ lseg, lseg + locks->sh_size,
+ tseg, tseg + text->sh_size);
+ }
return 0;
}
void module_arch_cleanup(struct module *mod)
{
+ alternatives_smp_module_del(mod);
}
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 4e6357fe0ec3..0ef9cf2bc45e 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -15,11 +15,7 @@
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
@@ -27,14 +23,11 @@
#include <linux/kprobes.h>
#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
#include <asm/nmi.h>
-#include <asm/msr.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
-#include <asm/local.h>
#include <asm/mce.h>
+#include <asm/intel_arch_perfmon.h>
/*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -74,6 +67,9 @@ static unsigned int nmi_p4_cccr_val;
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
+#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
#define MSR_P4_MISC_ENABLE 0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
@@ -105,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void)
case X86_VENDOR_AMD:
return boot_cpu_data.x86 == 15;
case X86_VENDOR_INTEL:
- return boot_cpu_data.x86 == 15;
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return 1;
+ else
+ return (boot_cpu_data.x86 == 15);
}
return 0;
}
@@ -211,6 +210,8 @@ int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog);
+static void disable_intel_arch_watchdog(void);
+
static void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
@@ -223,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void)
if (boot_cpu_data.x86 == 15) {
wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
+ } else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ disable_intel_arch_watchdog();
}
break;
}
@@ -375,6 +378,53 @@ static void setup_k7_watchdog(void)
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
+static void disable_intel_arch_watchdog(void)
+{
+ unsigned ebx;
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+ */
+ ebx = cpuid_ebx(10);
+ if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+ unsigned int evntsel;
+ unsigned ebx;
+
+ /*
+ * Check whether the Architectural PerfMon supports
+ * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+ */
+ ebx = cpuid_ebx(10);
+ if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+ return 0;
+
+ nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+ clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+ clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+ evntsel = ARCH_PERFMON_EVENTSEL_INT
+ | ARCH_PERFMON_EVENTSEL_OS
+ | ARCH_PERFMON_EVENTSEL_USR
+ | ARCH_PERFMON_NMI_EVENT_SEL
+ | ARCH_PERFMON_NMI_EVENT_UMASK;
+
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+ wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+ return 1;
+}
+
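The architectural-perfmon watchdog reuses the existing trick of preloading the counter with a negative value, so it overflows and raises the NMI roughly nmi_hz times per second. A quick userspace illustration of the preload arithmetic; the cpu_khz value is made up:

/* Illustrative only: the counter preload used by the NMI watchdog. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu_khz = 2000000;   /* assumed 2 GHz part */
	unsigned int nmi_hz = 1;
	uint64_t preload = -(cpu_khz * 1000 / nmi_hz);

	printf("preload = 0x%llx, overflows after %llu cycles\n",
	       (unsigned long long)preload,
	       (unsigned long long)(cpu_khz * 1000 / nmi_hz));
	return 0;
}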
static int setup_p4_watchdog(void)
{
@@ -428,10 +478,16 @@ void setup_apic_nmi_watchdog(void)
setup_k7_watchdog();
break;
case X86_VENDOR_INTEL:
- if (boot_cpu_data.x86 != 15)
- return;
- if (!setup_p4_watchdog())
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ if (!setup_intel_arch_watchdog())
+ return;
+ } else if (boot_cpu_data.x86 == 15) {
+ if (!setup_p4_watchdog())
+ return;
+ } else {
return;
+ }
+
break;
default:
@@ -516,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
*/
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
apic_write(APIC_LVTPC, APIC_DM_NMI);
- }
+ } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+ /*
+ * For Intel based architectural perfmon
+ * - LVTPC is masked on interrupt and must be
+ * unmasked by the LVTPC handler.
+ */
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ }
wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
}
}
@@ -544,11 +607,13 @@ void set_nmi_callback(nmi_callback_t callback)
vmalloc_sync_all();
rcu_assign_pointer(nmi_callback, callback);
}
+EXPORT_SYMBOL_GPL(set_nmi_callback);
void unset_nmi_callback(void)
{
nmi_callback = dummy_nmi_callback;
}
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_SYSCTL
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
new file mode 100644
index 000000000000..d91cb843f54d
--- /dev/null
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -0,0 +1,1018 @@
+/*
+ * Derived from arch/powerpc/kernel/iommu.c
+ *
+ * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
+ * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/proto.h>
+#include <asm/calgary.h>
+#include <asm/tce.h>
+#include <asm/pci-direct.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+
+#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
+#define PCI_VENDOR_DEVICE_ID_CALGARY \
+ (PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)
+
+/* we need these for register space address calculation */
+#define START_ADDRESS 0xfe000000
+#define CHASSIS_BASE 0
+#define ONE_BASED_CHASSIS_NUM 1
+
+/* register offsets inside the host bridge space */
+#define PHB_CSR_OFFSET 0x0110
+#define PHB_PLSSR_OFFSET 0x0120
+#define PHB_CONFIG_RW_OFFSET 0x0160
+#define PHB_IOBASE_BAR_LOW 0x0170
+#define PHB_IOBASE_BAR_HIGH 0x0180
+#define PHB_MEM_1_LOW 0x0190
+#define PHB_MEM_1_HIGH 0x01A0
+#define PHB_IO_ADDR_SIZE 0x01B0
+#define PHB_MEM_1_SIZE 0x01C0
+#define PHB_MEM_ST_OFFSET 0x01D0
+#define PHB_AER_OFFSET 0x0200
+#define PHB_CONFIG_0_HIGH 0x0220
+#define PHB_CONFIG_0_LOW 0x0230
+#define PHB_CONFIG_0_END 0x0240
+#define PHB_MEM_2_LOW 0x02B0
+#define PHB_MEM_2_HIGH 0x02C0
+#define PHB_MEM_2_SIZE_HIGH 0x02D0
+#define PHB_MEM_2_SIZE_LOW 0x02E0
+#define PHB_DOSHOLE_OFFSET 0x08E0
+
+/* PHB_CONFIG_RW */
+#define PHB_TCE_ENABLE 0x20000000
+#define PHB_SLOT_DISABLE 0x1C000000
+#define PHB_DAC_DISABLE 0x01000000
+#define PHB_MEM2_ENABLE 0x00400000
+#define PHB_MCSR_ENABLE 0x00100000
+/* TAR (Table Address Register) */
+#define TAR_SW_BITS 0x0000ffffffff800fUL
+#define TAR_VALID 0x0000000000000008UL
+/* CSR (Channel/DMA Status Register) */
+#define CSR_AGENT_MASK 0xffe0ffff
+
+#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
+#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */
+#define PHBS_PER_CALGARY 4
+
+/* register offsets in Calgary's internal register space */
+static const unsigned long tar_offsets[] = {
+ 0x0580 /* TAR0 */,
+ 0x0588 /* TAR1 */,
+ 0x0590 /* TAR2 */,
+ 0x0598 /* TAR3 */
+};
+
+static const unsigned long split_queue_offsets[] = {
+ 0x4870 /* SPLIT QUEUE 0 */,
+ 0x5870 /* SPLIT QUEUE 1 */,
+ 0x6870 /* SPLIT QUEUE 2 */,
+ 0x7870 /* SPLIT QUEUE 3 */
+};
+
+static const unsigned long phb_offsets[] = {
+ 0x8000 /* PHB0 */,
+ 0x9000 /* PHB1 */,
+ 0xA000 /* PHB2 */,
+ 0xB000 /* PHB3 */
+};
+
+void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES];
+unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
+static int translate_empty_slots __read_mostly = 0;
+static int calgary_detected __read_mostly = 0;
+
+/*
+ * bitmap of PHBs for which the user requested that translation
+ * be disabled.
+ */
+static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM);
+
+static void tce_cache_blast(struct iommu_table *tbl);
+
+/* enable this to stress test the chip's TCE cache */
+#ifdef CONFIG_IOMMU_DEBUG
+static inline void tce_cache_blast_stress(struct iommu_table *tbl)
+{
+ tce_cache_blast(tbl);
+}
+#else
+static inline void tce_cache_blast_stress(struct iommu_table *tbl)
+{
+}
+#endif /* CONFIG_IOMMU_DEBUG */
+
+static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
+{
+ unsigned int npages;
+
+ npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
+ npages >>= PAGE_SHIFT;
+
+ return npages;
+}
+
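num_dma_pages() rounds the start of the region down and its end up to page boundaries before counting. A self-contained sketch of the same computation, assuming 4K pages:

/* Illustrative only: page count covering a DMA region. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
{
	return (PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%u\n", num_dma_pages(0x0FFC, 100));  /* crosses a boundary -> 2 */
	printf("%u\n", num_dma_pages(0x2000, 100));  /* within one page    -> 1 */
	return 0;
}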
+static inline int translate_phb(struct pci_dev* dev)
+{
+ int disabled = test_bit(dev->bus->number, translation_disabled);
+ return !disabled;
+}
+
+static void iommu_range_reserve(struct iommu_table *tbl,
+ unsigned long start_addr, unsigned int npages)
+{
+ unsigned long index;
+ unsigned long end;
+
+ index = start_addr >> PAGE_SHIFT;
+
+ /* bail out if we're asked to reserve a region we don't cover */
+ if (index >= tbl->it_size)
+ return;
+
+ end = index + npages;
+ if (end > tbl->it_size) /* don't go off the table */
+ end = tbl->it_size;
+
+ while (index < end) {
+ if (test_bit(index, tbl->it_map))
+ printk(KERN_ERR "Calgary: entry already allocated at "
+ "0x%lx tbl %p dma 0x%lx npages %u\n",
+ index, tbl, start_addr, npages);
+ ++index;
+ }
+ set_bit_string(tbl->it_map, start_addr >> PAGE_SHIFT, npages);
+}
+
+static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+ unsigned int npages)
+{
+ unsigned long offset;
+
+ BUG_ON(npages == 0);
+
+ offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
+ tbl->it_size, npages);
+ if (offset == ~0UL) {
+ tce_cache_blast(tbl);
+ offset = find_next_zero_string(tbl->it_map, 0,
+ tbl->it_size, npages);
+ if (offset == ~0UL) {
+ printk(KERN_WARNING "Calgary: IOMMU full.\n");
+ if (panic_on_overflow)
+ panic("Calgary: fix the allocator.\n");
+ else
+ return bad_dma_address;
+ }
+ }
+
+ set_bit_string(tbl->it_map, offset, npages);
+ tbl->it_hint = offset + npages;
+ BUG_ON(tbl->it_hint > tbl->it_size);
+
+ return offset;
+}
+
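iommu_range_alloc() is a first-fit search that starts at the rolling hint and, if that fails, flushes the TCE cache and rescans from the beginning. A toy userspace sketch of the same strategy; find_next_zero_string and set_bit_string are kernel bitmap helpers, so a byte-per-slot map stands in for them here:

/* Toy sketch of first-fit-with-hint allocation; not the kernel code. */
#include <stdio.h>
#include <string.h>

#define TBL_SIZE 32

static unsigned char map[TBL_SIZE];

static long range_alloc(unsigned long hint, unsigned int npages)
{
	unsigned long start, i, from = hint;

retry:
	for (start = from; start + npages <= TBL_SIZE; start++) {
		for (i = 0; i < npages; i++)
			if (map[start + i])
				break;
		if (i == npages) {
			memset(&map[start], 1, npages);
			return start;
		}
	}
	if (from) {        /* second pass from 0 (real code flushes the TCE cache first) */
		from = 0;
		goto retry;
	}
	return -1;         /* table full */
}

int main(void)
{
	printf("first alloc at %ld\n", range_alloc(8, 4));
	printf("second alloc at %ld\n", range_alloc(12, 4));
	return 0;
}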
+static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
+ unsigned int npages, int direction)
+{
+ unsigned long entry, flags;
+ dma_addr_t ret = bad_dma_address;
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ entry = iommu_range_alloc(tbl, npages);
+
+ if (unlikely(entry == bad_dma_address))
+ goto error;
+
+ /* set the return dma address */
+ ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
+
+ /* put the TCEs in the HW table */
+ tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
+ direction);
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+
+ return ret;
+
+error:
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+ printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
+ "iommu %p\n", npages, tbl);
+ return bad_dma_address;
+}
+
+static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+ unsigned int npages)
+{
+ unsigned long entry;
+ unsigned long i;
+
+ entry = dma_addr >> PAGE_SHIFT;
+
+ BUG_ON(entry + npages > tbl->it_size);
+
+ tce_free(tbl, entry, npages);
+
+ for (i = 0; i < npages; ++i) {
+ if (!test_bit(entry + i, tbl->it_map))
+ printk(KERN_ERR "Calgary: bit is off at 0x%lx "
+ "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
+ entry + i, tbl, dma_addr, entry, npages);
+ }
+
+ __clear_bit_string(tbl->it_map, entry, npages);
+
+ tce_cache_blast_stress(tbl);
+}
+
+static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+ unsigned int npages)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ __iommu_free(tbl, dma_addr, npages);
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+}
+
+static void __calgary_unmap_sg(struct iommu_table *tbl,
+ struct scatterlist *sglist, int nelems, int direction)
+{
+ while (nelems--) {
+ unsigned int npages;
+ dma_addr_t dma = sglist->dma_address;
+ unsigned int dmalen = sglist->dma_length;
+
+ if (dmalen == 0)
+ break;
+
+ npages = num_dma_pages(dma, dmalen);
+ __iommu_free(tbl, dma, npages);
+ sglist++;
+ }
+}
+
+void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, int direction)
+{
+ unsigned long flags;
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+
+ if (!translate_phb(to_pci_dev(dev)))
+ return;
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ __calgary_unmap_sg(tbl, sglist, nelems, direction);
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+}
+
+static int calgary_nontranslate_map_sg(struct device* dev,
+ struct scatterlist *sg, int nelems, int direction)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ s->dma_length = s->length;
+ }
+ return nelems;
+}
+
+int calgary_map_sg(struct device *dev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+ unsigned long flags;
+ unsigned long vaddr;
+ unsigned int npages;
+ unsigned long entry;
+ int i;
+
+ if (!translate_phb(to_pci_dev(dev)))
+ return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
+
+ spin_lock_irqsave(&tbl->it_lock, flags);
+
+ for (i = 0; i < nelems; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+
+ vaddr = (unsigned long)page_address(s->page) + s->offset;
+ npages = num_dma_pages(vaddr, s->length);
+
+ entry = iommu_range_alloc(tbl, npages);
+ if (entry == bad_dma_address) {
+ /* makes sure unmap knows to stop */
+ s->dma_length = 0;
+ goto error;
+ }
+
+ s->dma_address = (entry << PAGE_SHIFT) | s->offset;
+
+ /* insert into HW table */
+ tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
+ direction);
+
+ s->dma_length = s->length;
+ }
+
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+
+ return nelems;
+error:
+ __calgary_unmap_sg(tbl, sg, nelems, direction);
+ for (i = 0; i < nelems; i++) {
+ sg[i].dma_address = bad_dma_address;
+ sg[i].dma_length = 0;
+ }
+ spin_unlock_irqrestore(&tbl->it_lock, flags);
+ return 0;
+}
+
+dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
+ size_t size, int direction)
+{
+ dma_addr_t dma_handle = bad_dma_address;
+ unsigned long uaddr;
+ unsigned int npages;
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+
+ uaddr = (unsigned long)vaddr;
+ npages = num_dma_pages(uaddr, size);
+
+ if (translate_phb(to_pci_dev(dev)))
+ dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+ else
+ dma_handle = virt_to_bus(vaddr);
+
+ return dma_handle;
+}
+
+void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
+ unsigned int npages;
+
+ if (!translate_phb(to_pci_dev(dev)))
+ return;
+
+ npages = num_dma_pages(dma_handle, size);
+ iommu_free(tbl, dma_handle, npages);
+}
+
+void* calgary_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *ret = NULL;
+ dma_addr_t mapping;
+ unsigned int npages, order;
+ struct iommu_table *tbl;
+
+ tbl = to_pci_dev(dev)->bus->self->sysdata;
+
+ size = PAGE_ALIGN(size); /* size rounded up to full pages */
+ npages = size >> PAGE_SHIFT;
+ order = get_order(size);
+
+ /* alloc enough pages (and possibly more) */
+ ret = (void *)__get_free_pages(flag, order);
+ if (!ret)
+ goto error;
+ memset(ret, 0, size);
+
+ if (translate_phb(to_pci_dev(dev))) {
+ /* set up tces to cover the allocated range */
+ mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+ if (mapping == bad_dma_address)
+ goto free;
+
+ *dma_handle = mapping;
+ } else /* non translated slot */
+ *dma_handle = virt_to_bus(ret);
+
+ return ret;
+
+free:
+ free_pages((unsigned long)ret, get_order(size));
+ ret = NULL;
+error:
+ return ret;
+}
+
+static struct dma_mapping_ops calgary_dma_ops = {
+ .alloc_coherent = calgary_alloc_coherent,
+ .map_single = calgary_map_single,
+ .unmap_single = calgary_unmap_single,
+ .map_sg = calgary_map_sg,
+ .unmap_sg = calgary_unmap_sg,
+};
+
+static inline int busno_to_phbid(unsigned char num)
+{
+ return bus_to_phb(num) % PHBS_PER_CALGARY;
+}
+
+static inline unsigned long split_queue_offset(unsigned char num)
+{
+ size_t idx = busno_to_phbid(num);
+
+ return split_queue_offsets[idx];
+}
+
+static inline unsigned long tar_offset(unsigned char num)
+{
+ size_t idx = busno_to_phbid(num);
+
+ return tar_offsets[idx];
+}
+
+static inline unsigned long phb_offset(unsigned char num)
+{
+ size_t idx = busno_to_phbid(num);
+
+ return phb_offsets[idx];
+}
+
+static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
+{
+ unsigned long target = ((unsigned long)bar) | offset;
+ return (void __iomem*)target;
+}
+
+static void tce_cache_blast(struct iommu_table *tbl)
+{
+ u64 val;
+ u32 aer;
+ int i = 0;
+ void __iomem *bbar = tbl->bbar;
+ void __iomem *target;
+
+ /* disable arbitration on the bus */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
+ aer = readl(target);
+ writel(0, target);
+
+ /* read plssr to ensure it got there */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
+ val = readl(target);
+
+ /* poll split queues until all DMA activity is done */
+ target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
+ do {
+ val = readq(target);
+ i++;
+ } while ((val & 0xff) != 0xff && i < 100);
+ if (i == 100)
+ printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
+ "continuing anyway\n");
+
+ /* invalidate TCE cache */
+ target = calgary_reg(bbar, tar_offset(tbl->it_busno));
+ writeq(tbl->tar_val, target);
+
+ /* enable arbitration */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
+ writel(aer, target);
+ (void)readl(target); /* flush */
+}
+
+static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
+ u64 limit)
+{
+ unsigned int numpages;
+
+ limit = limit | 0xfffff;
+ limit++;
+
+ numpages = ((limit - start) >> PAGE_SHIFT);
+ iommu_range_reserve(dev->sysdata, start, numpages);
+}
+
+static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
+{
+ void __iomem *target;
+ u64 low, high, sizelow;
+ u64 start, limit;
+ struct iommu_table *tbl = dev->sysdata;
+ unsigned char busnum = dev->bus->number;
+ void __iomem *bbar = tbl->bbar;
+
+ /* peripheral MEM_1 region */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
+ low = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
+ high = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
+ sizelow = be32_to_cpu(readl(target));
+
+ start = (high << 32) | low;
+ limit = sizelow;
+
+ calgary_reserve_mem_region(dev, start, limit);
+}
+
+static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
+{
+ void __iomem *target;
+ u32 val32;
+ u64 low, high, sizelow, sizehigh;
+ u64 start, limit;
+ struct iommu_table *tbl = dev->sysdata;
+ unsigned char busnum = dev->bus->number;
+ void __iomem *bbar = tbl->bbar;
+
+ /* is it enabled? */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ if (!(val32 & PHB_MEM2_ENABLE))
+ return;
+
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
+ low = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
+ high = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
+ sizelow = be32_to_cpu(readl(target));
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
+ sizehigh = be32_to_cpu(readl(target));
+
+ start = (high << 32) | low;
+ limit = (sizehigh << 32) | sizelow;
+
+ calgary_reserve_mem_region(dev, start, limit);
+}
+
+/*
+ * some regions of the IO address space do not get translated, so we
+ * must not give devices IO addresses in those regions. The regions
+ * are the 640KB-1MB region and the two PCI peripheral memory holes.
+ * Reserve all of them in the IOMMU bitmap to avoid giving them out
+ * later.
+ */
+static void __init calgary_reserve_regions(struct pci_dev *dev)
+{
+ unsigned int npages;
+ void __iomem *bbar;
+ unsigned char busnum;
+ u64 start;
+ struct iommu_table *tbl = dev->sysdata;
+
+ bbar = tbl->bbar;
+ busnum = dev->bus->number;
+
+ /* reserve bad_dma_address in case it's a legal address */
+ iommu_range_reserve(tbl, bad_dma_address, 1);
+
+ /* avoid the BIOS/VGA first 640KB-1MB region */
+ start = (640 * 1024);
+ npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
+ iommu_range_reserve(tbl, start, npages);
+
+ /* reserve the two PCI peripheral memory regions in IO space */
+ calgary_reserve_peripheral_mem_1(dev);
+ calgary_reserve_peripheral_mem_2(dev);
+}
+
+static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
+{
+ u64 val64;
+ u64 table_phys;
+ void __iomem *target;
+ int ret;
+ struct iommu_table *tbl;
+
+ /* build TCE tables for each PHB */
+ ret = build_tce_table(dev, bbar);
+ if (ret)
+ return ret;
+
+ calgary_reserve_regions(dev);
+
+ /* set TARs for each PHB */
+ target = calgary_reg(bbar, tar_offset(dev->bus->number));
+ val64 = be64_to_cpu(readq(target));
+
+ /* zero out all TAR bits under sw control */
+ val64 &= ~TAR_SW_BITS;
+
+ tbl = dev->sysdata;
+ table_phys = (u64)__pa(tbl->it_base);
+ val64 |= table_phys;
+
+ BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
+ val64 |= (u64) specified_table_size;
+
+ tbl->tar_val = cpu_to_be64(val64);
+ writeq(tbl->tar_val, target);
+ readq(target); /* flush */
+
+ return 0;
+}
+
+static void __init calgary_free_tar(struct pci_dev *dev)
+{
+ u64 val64;
+ struct iommu_table *tbl = dev->sysdata;
+ void __iomem *target;
+
+ target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
+ val64 = be64_to_cpu(readq(target));
+ val64 &= ~TAR_SW_BITS;
+ writeq(cpu_to_be64(val64), target);
+ readq(target); /* flush */
+
+ kfree(tbl);
+ dev->sysdata = NULL;
+}
+
+static void calgary_watchdog(unsigned long data)
+{
+ struct pci_dev *dev = (struct pci_dev *)data;
+ struct iommu_table *tbl = dev->sysdata;
+ void __iomem *bbar = tbl->bbar;
+ u32 val32;
+ void __iomem *target;
+
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+
+ /* If no error, the agent ID in the CSR is not valid */
+ if (val32 & CSR_AGENT_MASK) {
+ printk(KERN_EMERG "calgary_watchdog: DMA error on bus %d, "
+ "CSR = %#x\n", dev->bus->number, val32);
+ writel(0, target);
+
+ /* Disable bus that caused the error */
+ target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
+ PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ val32 |= PHB_SLOT_DISABLE;
+ writel(cpu_to_be32(val32), target);
+ readl(target); /* flush */
+ } else {
+ /* Reset the timer */
+ mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
+ }
+}
+
+static void __init calgary_enable_translation(struct pci_dev *dev)
+{
+ u32 val32;
+ unsigned char busnum;
+ void __iomem *target;
+ void __iomem *bbar;
+ struct iommu_table *tbl;
+
+ busnum = dev->bus->number;
+ tbl = dev->sysdata;
+ bbar = tbl->bbar;
+
+ /* enable TCE in PHB Config Register */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
+
+ printk(KERN_INFO "Calgary: enabling translation on PHB %d\n", busnum);
+ printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
+ "bus.\n");
+
+ writel(cpu_to_be32(val32), target);
+ readl(target); /* flush */
+
+ init_timer(&tbl->watchdog_timer);
+ tbl->watchdog_timer.function = &calgary_watchdog;
+ tbl->watchdog_timer.data = (unsigned long)dev;
+ mod_timer(&tbl->watchdog_timer, jiffies);
+}
+
+static void __init calgary_disable_translation(struct pci_dev *dev)
+{
+ u32 val32;
+ unsigned char busnum;
+ void __iomem *target;
+ void __iomem *bbar;
+ struct iommu_table *tbl;
+
+ busnum = dev->bus->number;
+ tbl = dev->sysdata;
+ bbar = tbl->bbar;
+
+ /* disable TCE in PHB Config Register */
+ target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
+ val32 = be32_to_cpu(readl(target));
+ val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);
+
+ printk(KERN_INFO "Calgary: disabling translation on PHB %d!\n", busnum);
+ writel(cpu_to_be32(val32), target);
+ readl(target); /* flush */
+
+ del_timer_sync(&tbl->watchdog_timer);
+}
+
+static inline unsigned int __init locate_register_space(struct pci_dev *dev)
+{
+ int rionodeid;
+ u32 address;
+
+ rionodeid = (dev->bus->number % 15 > 4) ? 3 : 2;
+ /*
+ * register space address calculation as follows:
+ * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
+ * ChassisBase is always zero for x366/x260/x460
+ * RioNodeId is 2 for first Calgary, 3 for second Calgary
+ */
+ address = START_ADDRESS -
+ (0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 15)) +
+ (0x100000) * (rionodeid - CHASSIS_BASE);
+ return address;
+}
+
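The comment above gives the BBAR formula in shorthand; a worked example of the same arithmetic for a couple of bus numbers (the bus numbers are made up):

/* Illustrative only: register space addresses for sample bus numbers. */
#include <stdio.h>

#define START_ADDRESS         0xfe000000u
#define CHASSIS_BASE          0
#define ONE_BASED_CHASSIS_NUM 1

static unsigned int bbar_address(unsigned int busno)
{
	int rionodeid = (busno % 15 > 4) ? 3 : 2;

	return START_ADDRESS
	       - 0x800000 * (ONE_BASED_CHASSIS_NUM + busno / 15)
	       + 0x100000 * (rionodeid - CHASSIS_BASE);
}

int main(void)
{
	printf("bus 0x00 -> BBAR 0x%x\n", bbar_address(0x00));
	printf("bus 0x06 -> BBAR 0x%x\n", bbar_address(0x06));
	return 0;
}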
+static int __init calgary_init_one_nontraslated(struct pci_dev *dev)
+{
+ dev->sysdata = NULL;
+ dev->bus->self = dev;
+
+ return 0;
+}
+
+static int __init calgary_init_one(struct pci_dev *dev)
+{
+ u32 address;
+ void __iomem *bbar;
+ int ret;
+
+ address = locate_register_space(dev);
+ /* map entire 1MB of Calgary config space */
+ bbar = ioremap_nocache(address, 1024 * 1024);
+ if (!bbar) {
+ ret = -ENODATA;
+ goto done;
+ }
+
+ ret = calgary_setup_tar(dev, bbar);
+ if (ret)
+ goto iounmap;
+
+ dev->bus->self = dev;
+ calgary_enable_translation(dev);
+
+ return 0;
+
+iounmap:
+ iounmap(bbar);
+done:
+ return ret;
+}
+
+static int __init calgary_init(void)
+{
+ int i, ret = -ENODEV;
+ struct pci_dev *dev = NULL;
+
+ for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) {
+ dev = pci_get_device(PCI_VENDOR_ID_IBM,
+ PCI_DEVICE_ID_IBM_CALGARY,
+ dev);
+ if (!dev)
+ break;
+ if (!translate_phb(dev)) {
+ calgary_init_one_nontraslated(dev);
+ continue;
+ }
+ if (!tce_table_kva[i] && !translate_empty_slots) {
+ pci_dev_put(dev);
+ continue;
+ }
+ ret = calgary_init_one(dev);
+ if (ret)
+ goto error;
+ }
+
+ return ret;
+
+error:
+ for (i--; i >= 0; i--) {
+ dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
+ PCI_DEVICE_ID_IBM_CALGARY,
+ dev);
+ if (!translate_phb(dev)) {
+ pci_dev_put(dev);
+ continue;
+ }
+ if (!tce_table_kva[i] && !translate_empty_slots)
+ continue;
+ calgary_disable_translation(dev);
+ calgary_free_tar(dev);
+ pci_dev_put(dev);
+ }
+
+ return ret;
+}
+
+static inline int __init determine_tce_table_size(u64 ram)
+{
+ int ret;
+
+ if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
+ return specified_table_size;
+
+ /*
+ * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
+ * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
+ * larger table size has twice as many entries, so shift the
+ * max ram address by 13 to divide by 8K and then look at the
+ * order of the result to choose between 0-7.
+ */
+ ret = get_order(ram >> 13);
+ if (ret > TCE_TABLE_SIZE_8M)
+ ret = TCE_TABLE_SIZE_8M;
+
+ return ret;
+}
+
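The size-selection logic in the comment above can be checked with a quick userspace sketch: size 0 holds 8K entries (64K of table, presumably 8-byte entries) and each step doubles, so the order of ram >> 13 picks a size from 0 to 7. The helper below mimics get_order() for illustration.

/* Illustrative only: TCE table size chosen for a few RAM sizes. */
#include <stdio.h>

#define TCE_TABLE_SIZE_8M 7   /* assumed, matches the 0..7 range above */

static int order(unsigned long long bytes)   /* smallest n with 4K << n >= bytes */
{
	unsigned long long pages = (bytes + 4095) >> 12;
	int n = 0;

	while ((1ULL << n) < pages)
		n++;
	return n;
}

int main(void)
{
	unsigned long long ram;

	for (ram = 1ULL << 30; ram <= 1ULL << 34; ram <<= 1) {
		int ret = order(ram >> 13);
		if (ret > TCE_TABLE_SIZE_8M)
			ret = TCE_TABLE_SIZE_8M;
		printf("%llu GB RAM -> table size %d\n", ram >> 30, ret);
	}
	return 0;
}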
+void __init detect_calgary(void)
+{
+ u32 val;
+ int bus, table_idx;
+ void *tbl;
+ int detected = 0;
+
+ /*
+ * if the user specified iommu=off or iommu=soft or we found
+ * another HW IOMMU already, bail out.
+ */
+ if (swiotlb || no_iommu || iommu_detected)
+ return;
+
+ specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
+
+ for (bus = 0, table_idx = 0;
+ bus <= num_online_nodes() * MAX_PHB_BUS_NUM;
+ bus++) {
+ BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM);
+ if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
+ continue;
+ if (test_bit(bus, translation_disabled)) {
+ printk(KERN_INFO "Calgary: translation is disabled for "
+ "PHB 0x%x\n", bus);
+ /* skip this phb, don't allocate a tbl for it */
+ tce_table_kva[table_idx] = NULL;
+ table_idx++;
+ continue;
+ }
+ /*
+ * scan the first slot of the PCI bus to see if there
+ * are any devices present
+ */
+ val = read_pci_config(bus, 1, 0, 0);
+ if (val != 0xffffffff || translate_empty_slots) {
+ tbl = alloc_tce_table();
+ if (!tbl)
+ goto cleanup;
+ detected = 1;
+ } else
+ tbl = NULL;
+
+ tce_table_kva[table_idx] = tbl;
+ table_idx++;
+ }
+
+ if (detected) {
+ iommu_detected = 1;
+ calgary_detected = 1;
+ printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
+ "TCE table spec is %d.\n", specified_table_size);
+ }
+ return;
+
+cleanup:
+ for (--table_idx; table_idx >= 0; --table_idx)
+ if (tce_table_kva[table_idx])
+ free_tce_table(tce_table_kva[table_idx]);
+}
+
+int __init calgary_iommu_init(void)
+{
+ int ret;
+
+ if (no_iommu || swiotlb)
+ return -ENODEV;
+
+ if (!calgary_detected)
+ return -ENODEV;
+
+ /* ok, we're trying to use Calgary - let's roll */
+ printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
+
+ ret = calgary_init();
+ if (ret) {
+ printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
+ "falling back to no_iommu\n", ret);
+ if (end_pfn > MAX_DMA32_PFN)
+ printk(KERN_ERR "WARNING more than 4GB of memory, "
+ "32bit PCI may malfunction.\n");
+ return ret;
+ }
+
+ force_iommu = 1;
+ dma_ops = &calgary_dma_ops;
+
+ return 0;
+}
+
+static int __init calgary_parse_options(char *p)
+{
+ unsigned int bridge;
+ size_t len;
+ char* endp;
+
+ while (*p) {
+ if (!strncmp(p, "64k", 3))
+ specified_table_size = TCE_TABLE_SIZE_64K;
+ else if (!strncmp(p, "128k", 4))
+ specified_table_size = TCE_TABLE_SIZE_128K;
+ else if (!strncmp(p, "256k", 4))
+ specified_table_size = TCE_TABLE_SIZE_256K;
+ else if (!strncmp(p, "512k", 4))
+ specified_table_size = TCE_TABLE_SIZE_512K;
+ else if (!strncmp(p, "1M", 2))
+ specified_table_size = TCE_TABLE_SIZE_1M;
+ else if (!strncmp(p, "2M", 2))
+ specified_table_size = TCE_TABLE_SIZE_2M;
+ else if (!strncmp(p, "4M", 2))
+ specified_table_size = TCE_TABLE_SIZE_4M;
+ else if (!strncmp(p, "8M", 2))
+ specified_table_size = TCE_TABLE_SIZE_8M;
+
+ len = strlen("translate_empty_slots");
+ if (!strncmp(p, "translate_empty_slots", len))
+ translate_empty_slots = 1;
+
+ len = strlen("disable");
+ if (!strncmp(p, "disable", len)) {
+ p += len;
+ if (*p == '=')
+ ++p;
+ if (*p == '\0')
+ break;
+ bridge = simple_strtol(p, &endp, 0);
+ if (p == endp)
+ break;
+
+ if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) {
+ printk(KERN_INFO "Calgary: disabling "
+ "translation for PHB 0x%x\n", bridge);
+ set_bit(bridge, translation_disabled);
+ }
+ }
+
+ p = strpbrk(p, ",");
+ if (!p)
+ break;
+
+ p++; /* skip ',' */
+ }
+ return 1;
+}
+__setup("calgary=", calgary_parse_options);
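For reference, the parser above takes a comma-separated option string on the kernel command line. Something like the following (the PHB number is purely illustrative) selects a 256K TCE table, enables translation of empty slots, and disables translation on PHB 0x16:

	calgary=256k,translate_empty_slots,disable=0x16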
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index a9275c9557cf..9c44f4f2433d 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/proto.h>
+#include <asm/calgary.h>
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);
@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly= 0;
#endif
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly = 0;
+
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
struct device fallback_dev = {
.bus_id = "fallback device",
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_32BIT_MASK,
.dma_mask = &fallback_dev.coherent_dma_mask,
};
@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
dev = &fallback_dev;
dma_mask = dev->coherent_dma_mask;
if (dma_mask == 0)
- dma_mask = 0xffffffff;
+ dma_mask = DMA_32BIT_MASK;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
larger than 16MB and in this case we have a chance of
finding fitting memory in the next higher zone first. If
not retry with true GFP_DMA. -AK */
- if (dma_mask <= 0xffffffff)
+ if (dma_mask <= DMA_32BIT_MASK)
gfp |= GFP_DMA32;
again:
@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* Don't use the 16MB ZONE_DMA unless absolutely
needed. It's better to use remapping first. */
- if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
+ if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again;
}
@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
- if (mask < 0x00ffffff)
+ if (mask < DMA_24BIT_MASK)
return 0;
/* Tell the device to use SAC when IOMMU force is on. This
@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
SAC for these. Assume all masks <= 40 bits are of this
type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
+ if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
return 0;
}
@@ -266,7 +270,7 @@ __init int iommu_setup(char *p)
swiotlb = 1;
#endif
-#ifdef CONFIG_GART_IOMMU
+#ifdef CONFIG_IOMMU
gart_parse_options(p);
#endif
@@ -276,3 +280,40 @@ __init int iommu_setup(char *p)
}
return 1;
}
+__setup("iommu=", iommu_setup);
+
+void __init pci_iommu_alloc(void)
+{
+ /*
+ * The order of these functions is important for
+ * fall-back/fail-over reasons
+ */
+#ifdef CONFIG_IOMMU
+ iommu_hole_init();
+#endif
+
+#ifdef CONFIG_CALGARY_IOMMU
+ detect_calgary();
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ pci_swiotlb_init();
+#endif
+}
+
+static int __init pci_iommu_init(void)
+{
+#ifdef CONFIG_CALGARY_IOMMU
+ calgary_iommu_init();
+#endif
+
+#ifdef CONFIG_IOMMU
+ gart_iommu_init();
+#endif
+
+ no_iommu_init();
+ return 0;
+}
+
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 82a7c9bfdfa0..4ca674d16b09 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -32,6 +32,7 @@
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
+#include <asm/k8.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */
@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */
also seen with Qlogic at least). */
int iommu_fullflush = 1;
-#define MAX_NB 8
-
/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry;
#define to_pages(addr,size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-#define for_all_nb(dev) \
- dev = NULL; \
- while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
-
-static struct pci_dev *northbridges[MAX_NB];
-static u32 northbridge_flush_word[MAX_NB];
-
#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
@@ -93,7 +85,7 @@ static unsigned long alloc_iommu(int size)
offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
if (offset == -1) {
need_flush = 1;
- offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
+ offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
}
if (offset != -1) {
set_bit_string(iommu_gart_bitmap, offset, size);
@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size)
/*
* Use global flush state to avoid races with multiple flushers.
*/
-static void flush_gart(struct device *dev)
+static void flush_gart(void)
{
unsigned long flags;
- int flushed = 0;
- int i, max;
-
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- if (need_flush) {
- max = 0;
- for (i = 0; i < MAX_NB; i++) {
- if (!northbridges[i])
- continue;
- pci_write_config_dword(northbridges[i], 0x9c,
- northbridge_flush_word[i] | 1);
- flushed++;
- max = i;
- }
- for (i = 0; i <= max; i++) {
- u32 w;
- if (!northbridges[i])
- continue;
- /* Make sure the hardware actually executed the flush. */
- for (;;) {
- pci_read_config_dword(northbridges[i], 0x9c, &w);
- if (!(w & 1))
- break;
- cpu_relax();
- }
- }
- if (!flushed)
- printk("nothing to flush?\n");
+ if (need_flush) {
+ k8_flush_garts();
need_flush = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
-
-
#ifdef CONFIG_IOMMU_LEAK
#define SET_LEAK(x) if (iommu_leak_tab) \
@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
size_t size, int dir)
{
dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
- flush_gart(dev);
+ flush_gart();
return map;
}
@@ -289,6 +254,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
}
/*
+ * Free a DMA mapping.
+ */
+void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ unsigned long iommu_page;
+ int npages;
+ int i;
+
+ if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
+ dma_addr >= iommu_bus_base + iommu_size)
+ return;
+ iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
+ npages = to_pages(dma_addr, size);
+ for (i = 0; i < npages; i++) {
+ iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
+ CLEAR_LEAK(iommu_page + i);
+ }
+ free_iommu(iommu_page, npages);
+}
+
+/*
* Wrapper for pci_unmap_single working with scatterlists.
*/
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
@@ -299,7 +286,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di
struct scatterlist *s = &sg[i];
if (!s->dma_length || !s->length)
break;
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
}
}
@@ -329,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
s->dma_address = addr;
s->dma_length = s->length;
}
- flush_gart(dev);
+ flush_gart();
return nents;
}
@@ -436,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
goto error;
out++;
- flush_gart(dev);
+ flush_gart();
if (out < nents)
sg[out].dma_length = 0;
return out;
error:
- flush_gart(NULL);
+ flush_gart();
gart_unmap_sg(dev, sg, nents, dir);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
@@ -458,28 +445,6 @@ error:
return 0;
}
-/*
- * Free a DMA mapping.
- */
-void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- unsigned long iommu_page;
- int npages;
- int i;
-
- if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
- dma_addr >= iommu_bus_base + iommu_size)
- return;
- iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
- npages = to_pages(dma_addr, size);
- for (i = 0; i < npages; i++) {
- iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
- CLEAR_LEAK(iommu_page + i);
- }
- free_iommu(iommu_page, npages);
-}
-
static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
void *gatt;
unsigned aper_base, new_aper_base;
unsigned aper_size, gatt_size, new_aper_size;
-
+ int i;
+
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0;
- for_all_nb(dev) {
+ dev = NULL;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ dev = k8_northbridges[i];
new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base)
goto nommu;
@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
panic("Cannot allocate GATT table");
memset(gatt, 0, gatt_size);
agp_gatt_table = gatt;
-
- for_all_nb(dev) {
+
+ for (i = 0; i < num_k8_northbridges; i++) {
u32 ctl;
u32 gatt_reg;
+ dev = k8_northbridges[i];
gatt_reg = __pa(gatt) >> 12;
gatt_reg <<= 4;
pci_write_config_dword(dev, 0x98, gatt_reg);
@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
pci_write_config_dword(dev, 0x90, ctl);
}
- flush_gart(NULL);
+ flush_gart();
printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
return 0;
@@ -602,15 +571,19 @@ static struct dma_mapping_ops gart_dma_ops = {
.unmap_sg = gart_unmap_sg,
};
-static int __init pci_iommu_init(void)
+void __init gart_iommu_init(void)
{
struct agp_kern_info info;
unsigned long aper_size;
unsigned long iommu_start;
- struct pci_dev *dev;
unsigned long scratch;
long i;
+ if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
+ printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
+ return;
+ }
+
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
#else
@@ -622,7 +595,11 @@ static int __init pci_iommu_init(void)
#endif
if (swiotlb)
- return -1;
+ return;
+
+ /* Did we detect a different HW IOMMU? */
+ if (iommu_detected && !iommu_aperture)
+ return;
if (no_iommu ||
(!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
@@ -634,15 +611,7 @@ static int __init pci_iommu_init(void)
"but IOMMU not available.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n");
}
- return -1;
- }
-
- i = 0;
- for_all_nb(dev)
- i++;
- if (i > MAX_NB) {
- printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
- return -1;
+ return;
}
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
@@ -707,26 +676,10 @@ static int __init pci_iommu_init(void)
for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
iommu_gatt_base[i] = gart_unmapped_entry;
- for_all_nb(dev) {
- u32 flag;
- int cpu = PCI_SLOT(dev->devfn) - 24;
- if (cpu >= MAX_NB)
- continue;
- northbridges[cpu] = dev;
- pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
- northbridge_flush_word[cpu] = flag;
- }
-
- flush_gart(NULL);
-
+ flush_gart();
dma_ops = &gart_dma_ops;
-
- return 0;
}
-/* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
-
void gart_parse_options(char *p)
{
int arg;
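
For reference, a minimal userspace sketch of the slot/page arithmetic that gart_unmap_single() above relies on; the aperture base, address and size are hypothetical, and pages_spanned() only mirrors the rounding done by the kernel's to_pages() helper rather than reproducing it:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* number of GATT pages a mapping touches, including partial first/last pages */
static int pages_spanned(unsigned long addr, unsigned long size)
{
	return (int)(((addr & ~PAGE_MASK) + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
}

int main(void)
{
	unsigned long iommu_bus_base = 0x80000000UL;	/* hypothetical aperture base */
	unsigned long dma_addr = 0x80003800UL;		/* hypothetical mapping */
	unsigned long size = 0x1000;			/* 4 KiB transfer */

	unsigned long slot = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	printf("GATT slot %lu, %d page(s) to clear\n",
	       slot, pages_spanned(dma_addr, size));	/* slot 3, 2 pages */
	return 0;
}
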
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 1f6ecc62061d..c4c3cc36ac5b 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -4,6 +4,8 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
#include <asm/proto.h>
#include <asm/processor.h>
#include <asm/dma.h>
@@ -12,10 +14,11 @@ static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
if (hwdev && bus + size > *hwdev->dma_mask) {
- if (*hwdev->dma_mask >= 0xffffffffULL)
+ if (*hwdev->dma_mask >= DMA_32BIT_MASK)
printk(KERN_ERR
- "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
- name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
+ name, (long long)bus, size,
+ (long long)*hwdev->dma_mask);
return 0;
}
return 1;
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 990ed67896f2..ebdb77fe2057 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
void pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
- if (!iommu_aperture && !no_iommu &&
+ if (!iommu_detected && !no_iommu &&
(end_pfn > MAX_DMA32_PFN || force_iommu))
swiotlb = 1;
if (swiotlb) {
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index bf421ed26808..7554458dc9cb 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -27,7 +27,7 @@
/* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
* in arch/i386/kernel/acpi/boot.c */
-u32 pmtmr_ioport;
+u32 pmtmr_ioport __read_mostly;
/* value of the Power timer at last timer interrupt */
static u32 offset_delay;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index fb903e65e079..ca56e19b8b6e 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -10,7 +10,6 @@
* Andi Kleen.
*
* CPU hotplug support - ashok.raj@intel.com
- * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
*/
/*
@@ -64,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -111,7 +111,7 @@ static void default_idle(void)
{
local_irq_enable();
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
while (!need_resched()) {
local_irq_disable();
@@ -120,7 +120,7 @@ static void default_idle(void)
else
local_irq_enable();
}
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
}
/*
@@ -203,8 +203,7 @@ static inline void play_dead(void)
*/
void cpu_idle (void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
+ current_thread_info()->status |= TS_POLLING;
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
@@ -335,7 +334,7 @@ void show_regs(struct pt_regs *regs)
{
printk("CPU %d:", smp_processor_id());
__show_regs(regs);
- show_trace(&regs->rsp);
+ show_trace(NULL, regs, (void *)(regs + 1));
}
/*
@@ -365,8 +364,11 @@ void flush_thread(void)
struct task_struct *tsk = current;
struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING)
+ if (t->flags & _TIF_ABI_PENDING) {
t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
+ if (t->flags & _TIF_IA32)
+ current_thread_info()->status |= TS_COMPAT;
+ }
tsk->thread.debugreg0 = 0;
tsk->thread.debugreg1 = 0;
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 57117b8beb2b..2d6769847456 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -20,6 +20,7 @@
* Power off function, if any
*/
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
static long no_idt[3];
static enum {
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index fdb82658b1a1..1129918ede82 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -5,8 +5,6 @@
*
* Nov 2001 Dave Jones <davej@suse.de>
* Forked from i386 setup code.
- *
- * $Id$
*/
/*
@@ -47,7 +45,6 @@
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
-#include <linux/suspend.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -66,9 +63,7 @@
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
-#include <asm/swiotlb.h>
#include <asm/sections.h>
-#include <asm/gart-mapping.h>
#include <asm/dmi.h>
/*
@@ -76,6 +71,7 @@
*/
struct cpuinfo_x86 boot_cpu_data __read_mostly;
+EXPORT_SYMBOL(boot_cpu_data);
unsigned long mmu_cr4_features;
@@ -104,12 +100,14 @@ char dmi_alloc_data[DMI_MAX_DATA];
* Setup options
*/
struct screen_info screen_info;
+EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
unsigned short length;
unsigned char table[0];
};
struct edid_info edid_info;
+EXPORT_SYMBOL_GPL(edid_info);
struct e820map e820;
extern int root_mountflags;
@@ -474,80 +472,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
}
#endif
-/* Use inline assembly to define this because the nops are defined
- as inline assembly strings in the include files and we cannot
- get them easily into strings. */
-asm("\t.data\nk8nops: "
- K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
- K8_NOP7 K8_NOP8);
-
-extern unsigned char k8nops[];
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
- NULL,
- k8nops,
- k8nops + 1,
- k8nops + 1 + 2,
- k8nops + 1 + 2 + 3,
- k8nops + 1 + 2 + 3 + 4,
- k8nops + 1 + 2 + 3 + 4 + 5,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-
-extern char __vsyscall_0;
-
-/* Replace instructions with better alternatives for this CPU type.
-
- This runs before SMP is initialized to avoid SMP problems with
- self modifying code. This implies that assymetric systems where
- APs have less capabilities than the boot processor are not handled.
- In this case boot with "noreplacement". */
-void apply_alternatives(void *start, void *end)
-{
- struct alt_instr *a;
- int diff, i, k;
- for (a = start; (void *)a < end; a++) {
- u8 *instr;
-
- if (!boot_cpu_has(a->cpuid))
- continue;
-
- BUG_ON(a->replacementlen > a->instrlen);
- instr = a->instr;
- /* vsyscall code is not mapped yet. resolve it manually. */
- if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
- instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
- __inline_memcpy(instr, a->replacement, a->replacementlen);
- diff = a->instrlen - a->replacementlen;
-
- /* Pad the rest with nops */
- for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
- k = diff;
- if (k > ASM_NOP_MAX)
- k = ASM_NOP_MAX;
- __inline_memcpy(instr + i, k8_nops[k], k);
- }
- }
-}
-
-static int no_replacement __initdata = 0;
-
-void __init alternative_instructions(void)
-{
- extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
- if (no_replacement)
- return;
- apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
-static int __init noreplacement_setup(char *s)
-{
- no_replacement = 1;
- return 1;
-}
-
-__setup("noreplacement", noreplacement_setup);
-
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
@@ -596,100 +520,6 @@ static void discover_ebda(void)
ebda_size = 64*1024;
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
-static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
-{
- struct page *page;
- while (start <= end) {
- page = pfn_to_page(start);
- SetPageNosave(page);
- start++;
- }
-}
-
-static void __init e820_nosave_reserved_pages(void)
-{
- int i;
- unsigned long r_start = 0, r_end = 0;
-
- /* Assume e820 map is sorted */
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- unsigned long start, end;
-
- start = round_down(ei->addr, PAGE_SIZE);
- end = round_up(ei->addr + ei->size, PAGE_SIZE);
- if (start >= end)
- continue;
- if (ei->type == E820_RESERVED)
- continue;
- r_end = start>>PAGE_SHIFT;
- /* swsusp ignores invalid pfn, ignore these pages here */
- if (r_end > end_pfn)
- r_end = end_pfn;
- if (r_end > r_start)
- mark_nosave_page_range(r_start, r_end-1);
- if (r_end >= end_pfn)
- break;
- r_start = end>>PAGE_SHIFT;
- }
-}
-
-static void __init e820_save_acpi_pages(void)
-{
- int i;
-
- /* Assume e820 map is sorted */
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- unsigned long start, end;
-
- start = ei->addr, PAGE_SIZE;
- end = ei->addr + ei->size;
- if (start >= end)
- continue;
- if (ei->type != E820_ACPI && ei->type != E820_NVS)
- continue;
- /*
- * If the region is below end_pfn, it will be
- * saved/restored by swsusp follow 'RAM' type.
- */
- if (start < (end_pfn << PAGE_SHIFT))
- start = end_pfn << PAGE_SHIFT;
- if (end > start)
- swsusp_add_arch_pages(start, end);
- }
-}
-
-extern char __start_rodata, __end_rodata;
-/*
- * BIOS reserved region/hole - no save/restore
- * ACPI NVS - save/restore
- * ACPI Data - this is a little tricky, the mem could be used by OS after OS
- * reads tables from the region, but anyway save/restore the memory hasn't any
- * side effect and Linux runtime module load/unload might use it.
- * kernel rodata - no save/restore (kernel rodata isn't changed)
- */
-static int __init mark_nosave_pages(void)
-{
- unsigned long pfn_start, pfn_end;
-
- /* BIOS reserved regions & holes */
- e820_nosave_reserved_pages();
-
- /* kernel rodata */
- pfn_start = round_up(__pa_symbol(&__start_rodata), PAGE_SIZE) >> PAGE_SHIFT;
- pfn_end = round_down(__pa_symbol(&__end_rodata), PAGE_SIZE) >> PAGE_SHIFT;
- mark_nosave_page_range(pfn_start, pfn_end-1);
-
- /* record ACPI Data/NVS as saveable */
- e820_save_acpi_pages();
-
- return 0;
-}
-core_initcall(mark_nosave_pages);
-#endif
-
void __init setup_arch(char **cmdline_p)
{
unsigned long kernel_end;
@@ -874,10 +704,6 @@ void __init setup_arch(char **cmdline_p)
e820_setup_gap();
-#ifdef CONFIG_GART_IOMMU
- iommu_hole_init();
-#endif
-
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
@@ -962,24 +788,32 @@ static int nearby_node(int apicid)
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- int cpu = smp_processor_id();
unsigned bits;
#ifdef CONFIG_NUMA
+ int cpu = smp_processor_id();
int node = 0;
unsigned apicid = hard_smp_processor_id();
#endif
+ unsigned ecx = cpuid_ecx(0x80000008);
- bits = 0;
- while ((1 << bits) < c->x86_max_cores)
- bits++;
+ c->x86_max_cores = (ecx & 0xff) + 1;
+
+ /* CPU telling us the core id bits shift? */
+ bits = (ecx >> 12) & 0xF;
+
+ /* Otherwise recompute */
+ if (bits == 0) {
+ while ((1 << bits) < c->x86_max_cores)
+ bits++;
+ }
/* Low order bits define the core id (index of core in socket) */
- cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
+ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
/* Convert the APIC ID into the socket ID */
- phys_proc_id[cpu] = phys_pkg_id(bits);
+ c->phys_proc_id = phys_pkg_id(bits);
#ifdef CONFIG_NUMA
- node = phys_proc_id[cpu];
+ node = c->phys_proc_id;
if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid];
if (!node_online(node)) {
@@ -992,7 +826,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the
path for the previous case. */
- int ht_nodeid = apicid - (phys_proc_id[0] << bits);
+ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid];
@@ -1002,15 +836,13 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
}
numa_set_node(cpu, node);
- printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
- cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
-static int __init init_amd(struct cpuinfo_x86 *c)
+static void __init init_amd(struct cpuinfo_x86 *c)
{
- int r;
unsigned level;
#ifdef CONFIG_SMP
@@ -1043,8 +875,8 @@ static int __init init_amd(struct cpuinfo_x86 *c)
if (c->x86 >= 6)
set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
- r = get_model_name(c);
- if (!r) {
+ level = get_model_name(c);
+ if (!level) {
switch (c->x86) {
case 15:
/* Should distinguish Models here, but this is only
@@ -1059,13 +891,12 @@ static int __init init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & (1<<8))
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
- if (c->extended_cpuid_level >= 0x80000008) {
- c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-
+ /* Multi core CPU? */
+ if (c->extended_cpuid_level >= 0x80000008)
amd_detect_cmp(c);
- }
- return r;
+ /* Fix cpuid4 emulation for more */
+ num_cache_leaves = 3;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -1073,13 +904,14 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
- int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
- if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ if (!cpu_has(c, X86_FEATURE_HT))
return;
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ goto out;
smp_num_siblings = (ebx & 0xff0000) >> 16;
@@ -1094,10 +926,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
}
index_msb = get_count_order(smp_num_siblings);
- phys_proc_id[cpu] = phys_pkg_id(index_msb);
-
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ c->phys_proc_id = phys_pkg_id(index_msb);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
@@ -1105,13 +934,15 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
core_bits = get_count_order(c->x86_max_cores);
- cpu_core_id[cpu] = phys_pkg_id(index_msb) &
+ c->cpu_core_id = phys_pkg_id(index_msb) &
((1 << core_bits) - 1);
-
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- cpu_core_id[cpu]);
}
+out:
+ if ((c->x86_max_cores * smp_num_siblings) > 1) {
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
+ }
+
#endif
}
@@ -1120,15 +951,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
*/
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
- unsigned int eax;
+ unsigned int eax, t;
if (c->cpuid_level < 4)
return 1;
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (4), "c" (0)
- : "bx", "dx");
+ cpuid_count(4, 0, &eax, &t, &t, &t);
if (eax & 0x1f)
return ((eax >> 26) + 1);
@@ -1141,16 +969,17 @@ static void srat_detect_node(void)
#ifdef CONFIG_NUMA
unsigned node;
int cpu = smp_processor_id();
+ int apicid = hard_smp_processor_id();
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
- node = apicid_to_node[hard_smp_processor_id()];
+ node = apicid_to_node[apicid];
if (node == NUMA_NO_NODE)
node = first_node(node_online_map);
numa_set_node(cpu, node);
if (acpi_numa > 0)
- printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
@@ -1160,6 +989,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
unsigned n;
init_intel_cacheinfo(c);
+ if (c->cpuid_level > 9 ) {
+ unsigned eax = cpuid_eax(10);
+ /* Check for version and the number of counters */
+ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
+ }
+
n = c->extended_cpuid_level;
if (n >= 0x80000008) {
unsigned eax = cpuid_eax(0x80000008);
@@ -1251,7 +1087,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
}
#ifdef CONFIG_SMP
- phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
@@ -1378,7 +1214,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
- NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
+ NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1389,7 +1225,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* Other (Linux-defined) */
"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
"constant_tsc", NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1459,9 +1295,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
- seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
+ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
- seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
+ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif
@@ -1535,7 +1371,7 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
-#ifdef CONFIG_INPUT_PCSPKR
+#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
#include <linux/platform_device.h>
static __init int add_pcspkr(void)
{
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 8a691fa6d393..f5934cb4a2b6 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -3,7 +3,6 @@
* Copyright (C) 1995 Linus Torvalds
* Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
* See setup.c for older changelog.
- * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -31,6 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
@@ -38,6 +38,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
unsigned long __supported_pte_mask __read_mostly = ~0UL;
+EXPORT_SYMBOL(__supported_pte_mask);
static int do_not_nx __cpuinitdata = 0;
/* noexec=on|off
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index e5f5ce7909a3..28161170fb0a 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -7,8 +7,6 @@
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-2002 x86-64 support by Andi Kleen
- *
- * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
*/
#include <linux/sched.h>
@@ -239,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
rsp = regs->rsp - 128;
/* This is the X/Open sanctioned signal stack switching. */
- /* RED-PEN: redzone on that stack? */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(rsp) == 0)
rsp = current->sas_ss_sp + current->sas_ss_size;
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4a6628b14d99..5a1c0a3bf872 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
cpu = smp_processor_id();
/*
- * orig_rax contains the interrupt vector - 256.
+ * orig_rax contains the negated interrupt vector.
* Use that to determine where the sender put the data.
*/
- sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, f->flush_cpumask))
@@ -224,6 +224,7 @@ void flush_tlb_current_task(void)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm (struct mm_struct * mm)
{
@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_unlock(&call_lock);
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
void smp_stop_cpu(void)
{
@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy)
{
smp_stop_cpu();
for (;;)
- asm("hlt");
+ halt();
}
void smp_send_stop(void)
@@ -470,7 +474,7 @@ void smp_send_stop(void)
return;
/* Don't deadlock on the call lock in panic */
if (!spin_trylock(&call_lock)) {
- /* ignore locking because we have paniced anyways */
+ /* ignore locking because we have panicked anyways */
nolock = 1;
}
__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void)
int safe_smp_processor_id(void)
{
- int apicid, i;
+ unsigned apicid, i;
if (disable_apic)
return 0;
apicid = hard_smp_processor_id();
- if (x86_cpu_to_apicid[apicid] == apicid)
+ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
return apicid;
for (i = 0; i < NR_CPUS; ++i) {
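
The smp_invalidate_interrupt() hunk depends on the entry code now storing the one's complement of the vector in orig_rax instead of vector - 256; both encodings recover the same sender index, which a couple of lines of arithmetic confirm (the vector numbers below are hypothetical):

#include <assert.h>

int main(void)
{
	long vector = 0xe1;			/* hypothetical invalidate IPI vector */
	long invalidate_start = 0xe0;		/* hypothetical INVALIDATE_TLB_VECTOR_START */

	long old_orig_rax = vector - 256;	/* previous encoding */
	long new_orig_rax = ~vector;		/* encoding assumed by this hunk */

	assert(old_orig_rax + 256 - invalidate_start ==
	       ~new_orig_rax - invalidate_start);	/* both give sender index 1 */
	return 0;
}
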
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 71a7222cf9ce..540c0ccbcccc 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -63,13 +63,11 @@
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
-/* Package ID of each logical CPU */
-u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
-/* core ID of each logical CPU */
-u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
+EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+EXPORT_SYMBOL(cpu_llc_id);
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
@@ -82,18 +80,21 @@ EXPORT_SYMBOL(cpu_online_map);
*/
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
+EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
/* Set when the idlers are all forked */
int smp_threads_ready;
/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
@@ -454,10 +455,12 @@ cpumask_t cpu_coregroup_map(int cpu)
struct cpuinfo_x86 *c = cpu_data + cpu;
/*
* For perf, we return last level cache shared map.
- * TBD: when power saving sched policy is added, we will return
- * cpu_core_map when power saving policy is enabled
+ * And for power savings, we return cpu_core_map
*/
- return c->llc_shared_map;
+ if (sched_mc_power_savings || sched_smt_power_savings)
+ return cpu_core_map[cpu];
+ else
+ return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
@@ -472,8 +475,8 @@ static inline void set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (phys_proc_id[cpu] == phys_proc_id[i] &&
- cpu_core_id[cpu] == cpu_core_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
+ c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +503,7 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
@@ -797,6 +800,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
}
+ alternatives_smp_switch(1);
+
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
@@ -1199,8 +1204,8 @@ static void remove_siblinginfo(int cpu)
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
- phys_proc_id[cpu] = BAD_APICID;
- cpu_core_id[cpu] = BAD_APICID;
+ c[cpu].phys_proc_id = 0;
+ c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
}
@@ -1259,6 +1264,8 @@ void __cpu_die(unsigned int cpu)
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
printk ("CPU %d is now offline\n", cpu);
+ if (1 == num_online_cpus())
+ alternatives_smp_switch(0);
return;
}
msleep(100);
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
new file mode 100644
index 000000000000..8d4c67f61b8e
--- /dev/null
+++ b/arch/x86_64/kernel/tce.c
@@ -0,0 +1,202 @@
+/*
+ * Derived from arch/powerpc/platforms/pseries/iommu.c
+ *
+ * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
+ * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/bootmem.h>
+#include <asm/tce.h>
+#include <asm/calgary.h>
+#include <asm/proto.h>
+
+/* flush a tce at 'tceaddr' to main memory */
+static inline void flush_tce(void* tceaddr)
+{
+ /* a single tce can't cross a cache line */
+ if (cpu_has_clflush)
+ asm volatile("clflush (%0)" :: "r" (tceaddr));
+ else
+ asm volatile("wbinvd":::"memory");
+}
+
+void tce_build(struct iommu_table *tbl, unsigned long index,
+ unsigned int npages, unsigned long uaddr, int direction)
+{
+ u64* tp;
+ u64 t;
+ u64 rpn;
+
+ t = (1 << TCE_READ_SHIFT);
+ if (direction != DMA_TO_DEVICE)
+ t |= (1 << TCE_WRITE_SHIFT);
+
+ tp = ((u64*)tbl->it_base) + index;
+
+ while (npages--) {
+ rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
+ t &= ~TCE_RPN_MASK;
+ t |= (rpn << TCE_RPN_SHIFT);
+
+ *tp = cpu_to_be64(t);
+ flush_tce(tp);
+
+ uaddr += PAGE_SIZE;
+ tp++;
+ }
+}
+
+void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
+{
+ u64* tp;
+
+ tp = ((u64*)tbl->it_base) + index;
+
+ while (npages--) {
+ *tp = cpu_to_be64(0);
+ flush_tce(tp);
+ tp++;
+ }
+}
+
+static inline unsigned int table_size_to_number_of_entries(unsigned char size)
+{
+ /*
+ * size is the order of the table, 0-7
+ * smallest table is 8K entries, so shift result by 13 to
+ * multiply by 8K
+ */
+ return (1 << size) << 13;
+}
+
+static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
+{
+ unsigned int bitmapsz;
+ unsigned int tce_table_index;
+ unsigned long bmppages;
+ int ret;
+
+ tbl->it_busno = dev->bus->number;
+
+ /* set the tce table size - measured in entries */
+ tbl->it_size = table_size_to_number_of_entries(specified_table_size);
+
+ tce_table_index = bus_to_phb(tbl->it_busno);
+ tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
+ if (!tbl->it_base) {
+ printk(KERN_ERR "Calgary: iommu_table_setparms: "
+ "no table allocated?!\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * number of bytes needed for the bitmap size in number of
+ * entries; we need one bit per entry
+ */
+ bitmapsz = tbl->it_size / BITS_PER_BYTE;
+ bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
+ if (!bmppages) {
+ printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ tbl->it_map = (unsigned long*)bmppages;
+
+ memset(tbl->it_map, 0, bitmapsz);
+
+ tbl->it_hint = 0;
+
+ spin_lock_init(&tbl->it_lock);
+
+ return 0;
+
+done:
+ return ret;
+}
+
+int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
+{
+ struct iommu_table *tbl;
+ int ret;
+
+ if (dev->sysdata) {
+ printk(KERN_ERR "Calgary: dev %p has sysdata %p\n",
+ dev, dev->sysdata);
+ BUG();
+ }
+
+ tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ if (!tbl) {
+ printk(KERN_ERR "Calgary: error allocating iommu_table\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = tce_table_setparms(dev, tbl);
+ if (ret)
+ goto free_tbl;
+
+ tce_free(tbl, 0, tbl->it_size);
+
+ tbl->bbar = bbar;
+
+ /*
+ * NUMA is already using the bus's sysdata pointer, so we use
+ * the bus's pci_dev's sysdata instead.
+ */
+ dev->sysdata = tbl;
+
+ return 0;
+
+free_tbl:
+ kfree(tbl);
+done:
+ return ret;
+}
+
+void* alloc_tce_table(void)
+{
+ unsigned int size;
+
+ size = table_size_to_number_of_entries(specified_table_size);
+ size *= TCE_ENTRY_SIZE;
+
+ return __alloc_bootmem_low(size, size, 0);
+}
+
+void free_tce_table(void *tbl)
+{
+ unsigned int size;
+
+ if (!tbl)
+ return;
+
+ size = table_size_to_number_of_entries(specified_table_size);
+ size *= TCE_ENTRY_SIZE;
+
+ free_bootmem(__pa(tbl), size);
+}
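
The geometry helpers in the new tce.c are all driven by a table "order" between 0 and 7; the arithmetic is easy to check in isolation. The order and entry size below are assumptions for illustration (8 bytes per TCE, order 3), not values taken from the patch:

#include <stdio.h>

#define BITS_PER_BYTE	8
#define TCE_ENTRY_SIZE	8	/* assumed: one 64-bit TCE per table entry */

/* order 0 => 8K entries, each step doubles, as in the patch */
static unsigned int entries_for_order(unsigned char order)
{
	return (1u << order) << 13;
}

int main(void)
{
	unsigned char order = 3;	/* hypothetical specified_table_size */
	unsigned int entries = entries_for_order(order);

	printf("entries = %u\n", entries);				/* 65536 */
	printf("table   = %u bytes\n", entries * TCE_ENTRY_SIZE);	/* 512 KiB */
	printf("bitmap  = %u bytes\n", entries / BITS_PER_BYTE);	/* 8 KiB */
	return 0;
}
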
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7392570f975d..ebbee6f59ff5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -8,7 +8,7 @@
* Copyright (c) 1995 Markus Kuhn
* Copyright (c) 1996 Ingo Molnar
* Copyright (c) 1998 Andrea Arcangeli
- * Copyright (c) 2002 Vojtech Pavlik
+ * Copyright (c) 2002,2006 Vojtech Pavlik
* Copyright (c) 2003 Andi Kleen
* RTC support code taken from arch/i386/kernel/timers/time_hpet.c
*/
@@ -51,14 +51,21 @@ extern int using_apic_timer;
static char *time_init_gtod(void);
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
int nohpet __initdata = 0;
static int notsc __initdata = 0;
-#undef HPET_HACK_ENABLE_DANGEROUS
+#define USEC_PER_TICK (USEC_PER_SEC / HZ)
+#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
+#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
+
+#define NS_SCALE 10 /* 2^10, carefully chosen */
+#define US_SCALE 32 /* 2^32, arbitrarily chosen */
unsigned int cpu_khz; /* TSC clocks / usec, not used here */
+EXPORT_SYMBOL(cpu_khz);
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
@@ -90,7 +97,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
t = get_cycles_sync();
if (t < vxtime.last_tsc)
t = vxtime.last_tsc; /* hack */
- x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
+ x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
return x;
}
@@ -98,7 +105,7 @@ static inline unsigned int do_gettimeoffset_hpet(void)
{
/* cap counter read to one tick to avoid inconsistencies */
unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
- return (min(counter,hpet_tick) * vxtime.quot) >> 32;
+ return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
}
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -118,7 +125,7 @@ void do_gettimeofday(struct timeval *tv)
seq = read_seqbegin(&xtime_lock);
sec = xtime.tv_sec;
- usec = xtime.tv_nsec / 1000;
+ usec = xtime.tv_nsec / NSEC_PER_USEC;
/* i386 does some correction here to keep the clock
monotonous even when ntpd is fixing drift.
@@ -129,14 +136,14 @@ void do_gettimeofday(struct timeval *tv)
in arch/x86_64/kernel/vsyscall.c and export all needed
variables in vmlinux.lds. -AK */
- t = (jiffies - wall_jiffies) * (1000000L / HZ) +
+ t = (jiffies - wall_jiffies) * USEC_PER_TICK +
do_gettimeoffset();
usec += t;
} while (read_seqretry(&xtime_lock, seq));
- tv->tv_sec = sec + usec / 1000000;
- tv->tv_usec = usec % 1000000;
+ tv->tv_sec = sec + usec / USEC_PER_SEC;
+ tv->tv_usec = usec % USEC_PER_SEC;
}
EXPORT_SYMBOL(do_gettimeofday);
@@ -157,8 +164,8 @@ int do_settimeofday(struct timespec *tv)
write_seqlock_irq(&xtime_lock);
- nsec -= do_gettimeoffset() * 1000 +
- (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);
+ nsec -= do_gettimeoffset() * NSEC_PER_USEC +
+ (jiffies - wall_jiffies) * NSEC_PER_TICK;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -288,7 +295,7 @@ unsigned long long monotonic_clock(void)
this_offset = hpet_readl(HPET_COUNTER);
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
- offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
+ offset *= NSEC_PER_TICK / hpet_tick;
} else {
do {
seq = read_seqbegin(&xtime_lock);
@@ -297,7 +304,8 @@ unsigned long long monotonic_clock(void)
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
this_offset = get_cycles_sync();
- offset = (this_offset - last_offset)*1000 / cpu_khz;
+ /* FIXME: 1000 or 1000000? */
+ offset = (this_offset - last_offset)*1000 / cpu_khz;
}
return base + offset;
}
@@ -382,7 +390,7 @@ void main_timer_handler(struct pt_regs *regs)
}
monotonic_base +=
- (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick;
+ (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
@@ -391,24 +399,25 @@ void main_timer_handler(struct pt_regs *regs)
#endif
} else {
offset = (((tsc - vxtime.last_tsc) *
- vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);
+ vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
if (offset < 0)
offset = 0;
- if (offset > (USEC_PER_SEC / HZ)) {
- lost = offset / (USEC_PER_SEC / HZ);
- offset %= (USEC_PER_SEC / HZ);
+ if (offset > USEC_PER_TICK) {
+ lost = offset / USEC_PER_TICK;
+ offset %= USEC_PER_TICK;
}
- monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ;
+ /* FIXME: 1000 or 1000000? */
+ monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
if ((((tsc - vxtime.last_tsc) *
- vxtime.tsc_quot) >> 32) < offset)
+ vxtime.tsc_quot) >> US_SCALE) < offset)
vxtime.last_tsc = tsc -
- (((long) offset << 32) / vxtime.tsc_quot) - 1;
+ (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
}
if (lost > 0) {
@@ -468,16 +477,15 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
static unsigned int cyc2ns_scale __read_mostly;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
- cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+ cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+ return (cyc * cyc2ns_scale) >> NS_SCALE;
}
unsigned long long sched_clock(void)
@@ -490,7 +498,7 @@ unsigned long long sched_clock(void)
Disadvantage is a small drift between CPUs in some configurations,
but that should be tolerable. */
if (__vxtime.mode == VXTIME_HPET)
- return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
+ return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
#endif
/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
@@ -633,7 +641,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+ vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
}
set_cyc2ns_scale(cpu_khz_ref);
@@ -789,8 +797,8 @@ static int hpet_timer_stop_set_go(unsigned long tick)
if (hpet_use_timer) {
hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
HPET_TN_32BIT, HPET_T0_CFG);
- hpet_writel(hpet_tick, HPET_T0_CMP);
- hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
+ hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
+ hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
cfg |= HPET_CFG_LEGACY;
}
/*
@@ -825,8 +833,7 @@ static int hpet_init(void)
if (hpet_period < 100000 || hpet_period > 100000000)
return -1;
- hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
- hpet_period;
+ hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
hpet_use_timer = (id & HPET_ID_LEGSUP);
@@ -890,18 +897,6 @@ void __init time_init(void)
char *timename;
char *gtod;
-#ifdef HPET_HACK_ENABLE_DANGEROUS
- if (!vxtime.hpet_address) {
- printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
- "manually!\n");
- outl(0x800038a0, 0xcf8);
- outl(0xff000001, 0xcfc);
- outl(0x800038a0, 0xcf8);
- vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
- printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
- "at %#lx.\n", vxtime.hpet_address);
- }
-#endif
if (nohpet)
vxtime.hpet_address = 0;
@@ -912,7 +907,7 @@ void __init time_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
if (!hpet_init())
- vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
+ vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period;
else
vxtime.hpet_address = 0;
@@ -941,8 +936,8 @@ void __init time_init(void)
vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
- vxtime.quot = (1000000L << 32) / vxtime_hz;
- vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+ vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
+ vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);
@@ -956,10 +951,10 @@ void __init time_init(void)
__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
- if (oem_force_hpet_timer())
+ if (apic_is_clustered_box())
return 1;
/* Intel systems are normally all synchronized. Exceptions
- are handled in the OEM check above. */
+ are handled in the check above. */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return 0;
#endif
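
NS_SCALE and US_SCALE above are plain fixed-point shift counts; the cycles-to-nanoseconds conversion they feed can be sanity-checked in userspace (the 2 GHz cpu_khz below is a made-up figure):

#include <stdio.h>

#define NS_SCALE	10		/* 2^10, as in the patch */
#define NSEC_PER_MSEC	1000000UL

int main(void)
{
	unsigned long cpu_khz = 2000000;	/* hypothetical 2.0 GHz CPU */
	unsigned int cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
	unsigned long long cycles = 2000000000ULL;	/* one second worth of cycles */

	/* (cyc * scale) >> NS_SCALE should come out at ~1e9 ns */
	printf("scale=%u ns=%llu\n",
	       cyc2ns_scale, (cycles * cyc2ns_scale) >> NS_SCALE);
	return 0;
}
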
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index cea335e8746c..3d11a2fe45b7 100644

--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -6,8 +6,6 @@
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
*/
/*
@@ -31,6 +29,7 @@
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
+#include <linux/unwind.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -41,7 +40,7 @@
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
-
+#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
@@ -71,6 +70,7 @@ asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
ATOMIC_NOTIFIER_HEAD(die_chain);
+EXPORT_SYMBOL(die_chain);
int register_die_notifier(struct notifier_block *nb)
{
@@ -107,7 +107,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
preempt_enable_no_resched();
}
-static int kstack_depth_to_print = 10;
+static int kstack_depth_to_print = 12;
+static int call_trace = 1;
#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
@@ -191,6 +192,25 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
return NULL;
}
+static int show_trace_unwind(struct unwind_frame_info *info, void *context)
+{
+ int i = 11, n = 0;
+
+ while (unwind(info) == 0 && UNW_PC(info)) {
+ ++n;
+ if (i > 50) {
+ printk("\n ");
+ i = 7;
+ } else
+ i += printk(" ");
+ i += printk_address(UNW_PC(info));
+ if (arch_unw_user_mode(info))
+ break;
+ }
+ printk("\n");
+ return n;
+}
+
/*
* x86-64 can have up to three kernel stacks:
* process stack
@@ -198,15 +218,39 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-void show_trace(unsigned long *stack)
+void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
{
const unsigned cpu = safe_smp_processor_id();
unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
- int i;
+ int i = 11;
unsigned used = 0;
printk("\nCall Trace:");
+ if (!tsk)
+ tsk = current;
+
+ if (call_trace >= 0) {
+ int unw_ret = 0;
+ struct unwind_frame_info info;
+
+ if (regs) {
+ if (unwind_init_frame_info(&info, tsk, regs) == 0)
+ unw_ret = show_trace_unwind(&info, NULL);
+ } else if (tsk == current)
+ unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
+ else {
+ if (unwind_init_blocked(&info, tsk) == 0)
+ unw_ret = show_trace_unwind(&info, NULL);
+ }
+ if (unw_ret > 0) {
+ if (call_trace > 0)
+ return;
+ printk("Legacy call trace:");
+ i = 18;
+ }
+ }
+
#define HANDLE_STACK(cond) \
do while (cond) { \
unsigned long addr = *stack++; \
@@ -229,7 +273,7 @@ void show_trace(unsigned long *stack)
} \
} while (0)
- for(i = 11; ; ) {
+ for(; ; ) {
const char *id;
unsigned long *estack_end;
estack_end = in_exception_stack(cpu, (unsigned long)stack,
@@ -264,7 +308,7 @@ void show_trace(unsigned long *stack)
printk("\n");
}
-void show_stack(struct task_struct *tsk, unsigned long * rsp)
+static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
{
unsigned long *stack;
int i;
@@ -298,7 +342,12 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
printk("%016lx ", *stack++);
touch_nmi_watchdog();
}
- show_trace((unsigned long *)rsp);
+ show_trace(tsk, regs, rsp);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long * rsp)
+{
+ _show_stack(tsk, NULL, rsp);
}
/*
@@ -307,7 +356,7 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
void dump_stack(void)
{
unsigned long dummy;
- show_trace(&dummy);
+ show_trace(NULL, NULL, &dummy);
}
EXPORT_SYMBOL(dump_stack);
@@ -334,7 +383,7 @@ void show_registers(struct pt_regs *regs)
if (in_kernel) {
printk("Stack: ");
- show_stack(NULL, (unsigned long*)rsp);
+ _show_stack(NULL, regs, (unsigned long*)rsp);
printk("\nCode: ");
if (regs->rip < PAGE_OFFSET)
@@ -383,6 +432,7 @@ void out_of_line_bug(void)
{
BUG();
}
+EXPORT_SYMBOL(out_of_line_bug);
#endif
static DEFINE_SPINLOCK(die_lock);
@@ -1012,3 +1062,14 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
+static int __init call_trace_setup(char *s)
+{
+ if (strcmp(s, "old") == 0)
+ call_trace = -1;
+ else if (strcmp(s, "both") == 0)
+ call_trace = 0;
+ else if (strcmp(s, "new") == 0)
+ call_trace = 1;
+ return 1;
+}
+__setup("call_trace=", call_trace_setup);
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b81f473c4a19..1c6a5f322919 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -45,6 +45,15 @@ SECTIONS
RODATA
+#ifdef CONFIG_STACK_UNWIND
+ . = ALIGN(8);
+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
+ __start_unwind = .;
+ *(.eh_frame)
+ __end_unwind = .;
+ }
+#endif
+
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
*(.data)
@@ -131,6 +140,26 @@ SECTIONS
*(.data.page_aligned)
}
+ /* might get freed after init */
+ . = ALIGN(4096);
+ __smp_alt_begin = .;
+ __smp_alt_instructions = .;
+ .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+ *(.smp_altinstructions)
+ }
+ __smp_alt_instructions_end = .;
+ . = ALIGN(8);
+ __smp_locks = .;
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ *(.smp_locks)
+ }
+ __smp_locks_end = .;
+ .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
+ *(.smp_altinstr_replacement)
+ }
+ . = ALIGN(4096);
+ __smp_alt_end = .;
+
. = ALIGN(4096); /* Init code and data */
__init_begin = .;
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 9468fb20b0bc..f603037df162 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -107,7 +107,7 @@ static __always_inline long time_syscall(long *t)
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
- if (unlikely(!__sysctl_vsyscall))
+ if (!__sysctl_vsyscall)
return gettimeofday(tv,tz);
if (tv)
do_vgettimeofday(tv);
@@ -120,7 +120,7 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
* unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
- if (unlikely(!__sysctl_vsyscall))
+ if (!__sysctl_vsyscall)
return time_syscall(t);
else if (t)
*t = __xtime.tv_sec;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 1def21c9f7cd..370952c4ff22 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -1,66 +1,21 @@
+/* Exports for assembly files.
+ All C exports should go in the respective C files. */
+
#include <linux/config.h>
#include <linux/module.h>
#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/syscalls.h>
-#include <linux/tty.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
-#include <asm/i387.h>
#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
-#include <asm/desc.h>
#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/nmi.h>
-#include <asm/kdebug.h>
-#include <asm/unistd.h>
-#include <asm/tlbflush.h>
-#include <asm/kdebug.h>
-
-extern spinlock_t rtc_lock;
-#ifdef CONFIG_SMP
-extern void __write_lock_failed(rwlock_t *rw);
-extern void __read_lock_failed(rwlock_t *rw);
-#endif
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-//EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(ip_compute_csum);
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -71,42 +26,20 @@ EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_user_generic);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_in_user);
-EXPORT_SYMBOL(strnlen_user);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(_cpu_pda);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed);
-
-EXPORT_SYMBOL(smp_call_function);
-EXPORT_SYMBOL(cpu_callout_map);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
/* Export string functions. We normally rely on gcc builtin for most of these,
but gcc sometimes decides not to inline them. */
#undef memcpy
@@ -114,51 +47,14 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
#undef memmove
extern void * memset(void *,int,__kernel_size_t);
-extern size_t strlen(const char *);
-extern void * memmove(void * dest,const void *src,size_t count);
extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * __memcpy(void *,const void *,__kernel_size_t);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
-#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-/* prototypes are wrong, these are assembly with custom calling functions */
-extern void rwsem_down_read_failed_thunk(void);
-extern void rwsem_wake_thunk(void);
-extern void rwsem_downgrade_thunk(void);
-extern void rwsem_down_write_failed_thunk(void);
-EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
-EXPORT_SYMBOL(rwsem_wake_thunk);
-EXPORT_SYMBOL(rwsem_downgrade_thunk);
-EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
-#endif
-
EXPORT_SYMBOL(empty_zero_page);
-
-EXPORT_SYMBOL(die_chain);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_sibling_map);
-EXPORT_SYMBOL(smp_num_siblings);
-#endif
-
-#ifdef CONFIG_BUG
-EXPORT_SYMBOL(out_of_line_bug);
-#endif
-
EXPORT_SYMBOL(init_level4_pgt);
-
-extern unsigned long __supported_pte_mask;
-EXPORT_SYMBOL(__supported_pte_mask);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(flush_tlb_page);
-#endif
-
-EXPORT_SYMBOL(cpu_khz);
-
EXPORT_SYMBOL(load_gs_index);
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index 5384e227cdf6..c493735218da 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -147,4 +147,5 @@ unsigned short ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial(buff,len,0));
}
+EXPORT_SYMBOL(ip_compute_csum);
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index 94323f20816e..b1320ec58428 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -109,6 +109,7 @@ csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
{
return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
__u32 len, unsigned short proto, unsigned int sum)
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 03c460cbdd1c..b6cd3cca2f45 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -9,6 +9,7 @@
*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <asm/delay.h>
@@ -36,18 +37,22 @@ void __delay(unsigned long loops)
}
while((now-bclock) < loops);
}
+EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
__delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
}
+EXPORT_SYMBOL(__const_udelay);
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
+EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/x86_64/lib/memmove.c b/arch/x86_64/lib/memmove.c
index e93d5255fdc9..751ebae8ec42 100644
--- a/arch/x86_64/lib/memmove.c
+++ b/arch/x86_64/lib/memmove.c
@@ -3,12 +3,13 @@
*/
#define _STRING_C
#include <linux/string.h>
+#include <linux/module.h>
#undef memmove
void *memmove(void * dest,const void *src,size_t count)
{
if (dest < src) {
- __inline_memcpy(dest,src,count);
+ return memcpy(dest,src,count);
} else {
char *p = (char *) dest + count;
char *s = (char *) src + count;
@@ -17,3 +18,4 @@ void *memmove(void * dest,const void *src,size_t count)
}
return dest;
}
+EXPORT_SYMBOL(memmove);
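
The memmove() change leans on the usual overlap rule: when the destination sits below the source, a plain forward copy (memcpy) is safe, otherwise the bytes have to be copied backwards. A tiny userspace illustration of why the direction matters:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "abcdef";

	/* dest < src: shifting data left, a forward copy cannot clobber unread bytes */
	memmove(buf, buf + 2, 4);
	printf("%s\n", buf);		/* "cdefef" - the first four bytes are now "cdef" */

	/* dest > src: shifting data right, only a backward copy preserves the source */
	strcpy(buf, "abcdef");
	memmove(buf + 2, buf, 4);
	printf("%s\n", buf);		/* "ababcd" */
	return 0;
}
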
diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c
index 9bc2c295818e..893d43f838cc 100644
--- a/arch/x86_64/lib/usercopy.c
+++ b/arch/x86_64/lib/usercopy.c
@@ -5,6 +5,7 @@
* Copyright 1997 Linus Torvalds
* Copyright 2002 Andi Kleen <ak@suse.de>
*/
+#include <linux/module.h>
#include <asm/uaccess.h>
/*
@@ -47,15 +48,17 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
+EXPORT_SYMBOL(__strncpy_from_user);
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
- __do_strncpy_from_user(dst, src, count, res);
+ return __strncpy_from_user(dst, src, count);
return res;
}
+EXPORT_SYMBOL(strncpy_from_user);
/*
* Zero Userspace
@@ -94,7 +97,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
[zero] "r" (0UL), [eight] "r" (8UL));
return size;
}
-
+EXPORT_SYMBOL(__clear_user);
unsigned long clear_user(void __user *to, unsigned long n)
{
@@ -102,6 +105,7 @@ unsigned long clear_user(void __user *to, unsigned long n)
return __clear_user(to, n);
return n;
}
+EXPORT_SYMBOL(clear_user);
/*
* Return the size of a string (including the ending 0)
@@ -125,6 +129,7 @@ long __strnlen_user(const char __user *s, long n)
s++;
}
}
+EXPORT_SYMBOL(__strnlen_user);
long strnlen_user(const char __user *s, long n)
{
@@ -132,6 +137,7 @@ long strnlen_user(const char __user *s, long n)
return 0;
return __strnlen_user(s, n);
}
+EXPORT_SYMBOL(strnlen_user);
long strlen_user(const char __user *s)
{
@@ -147,6 +153,7 @@ long strlen_user(const char __user *s)
s++;
}
}
+EXPORT_SYMBOL(strlen_user);
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
@@ -155,3 +162,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, unsigned le
}
return len;
}
+EXPORT_SYMBOL(copy_in_user);
+
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 55250593d8c9..08dc696f54ee 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -41,6 +41,41 @@
#define PF_RSVD (1<<3)
#define PF_INSTR (1<<4)
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
void bust_spinlocks(int yes)
{
int loglevel_save = console_loglevel;
@@ -160,7 +195,7 @@ void dump_pagetable(unsigned long address)
printk("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd)) goto ret;
- pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
+ pud = pud_offset(pgd, address);
if (bad_address(pud)) goto bad;
printk("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud)) goto ret;
@@ -348,7 +383,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (vmalloc_fault(address) >= 0)
return;
}
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
return;
/*
@@ -358,7 +393,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
return;
@@ -410,8 +445,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (error_code & 4) {
- // XXX: align red zone size with ABI
- if (address + 128 < regs->rsp)
+ /* Allow userspace just enough access below the stack pointer
+ * to let the 'enter' instruction work.
+ */
+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
goto bad_area;
}
if (expand_stack(vma, address))
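The fault.c changes above give kprobes-style users a dedicated notifier chain, so ordinary page faults no longer walk the general die chain, and register_page_fault_notifier() runs vmalloc_sync_all() so the handler itself can never take a vmalloc fault; the same hunk also widens the region allowed below the stack pointer so the 'enter' instruction's frame setup does not fault spuriously. A minimal sketch of a chain consumer, assuming only the two registration functions and the struct die_args fields shown above (the callback name and body are illustrative):

	static int my_pf_notify(struct notifier_block *nb, unsigned long val,
				void *data)
	{
		struct die_args *args = data;

		if (val == DIE_PAGE_FAULT) {
			/* inspect args->regs, args->err, args->trapnr here */
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_pf_nb = {
		.notifier_call = my_pf_notify,
	};

	/* consumer init/exit */
	register_page_fault_notifier(&my_pf_nb);
	...
	unregister_page_fault_notifier(&my_pf_nb);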
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4ba34e95d835..95bd232ff0cf 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
@@ -41,8 +42,6 @@
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
-#include <asm/dma-mapping.h>
-#include <asm/swiotlb.h>
#ifndef Dprintk
#define Dprintk(x...)
@@ -90,8 +89,6 @@ void show_mem(void)
printk(KERN_INFO "%lu pages swap cached\n",cached);
}
-/* References to section boundaries */
-
int after_bootmem;
static __init void *spp_getpage(void)
@@ -261,9 +258,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
unsigned long entry;
- if (address > end) {
- for (; i < PTRS_PER_PMD; i++, pmd++)
- set_pmd(pmd, __pmd(0));
+ if (address >= end) {
+ if (!after_bootmem)
+ for (; i < PTRS_PER_PMD; i++, pmd++)
+ set_pmd(pmd, __pmd(0));
break;
}
entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
@@ -341,7 +339,8 @@ static void __init find_early_table_space(unsigned long end)
table_end = table_start;
early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
- end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+ end, table_start << PAGE_SHIFT,
+ (table_start << PAGE_SHIFT) + tables);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
@@ -372,7 +371,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
pud_t *pud;
if (after_bootmem)
- pud = pud_offset_k(pgd, start & PGDIR_MASK);
+ pud = pud_offset(pgd, start & PGDIR_MASK);
else
pud = alloc_low_page(&map, &pud_phys);
@@ -508,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
/*
* Memory hotplug specific functions
*/
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
void online_page(struct page *page)
{
ClearPageReserved(page);
@@ -519,31 +516,17 @@ void online_page(struct page *page)
num_physpages++;
}
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() finds the node id for a physical address
+ * handed in through the sysfs probe interface. When ACPI signals a hot-add
+ * event the node id can be found by searching the DSDT, but the probe
+ * interface carries no node id, so return node 0 for now.
*/
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
{
- int err = -EIO;
- unsigned long pfn;
- unsigned long total = 0, mem = 0;
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (pfn_valid(pfn)) {
- online_page(pfn_to_page(pfn));
- err = 0;
- mem++;
- }
- total++;
- }
- if (!err) {
- z->spanned_pages += total;
- z->present_pages += mem;
- z->zone_pgdat->node_spanned_pages += total;
- z->zone_pgdat->node_present_pages += mem;
- }
- return err;
+ return 0;
}
#endif
@@ -551,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
* Memory is added always to NORMAL zone. This means you will never get
* additional DMA/DMA32 memory.
*/
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
- struct pglist_data *pgdat = NODE_DATA(0);
+ struct pglist_data *pgdat = NODE_DATA(nid);
struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -570,7 +553,7 @@ error:
printk("%s: Problem encountered in __add_pages!\n", __func__);
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
int remove_memory(u64 start, u64 size)
{
@@ -578,7 +561,33 @@ int remove_memory(u64 start, u64 size)
}
EXPORT_SYMBOL_GPL(remove_memory);
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+ int err = -EIO;
+ unsigned long pfn;
+ unsigned long total = 0, mem = 0;
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+ if (pfn_valid(pfn)) {
+ online_page(pfn_to_page(pfn));
+ err = 0;
+ mem++;
+ }
+ total++;
+ }
+ if (!err) {
+ z->spanned_pages += total;
+ z->present_pages += mem;
+ z->zone_pgdat->node_spanned_pages += total;
+ z->zone_pgdat->node_present_pages += mem;
+ }
+ return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
@@ -587,10 +596,7 @@ void __init mem_init(void)
{
long codesize, reservedpages, datasize, initsize;
-#ifdef CONFIG_SWIOTLB
- pci_swiotlb_init();
-#endif
- no_iommu_init();
+ pci_iommu_alloc();
/* How many end-of-memory variables you have, grandma! */
max_low_pfn = end_pfn;
@@ -644,20 +650,31 @@ void __init mem_init(void)
#endif
}
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ if (begin >= end)
+ return;
+
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+ memset((void *)(addr & ~(PAGE_SIZE-1)),
+ POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
- memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
- printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+void free_initmem(void)
+{
+ memset(__initdata_begin, POISON_FREE_INITDATA,
+ __initdata_end - __initdata_begin);
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
}
#ifdef CONFIG_DEBUG_RODATA
@@ -686,15 +703,7 @@ void mark_rodata_ro(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_init_pages("initrd memory", start, end);
}
#endif
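After this change the architecture no longer implements add_memory() itself: it provides arch_add_memory(nid, start, size) plus a memory_add_physaddr_to_nid() stub that returns node 0, and the generic hotplug code is expected to combine the two. A sketch of that combination, for illustration only (the function name is hypothetical; the generic path itself is not part of this patch):

	/* Hypothetical caller, illustration only. */
	int sketch_add_memory(u64 start, u64 size)
	{
		int nid = memory_add_physaddr_to_nid(start);	/* 0 on x86-64 for now */

		return arch_add_memory(nid, start, size);
	}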
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ae207064201e..45d7d823c3b8 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -219,6 +220,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
return (__force void __iomem *) (offset + (char *)addr);
}
+EXPORT_SYMBOL(__ioremap);
/**
* ioremap_nocache - map bus memory into CPU space
@@ -246,6 +248,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr, size, _PAGE_PCD);
}
+EXPORT_SYMBOL(ioremap_nocache);
/**
* iounmap - Free a IO remapping
@@ -291,3 +294,5 @@ void iounmap(volatile void __iomem *addr)
BUG_ON(p != o || o == NULL);
kfree(p);
}
+EXPORT_SYMBOL(iounmap);
+
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 3acf60ded2a0..b50a7c7c47f8 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,6 +2,7 @@
#include <linux/pci.h>
#include <asm/mpspec.h>
#include <linux/cpumask.h>
+#include <asm/k8.h>
/*
* This discovers the pcibus <-> node mapping on AMD K8.
@@ -18,7 +19,6 @@
#define NR_LDT_BUS_NUMBER_REGISTERS 3
#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
-#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
/**
* fill_mp_bus_to_cpumask()
@@ -28,8 +28,7 @@
__init static int
fill_mp_bus_to_cpumask(void)
{
- struct pci_dev *nb_dev = NULL;
- int i, j;
+ int i, j, k;
u32 ldtbus, nid;
static int lbnr[3] = {
LDT_BUS_NUMBER_REGISTER_0,
@@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void)
LDT_BUS_NUMBER_REGISTER_2
};
- while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
+ cache_k8_northbridges();
+ for (k = 0; k < num_k8_northbridges; k++) {
+ struct pci_dev *nb_dev = k8_northbridges[k];
pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
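Rather than walking the PCI device list for the K8 HyperTransport configuration function, the bus-to-node setup now iterates the shared northbridge cache from <asm/k8.h>. The pattern, with the per-node work elided (NODE_ID_REGISTER is the register already read above):

	/* Sketch of the shared-northbridge iteration used above. */
	int k;
	u32 nid;

	cache_k8_northbridges();		/* fills k8_northbridges[] once */
	for (k = 0; k < num_k8_northbridges; k++) {
		struct pci_dev *nb_dev = k8_northbridges[k];

		pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
		/* ... LDT bus number handling per node ... */
	}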
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 98fac8489aed..3a3a4c66ef87 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -71,7 +71,7 @@ archprepare: $(archinc)/.platform
# Update machine cpu and platform symlinks if something which affects
# them changed.
-$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/MARKER
+$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
@echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)'
$(Q)mkdir -p $(archinc)
$(Q)mkdir -p $(archinc)/xtensa
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 51f9bed455fa..1cf744ee0959 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -100,7 +100,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -181,7 +181,7 @@ void __init init_IRQ(void)
int i;
for (i=0; i < XTENSA_NR_IRQS; i++)
- irq_desc[i].handler = &xtensa_irq_type;
+ irq_desc[i].chip = &xtensa_irq_type;
cached_irq_mask = 0;
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index c6f471b9eaa0..eda029fc8972 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -71,13 +71,13 @@ static int pci_bus_count;
* which might have been mirrored at 0x0100-0x03ff.
*/
void
-pcibios_align_resource(void *data, struct resource *res, unsigned long size,
- unsigned long align)
+pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
+ resource_size_t align)
{
struct pci_dev *dev = data;
if (res->flags & IORESOURCE_IO) {
- unsigned long start = res->start;
+ resource_size_t start = res->start;
if (size > 0x100) {
printk(KERN_ERR "PCI: I/O Region %s/%d too large"
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 937d81f62f43..fe14909f45e0 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -29,7 +29,7 @@
extern volatile unsigned long wall_jiffies;
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 225d64d73f04..27e409089a7b 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -461,7 +461,7 @@ void show_code(unsigned int *pc)
}
}
-spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 1ec5df466708..3af31ed49a9c 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -892,7 +892,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
}
/*
- * as_can_anticipate indicates weather we should either run arq
+ * as_can_anticipate indicates whether we should either run arq
* or keep anticipating a better request.
*/
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0603ab2f3692..eee03a3876a3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2745,7 +2745,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
return 0;
/*
- * not contigious
+ * not contiguous
*/
if (req->sector + req->nr_sectors != next->sector)
return 0;
@@ -3403,7 +3403,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
}
-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
@@ -3415,7 +3415,7 @@ static struct notifier_block blk_cpu_notifier = {
*
* Description:
* Ends all I/O on a request. It does not handle partial completions,
- * unless the driver actually implements this in its completionc callback
+ * unless the driver actually implements this in its completion callback
* through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
@@ -3541,9 +3541,7 @@ int __init blk_dev_init(void)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
- register_cpu_notifier(&blk_cpu_notifier);
-#endif
+ register_hotcpu_notifier(&blk_cpu_notifier);
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
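register_hotcpu_notifier() is assumed here to compile away when CPU hotplug is disabled, which is what lets the #ifdef CONFIG_HOTPLUG_CPU guard go and the notifier_block be tagged __devinitdata. The resulting pattern for a CPU notifier of this kind (callback body illustrative):

	static int my_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
	{
		/* e.g. drain per-CPU state on CPU_DEAD */
		return NOTIFY_OK;
	}

	static struct notifier_block __devinitdata my_cpu_nb = {
		.notifier_call = my_cpu_notify,
	};

	register_hotcpu_notifier(&my_cpu_nb);	/* no #ifdef wrapper needed */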
diff --git a/crypto/Kconfig b/crypto/Kconfig
index c442f2e7ce46..ba133d557045 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -337,7 +337,7 @@ config CRYPTO_CRC32C
config CRYPTO_TEST
tristate "Testing module"
- depends on CRYPTO
+ depends on CRYPTO && m
help
Quick & dirty crypto test module.
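The added "&& m" restricts CRYPTO_TEST to a modular build only (a tristate that depends on m can never be built in), which fits the tcrypt.c change further down where the module's init() deliberately returns -EAGAIN once the tests have run.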
diff --git a/crypto/aes.c b/crypto/aes.c
index a5017292e066..a038711831e7 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -248,10 +248,10 @@ gen_tabs (void)
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
}
-static int
-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct aes_ctx *ctx = ctx_arg;
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 i, t, u, v, w;
@@ -318,9 +318,9 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
f_rl(bo, bi, 2, k); \
f_rl(bo, bi, 3, k)
-static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct aes_ctx *ctx = ctx_arg;
+ const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
u32 b0[4], b1[4];
@@ -373,9 +373,9 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
i_rl(bo, bi, 2, k); \
i_rl(bo, bi, 3, k)
-static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct aes_ctx *ctx = ctx_arg;
+ const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
u32 b0[4], b1[4];
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 2c796bdb91a6..7e2e1a29800e 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -460,16 +460,15 @@ static const u32 rc[] = {
0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U,
};
-static int anubis_setkey(void *ctx_arg, const u8 *in_key,
+static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
{
+ struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
- struct anubis_ctx *ctx = ctx_arg;
-
switch (key_len)
{
case 16: case 20: case 24: case 28:
@@ -660,15 +659,15 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
dst[i] = cpu_to_be32(inter[i]);
}
-static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
+static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct anubis_ctx *ctx = ctx_arg;
+ struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
anubis_crypt(ctx->E, dst, src, ctx->R);
}
-static void anubis_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
+static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct anubis_ctx *ctx = ctx_arg;
+ struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
anubis_crypt(ctx->D, dst, src, ctx->R);
}
diff --git a/crypto/api.c b/crypto/api.c
index 80bba637fba7..c11ec1fd4f18 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -188,13 +188,16 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
if (crypto_init_flags(tfm, flags))
goto out_free_tfm;
- if (crypto_init_ops(tfm)) {
- crypto_exit_ops(tfm);
+ if (crypto_init_ops(tfm))
goto out_free_tfm;
- }
+
+ if (alg->cra_init && alg->cra_init(tfm))
+ goto cra_init_failed;
goto out;
+cra_init_failed:
+ crypto_exit_ops(tfm);
out_free_tfm:
kfree(tfm);
tfm = NULL;
@@ -215,6 +218,8 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
alg = tfm->__crt_alg;
size = sizeof(*tfm) + alg->cra_ctxsize;
+ if (alg->cra_exit)
+ alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_alg_put(alg);
memset(tfm, 0, size);
@@ -224,7 +229,7 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
static inline int crypto_set_driver_name(struct crypto_alg *alg)
{
static const char suffix[] = "-generic";
- char *driver_name = (char *)alg->cra_driver_name;
+ char *driver_name = alg->cra_driver_name;
int len;
if (*driver_name)
@@ -262,13 +267,13 @@ int crypto_register_alg(struct crypto_alg *alg)
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) {
+ if (q == alg) {
ret = -EEXIST;
goto out;
}
}
- list_add_tail(&alg->cra_list, &crypto_alg_list);
+ list_add(&alg->cra_list, &crypto_alg_list);
out:
up_write(&crypto_alg_sem);
return ret;
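crypto_alloc_tfm() now invokes an optional per-algorithm cra_init() hook after the ops are set up, unwinding through crypto_exit_ops() if it fails, and crypto_free_tfm() calls cra_exit() before tear-down; the deflate conversion later in this patch is the first user. A minimal sketch of an algorithm using the hooks, assuming only the fields visible in this diff (the context layout and allocation size are illustrative):

	struct example_ctx {
		void *workspace;
	};

	static int example_init(struct crypto_tfm *tfm)
	{
		struct example_ctx *ctx = crypto_tfm_ctx(tfm);

		ctx->workspace = kmalloc(4096, GFP_KERNEL);
		return ctx->workspace ? 0 : -ENOMEM;
	}

	static void example_exit(struct crypto_tfm *tfm)
	{
		struct example_ctx *ctx = crypto_tfm_ctx(tfm);

		kfree(ctx->workspace);
	}

	static struct crypto_alg example_alg = {
		/* ... name, flags, blocksize ... */
		.cra_ctxsize = sizeof(struct example_ctx),
		.cra_init    = example_init,	/* run from crypto_alloc_tfm() */
		.cra_exit    = example_exit,	/* run from crypto_free_tfm() */
	};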
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 9efbcaae88a1..5edc6a65b987 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -24,9 +24,10 @@ struct arc4_ctx {
u8 x, y;
};
-static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
+static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct arc4_ctx *ctx = ctx_arg;
+ struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
int i, j = 0, k = 0;
ctx->x = 1;
@@ -48,9 +49,9 @@ static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u
return 0;
}
-static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in)
+static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct arc4_ctx *ctx = ctx_arg;
+ struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
u8 *const S = ctx->S;
u8 x = ctx->x;
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index 7f710b201f20..490265f42b3b 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -349,7 +349,7 @@ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
dst[1] = yl;
}
-static void bf_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
@@ -357,17 +357,18 @@ static void bf_encrypt(void *ctx, u8 *dst, const u8 *src)
in32[0] = be32_to_cpu(in_blk[0]);
in32[1] = be32_to_cpu(in_blk[1]);
- encrypt_block(ctx, out32, in32);
+ encrypt_block(crypto_tfm_ctx(tfm), out32, in32);
out_blk[0] = cpu_to_be32(out32[0]);
out_blk[1] = cpu_to_be32(out32[1]);
}
-static void bf_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
+ struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
- const u32 *P = ((struct bf_ctx *)ctx)->p;
- const u32 *S = ((struct bf_ctx *)ctx)->s;
+ const u32 *P = ctx->p;
+ const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
@@ -398,12 +399,14 @@ static void bf_decrypt(void *ctx, u8 *dst, const u8 *src)
/*
* Calculates the blowfish S and P boxes for encryption and decryption.
*/
-static int bf_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+static int bf_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
+ struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *P = ctx->p;
+ u32 *S = ctx->s;
short i, j, count;
u32 data[2], temp;
- u32 *P = ((struct bf_ctx *)ctx)->p;
- u32 *S = ((struct bf_ctx *)ctx)->s;
/* Copy the initialization s-boxes */
for (i = 0, count = 0; i < 256; i++)
diff --git a/crypto/cast5.c b/crypto/cast5.c
index 8834c8580c04..08eef58c1d3d 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -577,9 +577,9 @@ static const u32 sb8[256] = {
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) )
-static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
+static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
- struct cast5_ctx *c = (struct cast5_ctx *) ctx;
+ struct cast5_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
@@ -642,9 +642,9 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
dst[1] = cpu_to_be32(l);
}
-static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
+static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
- struct cast5_ctx *c = (struct cast5_ctx *) ctx;
+ struct cast5_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
@@ -769,15 +769,15 @@ static void key_schedule(u32 * x, u32 * z, u32 * k)
}
-static int
-cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags)
+static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned key_len, u32 *flags)
{
+ struct cast5_ctx *c = crypto_tfm_ctx(tfm);
int i;
u32 x[4];
u32 z[4];
u32 k[16];
__be32 p_key[4];
- struct cast5_ctx *c = (struct cast5_ctx *) ctx;
if (key_len < 5 || key_len > 16) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 9e28740ba775..08e33bfc3ad1 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -381,13 +381,13 @@ static inline void W(u32 *key, unsigned int i) {
key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
-static int
-cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags)
+static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned key_len, u32 *flags)
{
int i;
u32 key[8];
__be32 p_key[8]; /* padded key */
- struct cast6_ctx *c = (struct cast6_ctx *) ctx;
+ struct cast6_ctx *c = crypto_tfm_ctx(tfm);
if (key_len < 16 || key_len > 32 || key_len % 4 != 0) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -444,8 +444,9 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
block[2] ^= F1(block[3], Kr[0], Km[0]);
}
-static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
- struct cast6_ctx * c = (struct cast6_ctx *)ctx;
+static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
+{
+ struct cast6_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
@@ -476,8 +477,8 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
dst[3] = cpu_to_be32(block[3]);
}
-static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
- struct cast6_ctx * c = (struct cast6_ctx *)ctx;
+static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) {
+ struct cast6_ctx * c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 65bcea0cd17c..b899eb97abd7 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -187,7 +187,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
int bsize = crypto_tfm_alg_blocksize(tfm);
- void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
u8 *iv = desc->info;
unsigned int done = 0;
@@ -195,7 +195,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
do {
xor(iv, src);
- fn(crypto_tfm_ctx(tfm), dst, iv);
+ fn(tfm, dst, iv);
memcpy(iv, dst, bsize);
src += bsize;
@@ -218,7 +218,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
u8 **dst_p = src == dst ? &buf : &dst;
- void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
u8 *iv = desc->info;
unsigned int done = 0;
@@ -227,7 +227,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
do {
u8 *tmp_dst = *dst_p;
- fn(crypto_tfm_ctx(tfm), tmp_dst, src);
+ fn(tfm, tmp_dst, src);
xor(tmp_dst, iv);
memcpy(iv, src, bsize);
if (tmp_dst != dst)
@@ -245,13 +245,13 @@ static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
{
struct crypto_tfm *tfm = desc->tfm;
int bsize = crypto_tfm_alg_blocksize(tfm);
- void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
unsigned int done = 0;
nbytes -= bsize;
do {
- fn(crypto_tfm_ctx(tfm), dst, src);
+ fn(tfm, dst, src);
src += bsize;
dst += bsize;
@@ -268,7 +268,7 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
} else
- return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
+ return cia->cia_setkey(tfm, key, keylen,
&tfm->crt_flags);
}
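The cipher layer now hands the struct crypto_tfm itself to cia_setkey/cia_encrypt/cia_decrypt instead of a bare context pointer, and each algorithm fetches its private state with crypto_tfm_ctx(tfm); the AES, ARC4, Blowfish and CAST conversions above all follow the same shape. For reference, the converted callback skeleton (example_ctx stands for the algorithm's private context; bodies are placeholders):

	static int example_setkey(struct crypto_tfm *tfm, const u8 *in_key,
				  unsigned int key_len, u32 *flags)
	{
		struct example_ctx *ctx = crypto_tfm_ctx(tfm);

		/* expand in_key into ctx; set *flags on a bad key length */
		return 0;
	}

	static void example_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		struct example_ctx *ctx = crypto_tfm_ctx(tfm);

		/* encrypt one block from src to dst using ctx */
	}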
diff --git a/crypto/compress.c b/crypto/compress.c
index eb36d9364da3..eca182aa3380 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -22,8 +22,7 @@ static int crypto_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm),
- src, slen, dst,
+ return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
dlen);
}
@@ -31,8 +30,7 @@ static int crypto_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm),
- src, slen, dst,
+ return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
dlen);
}
@@ -43,21 +41,14 @@ int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
int crypto_init_compress_ops(struct crypto_tfm *tfm)
{
- int ret = 0;
struct compress_tfm *ops = &tfm->crt_compress;
-
- ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm));
- if (ret)
- goto out;
ops->cot_compress = crypto_compress;
ops->cot_decompress = crypto_decompress;
-out:
- return ret;
+ return 0;
}
void crypto_exit_compress_ops(struct crypto_tfm *tfm)
{
- tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm));
}
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 953362423a5c..f2660123aeb4 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -31,9 +31,9 @@ struct chksum_ctx {
* crc using table.
*/
-static void chksum_init(void *ctx)
+static void chksum_init(struct crypto_tfm *tfm)
{
- struct chksum_ctx *mctx = ctx;
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->crc = ~(u32)0; /* common usage */
}
@@ -43,10 +43,10 @@ static void chksum_init(void *ctx)
* If your algorithm starts with ~0, then XOR with ~0 before you set
* the seed.
*/
-static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
+static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
- struct chksum_ctx *mctx = ctx;
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
if (keylen != sizeof(mctx->crc)) {
if (flags)
@@ -57,9 +57,10 @@ static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen,
return 0;
}
-static void chksum_update(void *ctx, const u8 *data, unsigned int length)
+static void chksum_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int length)
{
- struct chksum_ctx *mctx = ctx;
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
u32 mcrc;
mcrc = crc32c(mctx->crc, data, (size_t)length);
@@ -67,9 +68,9 @@ static void chksum_update(void *ctx, const u8 *data, unsigned int length)
mctx->crc = mcrc;
}
-static void chksum_final(void *ctx, u8 *out)
+static void chksum_final(struct crypto_tfm *tfm, u8 *out)
{
- struct chksum_ctx *mctx = ctx;
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
u32 mcrc = (mctx->crc ^ ~(u32)0);
*(u32 *)out = __le32_to_cpu(mcrc);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 3fcf6e887e87..a0d956b52949 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -27,8 +27,8 @@
#define NULL_BLOCK_SIZE 1
#define NULL_DIGEST_SIZE 0
-static int null_compress(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+static int null_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
{
if (slen > *dlen)
return -EINVAL;
@@ -37,20 +37,21 @@ static int null_compress(void *ctx, const u8 *src, unsigned int slen,
return 0;
}
-static void null_init(void *ctx)
+static void null_init(struct crypto_tfm *tfm)
{ }
-static void null_update(void *ctx, const u8 *data, unsigned int len)
+static void null_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len)
{ }
-static void null_final(void *ctx, u8 *out)
+static void null_final(struct crypto_tfm *tfm, u8 *out)
{ }
-static int null_setkey(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags)
+static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{ return 0; }
-static void null_crypt(void *ctx, u8 *dst, const u8 *src)
+static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
memcpy(dst, src, NULL_BLOCK_SIZE);
}
diff --git a/crypto/deflate.c b/crypto/deflate.c
index f209368d62ae..6588bbf82e9b 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -102,8 +102,9 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
kfree(ctx->decomp_stream.workspace);
}
-static int deflate_init(void *ctx)
+static int deflate_init(struct crypto_tfm *tfm)
{
+ struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = deflate_comp_init(ctx);
@@ -116,17 +117,19 @@ out:
return ret;
}
-static void deflate_exit(void *ctx)
+static void deflate_exit(struct crypto_tfm *tfm)
{
+ struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
}
-static int deflate_compress(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
{
int ret = 0;
- struct deflate_ctx *dctx = ctx;
+ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
struct z_stream_s *stream = &dctx->comp_stream;
ret = zlib_deflateReset(stream);
@@ -151,12 +154,12 @@ out:
return ret;
}
-static int deflate_decompress(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
{
int ret = 0;
- struct deflate_ctx *dctx = ctx;
+ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
struct z_stream_s *stream = &dctx->decomp_stream;
ret = zlib_inflateReset(stream);
@@ -198,9 +201,9 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct deflate_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
+ .cra_init = deflate_init,
+ .cra_exit = deflate_exit,
.cra_u = { .compress = {
- .coa_init = deflate_init,
- .coa_exit = deflate_exit,
.coa_compress = deflate_compress,
.coa_decompress = deflate_decompress } }
};
diff --git a/crypto/des.c b/crypto/des.c
index 2d74cab40c3e..a9d3c235a6af 100644
--- a/crypto/des.c
+++ b/crypto/des.c
@@ -783,9 +783,10 @@ static void dkey(u32 *pe, const u8 *k)
}
}
-static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
- struct des_ctx *dctx = ctx;
+ struct des_ctx *dctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
int ret;
@@ -803,9 +804,10 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
return 0;
}
-static void des_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- const u32 *K = ((struct des_ctx *)ctx)->expkey;
+ struct des_ctx *ctx = crypto_tfm_ctx(tfm);
+ const u32 *K = ctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
u32 L, R, A, B;
@@ -825,9 +827,10 @@ static void des_encrypt(void *ctx, u8 *dst, const u8 *src)
d[1] = cpu_to_le32(L);
}
-static void des_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- const u32 *K = ((struct des_ctx *)ctx)->expkey + DES_EXPKEY_WORDS - 2;
+ struct des_ctx *ctx = crypto_tfm_ctx(tfm);
+ const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
u32 L, R, A, B;
@@ -860,11 +863,11 @@ static void des_decrypt(void *ctx, u8 *dst, const u8 *src)
* property.
*
*/
-static int des3_ede_setkey(void *ctx, const u8 *key,
+static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
{
const u32 *K = (const u32 *)key;
- struct des3_ede_ctx *dctx = ctx;
+ struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
u32 *expkey = dctx->expkey;
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -881,9 +884,9 @@ static int des3_ede_setkey(void *ctx, const u8 *key,
return 0;
}
-static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct des3_ede_ctx *dctx = ctx;
+ struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
const u32 *K = dctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
@@ -912,9 +915,9 @@ static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src)
d[1] = cpu_to_le32(L);
}
-static void des3_ede_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct des3_ede_ctx *dctx = ctx;
+ struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
diff --git a/crypto/digest.c b/crypto/digest.c
index d9b6ac9dbf8d..603006a7bef2 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -20,13 +20,14 @@
static void init(struct crypto_tfm *tfm)
{
- tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm));
+ tfm->__crt_alg->cra_digest.dia_init(tfm);
}
static void update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
{
unsigned int i;
+ unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
for (i = 0; i < nsg; i++) {
@@ -38,12 +39,22 @@ static void update(struct crypto_tfm *tfm,
unsigned int bytes_from_page = min(l, ((unsigned int)
(PAGE_SIZE)) -
offset);
- char *p = crypto_kmap(pg, 0) + offset;
+ char *src = crypto_kmap(pg, 0);
+ char *p = src + offset;
- tfm->__crt_alg->cra_digest.dia_update
- (crypto_tfm_ctx(tfm), p,
- bytes_from_page);
- crypto_kunmap(p, 0);
+ if (unlikely(offset & alignmask)) {
+ unsigned int bytes =
+ alignmask + 1 - (offset & alignmask);
+ bytes = min(bytes, bytes_from_page);
+ tfm->__crt_alg->cra_digest.dia_update(tfm, p,
+ bytes);
+ p += bytes;
+ bytes_from_page -= bytes;
+ l -= bytes;
+ }
+ tfm->__crt_alg->cra_digest.dia_update(tfm, p,
+ bytes_from_page);
+ crypto_kunmap(src, 0);
crypto_yield(tfm);
offset = 0;
pg++;
@@ -54,7 +65,15 @@ static void update(struct crypto_tfm *tfm,
static void final(struct crypto_tfm *tfm, u8 *out)
{
- tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ if (unlikely((unsigned long)out & alignmask)) {
+ unsigned int size = crypto_tfm_alg_digestsize(tfm);
+ u8 buffer[size + alignmask];
+ u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ tfm->__crt_alg->cra_digest.dia_final(tfm, dst);
+ memcpy(out, dst, size);
+ } else
+ tfm->__crt_alg->cra_digest.dia_final(tfm, out);
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -62,25 +81,15 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
u32 flags;
if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
return -ENOSYS;
- return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm),
- key, keylen, &flags);
+ return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags);
}
static void digest(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg, u8 *out)
{
- unsigned int i;
-
- tfm->crt_digest.dit_init(tfm);
-
- for (i = 0; i < nsg; i++) {
- char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
- tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
- p, sg[i].length);
- crypto_kunmap(p, 0);
- crypto_yield(tfm);
- }
- crypto_digest_final(tfm, out);
+ init(tfm);
+ update(tfm, sg, nsg);
+ final(tfm, out);
}
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
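digest.c now honours the algorithm's alignment mask: update() first feeds any odd leading bytes so that later dia_update() calls see aligned data, and final() writes through a stack bounce buffer when the caller's output pointer is misaligned. That is why sha1, sha256, sha512 and michael_mic gain .cra_alignmask = 3 later in this patch. A worked sketch of the bounce-buffer arithmetic for alignmask 3, using SHA-1 sizes as the example:

	/* Sketch: digestsize = 20 (SHA-1), alignmask = 3. */
	u8 buffer[20 + 3];			/* enough slack to realign by up to 3 bytes */
	u8 *dst = (u8 *)ALIGN((unsigned long)buffer, 3 + 1);

	/* dia_final() writes its 20 bytes to the 4-byte-aligned dst ... */
	memcpy(out, dst, 20);			/* ... which are then copied to 'out' */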
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 807f2bf4ea24..d4c9d3657b36 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -754,11 +754,11 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL
};
-static int khazad_setkey(void *ctx_arg, const u8 *in_key,
- unsigned int key_len, u32 *flags)
+static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct khazad_ctx *ctx = ctx_arg;
- const __be64 *key = (const __be64 *)in_key;
+ struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
+ const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
@@ -769,8 +769,9 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL;
}
- K2 = be64_to_cpu(key[0]);
- K1 = be64_to_cpu(key[1]);
+ /* key is supposed to be 32-bit aligned */
+ K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
+ K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@@ -840,15 +841,15 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
*dst = cpu_to_be64(state);
}
-static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
+static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct khazad_ctx *ctx = ctx_arg;
+ struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
khazad_crypt(ctx->E, dst, src);
}
-static void khazad_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
+static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct khazad_ctx *ctx = ctx_arg;
+ struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
khazad_crypt(ctx->D, dst, src);
}
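khazad_setkey() previously performed two be64 loads from the key, which is only safe when the key buffer is 64-bit aligned; the crypto layer guarantees less, so the key is now read as four big-endian 32-bit words and recombined. A worked example of the recombination, with key bytes chosen purely for illustration:

	/* For key bytes 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f:
	 *   key[0] = 0x00010203, key[1] = 0x04050607  ->  K2 = 0x0001020304050607
	 *   key[2] = 0x08090a0b, key[3] = 0x0c0d0e0f  ->  K1 = 0x08090a0b0c0d0e0f
	 * i.e. exactly the values the old be64_to_cpu() loads produced, using
	 * 32-bit accesses only. */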
diff --git a/crypto/md4.c b/crypto/md4.c
index a2d6df5c0f8c..c1bc71bdc16b 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -152,9 +152,9 @@ static inline void md4_transform_helper(struct md4_ctx *ctx)
md4_transform(ctx->hash, ctx->block);
}
-static void md4_init(void *ctx)
+static void md4_init(struct crypto_tfm *tfm)
{
- struct md4_ctx *mctx = ctx;
+ struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
@@ -163,9 +163,9 @@ static void md4_init(void *ctx)
mctx->byte_count = 0;
}
-static void md4_update(void *ctx, const u8 *data, unsigned int len)
+static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
{
- struct md4_ctx *mctx = ctx;
+ struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
@@ -193,9 +193,9 @@ static void md4_update(void *ctx, const u8 *data, unsigned int len)
memcpy(mctx->block, data, len);
}
-static void md4_final(void *ctx, u8 *out)
+static void md4_final(struct crypto_tfm *tfm, u8 *out)
{
- struct md4_ctx *mctx = ctx;
+ struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
diff --git a/crypto/md5.c b/crypto/md5.c
index 7f041aef5da2..93d18e8b3d53 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -147,9 +147,9 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
md5_transform(ctx->hash, ctx->block);
}
-static void md5_init(void *ctx)
+static void md5_init(struct crypto_tfm *tfm)
{
- struct md5_ctx *mctx = ctx;
+ struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
@@ -158,9 +158,9 @@ static void md5_init(void *ctx)
mctx->byte_count = 0;
}
-static void md5_update(void *ctx, const u8 *data, unsigned int len)
+static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
{
- struct md5_ctx *mctx = ctx;
+ struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
@@ -188,9 +188,9 @@ static void md5_update(void *ctx, const u8 *data, unsigned int len)
memcpy(mctx->block, data, len);
}
-static void md5_final(void *ctx, u8 *out)
+static void md5_final(struct crypto_tfm *tfm, u8 *out)
{
- struct md5_ctx *mctx = ctx;
+ struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 4f6ab23e14ad..d061da21cfda 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -45,16 +45,17 @@ do { \
} while (0)
-static void michael_init(void *ctx)
+static void michael_init(struct crypto_tfm *tfm)
{
- struct michael_mic_ctx *mctx = ctx;
+ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->pending_len = 0;
}
-static void michael_update(void *ctx, const u8 *data, unsigned int len)
+static void michael_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len)
{
- struct michael_mic_ctx *mctx = ctx;
+ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
const __le32 *src;
if (mctx->pending_len) {
@@ -90,9 +91,9 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len)
}
-static void michael_final(void *ctx, u8 *out)
+static void michael_final(struct crypto_tfm *tfm, u8 *out)
{
- struct michael_mic_ctx *mctx = ctx;
+ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
u8 *data = mctx->pending;
__le32 *dst = (__le32 *)out;
@@ -121,10 +122,10 @@ static void michael_final(void *ctx, u8 *out)
}
-static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
+static int michael_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
- struct michael_mic_ctx *mctx = ctx;
+ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
const __le32 *data = (const __le32 *)key;
if (keylen != 8) {
@@ -145,6 +146,7 @@ static struct crypto_alg michael_mic_alg = {
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct michael_mic_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = 8,
diff --git a/crypto/serpent.c b/crypto/serpent.c
index e366406ab49d..de60cdddbf4a 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -215,9 +215,11 @@ struct serpent_ctx {
};
-static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
- u32 *k = ((struct serpent_ctx *)ctx)->expkey;
+ struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0,r1,r2,r3,r4;
int i;
@@ -365,10 +367,11 @@ static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *fl
return 0;
}
-static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
+ struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
const u32
- *k = ((struct serpent_ctx *)ctx)->expkey,
+ *k = ctx->expkey,
*s = (const u32 *)src;
u32 *d = (u32 *)dst,
r0, r1, r2, r3, r4;
@@ -423,8 +426,9 @@ static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src)
d[3] = cpu_to_le32(r3);
}
-static void serpent_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
+ struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
const u32
*k = ((struct serpent_ctx *)ctx)->expkey,
*s = (const u32 *)src;
@@ -492,7 +496,8 @@ static struct crypto_alg serpent_alg = {
.cia_decrypt = serpent_decrypt } }
};
-static int tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
+static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen, u32 *flags)
{
u8 rev_key[SERPENT_MAX_KEY_SIZE];
int i;
@@ -506,10 +511,10 @@ static int tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *fl
for (i = 0; i < keylen; ++i)
rev_key[keylen - i - 1] = key[i];
- return serpent_setkey(ctx, rev_key, keylen, flags);
+ return serpent_setkey(tfm, rev_key, keylen, flags);
}
-static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src)
+static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const u32 * const s = (const u32 * const)src;
u32 * const d = (u32 * const)dst;
@@ -521,7 +526,7 @@ static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src)
rs[2] = swab32(s[1]);
rs[3] = swab32(s[0]);
- serpent_encrypt(ctx, (u8 *)rd, (u8 *)rs);
+ serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs);
d[0] = swab32(rd[3]);
d[1] = swab32(rd[2]);
@@ -529,7 +534,7 @@ static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src)
d[3] = swab32(rd[0]);
}
-static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src)
+static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const u32 * const s = (const u32 * const)src;
u32 * const d = (u32 * const)dst;
@@ -541,7 +546,7 @@ static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src)
rs[2] = swab32(s[1]);
rs[3] = swab32(s[0]);
- serpent_decrypt(ctx, (u8 *)rd, (u8 *)rs);
+ serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs);
d[0] = swab32(rd[3]);
d[1] = swab32(rd[2]);
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 21571ed35b7e..6c77b689f87e 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -34,9 +34,9 @@ struct sha1_ctx {
u8 buffer[64];
};
-static void sha1_init(void *ctx)
+static void sha1_init(struct crypto_tfm *tfm)
{
- struct sha1_ctx *sctx = ctx;
+ struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
static const struct sha1_ctx initstate = {
0,
{ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
@@ -46,9 +46,10 @@ static void sha1_init(void *ctx)
*sctx = initstate;
}
-static void sha1_update(void *ctx, const u8 *data, unsigned int len)
+static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len)
{
- struct sha1_ctx *sctx = ctx;
+ struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int partial, done;
const u8 *src;
@@ -80,9 +81,9 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len)
/* Add padding and return the message digest. */
-static void sha1_final(void* ctx, u8 *out)
+static void sha1_final(struct crypto_tfm *tfm, u8 *out)
{
- struct sha1_ctx *sctx = ctx;
+ struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
__be32 *dst = (__be32 *)out;
u32 i, index, padlen;
__be64 bits;
@@ -93,10 +94,10 @@ static void sha1_final(void* ctx, u8 *out)
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- sha1_update(sctx, padding, padlen);
+ sha1_update(tfm, padding, padlen);
/* Append length */
- sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
+ sha1_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
@@ -112,6 +113,7 @@ static struct crypto_alg alg = {
.cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha1_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA1_DIGEST_SIZE,
diff --git a/crypto/sha256.c b/crypto/sha256.c
index 9d5ef674d6a9..bc71d85a7d02 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -230,9 +230,9 @@ static void sha256_transform(u32 *state, const u8 *input)
memset(W, 0, 64 * sizeof(u32));
}
-static void sha256_init(void *ctx)
+static void sha256_init(struct crypto_tfm *tfm)
{
- struct sha256_ctx *sctx = ctx;
+ struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = H0;
sctx->state[1] = H1;
sctx->state[2] = H2;
@@ -242,12 +242,12 @@ static void sha256_init(void *ctx)
sctx->state[6] = H6;
sctx->state[7] = H7;
sctx->count[0] = sctx->count[1] = 0;
- memset(sctx->buf, 0, sizeof(sctx->buf));
}
-static void sha256_update(void *ctx, const u8 *data, unsigned int len)
+static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len)
{
- struct sha256_ctx *sctx = ctx;
+ struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int i, index, part_len;
/* Compute number of bytes mod 128 */
@@ -277,9 +277,9 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len)
memcpy(&sctx->buf[index], &data[i], len-i);
}
-static void sha256_final(void* ctx, u8 *out)
+static void sha256_final(struct crypto_tfm *tfm, u8 *out)
{
- struct sha256_ctx *sctx = ctx;
+ struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
__be32 *dst = (__be32 *)out;
__be32 bits[2];
unsigned int index, pad_len;
@@ -293,10 +293,10 @@ static void sha256_final(void* ctx, u8 *out)
/* Pad out to 56 mod 64. */
index = (sctx->count[0] >> 3) & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
- sha256_update(sctx, padding, pad_len);
+ sha256_update(tfm, padding, pad_len);
/* Append length (before padding) */
- sha256_update(sctx, (const u8 *)bits, sizeof(bits));
+ sha256_update(tfm, (const u8 *)bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@@ -313,6 +313,7 @@ static struct crypto_alg alg = {
.cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha256_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA256_DIGEST_SIZE,
diff --git a/crypto/sha512.c b/crypto/sha512.c
index 3e6e9392310c..2dfe7f170b48 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -161,9 +161,9 @@ sha512_transform(u64 *state, u64 *W, const u8 *input)
}
static void
-sha512_init(void *ctx)
+sha512_init(struct crypto_tfm *tfm)
{
- struct sha512_ctx *sctx = ctx;
+ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = H0;
sctx->state[1] = H1;
sctx->state[2] = H2;
@@ -173,13 +173,12 @@ sha512_init(void *ctx)
sctx->state[6] = H6;
sctx->state[7] = H7;
sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
- memset(sctx->buf, 0, sizeof(sctx->buf));
}
static void
-sha384_init(void *ctx)
+sha384_init(struct crypto_tfm *tfm)
{
- struct sha512_ctx *sctx = ctx;
+ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = HP0;
sctx->state[1] = HP1;
sctx->state[2] = HP2;
@@ -189,13 +188,12 @@ sha384_init(void *ctx)
sctx->state[6] = HP6;
sctx->state[7] = HP7;
sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
- memset(sctx->buf, 0, sizeof(sctx->buf));
}
static void
-sha512_update(void *ctx, const u8 *data, unsigned int len)
+sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
{
- struct sha512_ctx *sctx = ctx;
+ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int i, index, part_len;
@@ -233,9 +231,9 @@ sha512_update(void *ctx, const u8 *data, unsigned int len)
}
static void
-sha512_final(void *ctx, u8 *hash)
+sha512_final(struct crypto_tfm *tfm, u8 *hash)
{
- struct sha512_ctx *sctx = ctx;
+ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
static u8 padding[128] = { 0x80, };
__be64 *dst = (__be64 *)hash;
__be32 bits[4];
@@ -251,10 +249,10 @@ sha512_final(void *ctx, u8 *hash)
/* Pad out to 112 mod 128. */
index = (sctx->count[0] >> 3) & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
- sha512_update(sctx, padding, pad_len);
+ sha512_update(tfm, padding, pad_len);
/* Append length (before padding) */
- sha512_update(sctx, (const u8 *)bits, sizeof(bits));
+ sha512_update(tfm, (const u8 *)bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@@ -264,12 +262,11 @@ sha512_final(void *ctx, u8 *hash)
memset(sctx, 0, sizeof(struct sha512_ctx));
}
-static void sha384_final(void *ctx, u8 *hash)
+static void sha384_final(struct crypto_tfm *tfm, u8 *hash)
{
- struct sha512_ctx *sctx = ctx;
u8 D[64];
- sha512_final(sctx, D);
+ sha512_final(tfm, D);
memcpy(hash, D, 48);
memset(D, 0, 64);
@@ -281,6 +278,7 @@ static struct crypto_alg sha512 = {
.cra_blocksize = SHA512_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha512_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(sha512.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA512_DIGEST_SIZE,
@@ -295,6 +293,7 @@ static struct crypto_alg sha384 = {
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA384_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha512_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha384.cra_list),
.cra_u = { .digest = {
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 49e344f00806..e52f56c5bd5e 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -570,6 +570,122 @@ out:
crypto_free_tfm(tfm);
}
+static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen,
+ int plen, char *out, int sec)
+{
+ struct scatterlist sg[1];
+ unsigned long start, end;
+ int bcount, pcount;
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ crypto_digest_init(tfm);
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ sg_set_buf(sg, p + pcount, plen);
+ crypto_digest_update(tfm, sg, 1);
+ }
+ /* we assume there is enough space in 'out' for the result */
+ crypto_digest_final(tfm, out);
+ }
+
+ printk("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return;
+}
+
+static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen,
+ int plen, char *out)
+{
+ struct scatterlist sg[1];
+ unsigned long cycles = 0;
+ int i, pcount;
+
+ local_bh_disable();
+ local_irq_disable();
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ crypto_digest_init(tfm);
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ sg_set_buf(sg, p + pcount, plen);
+ crypto_digest_update(tfm, sg, 1);
+ }
+ crypto_digest_final(tfm, out);
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ crypto_digest_init(tfm);
+
+ start = get_cycles();
+
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ sg_set_buf(sg, p + pcount, plen);
+ crypto_digest_update(tfm, sg, 1);
+ }
+ crypto_digest_final(tfm, out);
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+ local_irq_enable();
+ local_bh_enable();
+
+ printk("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return;
+}
+
+static void test_digest_speed(char *algo, unsigned int sec,
+ struct digest_speed *speed)
+{
+ struct crypto_tfm *tfm;
+ char output[1024];
+ int i;
+
+ printk("\ntesting speed of %s\n", algo);
+
+ tfm = crypto_alloc_tfm(algo, 0);
+
+ if (tfm == NULL) {
+ printk("failed to load transform for %s\n", algo);
+ return;
+ }
+
+ if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) {
+ printk("digestsize(%u) > outputbuffer(%zu)\n",
+ crypto_tfm_alg_digestsize(tfm), sizeof(output));
+ goto out;
+ }
+
+ for (i = 0; speed[i].blen != 0; i++) {
+ if (speed[i].blen > TVMEMSIZE) {
+ printk("template (%u) too big for tvmem (%u)\n",
+ speed[i].blen, TVMEMSIZE);
+ goto out;
+ }
+
+ printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+ memset(tvmem, 0xff, speed[i].blen);
+
+ if (sec)
+ test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec);
+ else
+ test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output);
+ }
+
+out:
+ crypto_free_tfm(tfm);
+}
+
static void test_deflate(void)
{
unsigned int i;
@@ -1086,6 +1202,60 @@ static void do_test(void)
des_speed_template);
break;
+ case 300:
+ /* fall through */
+
+ case 301:
+ test_digest_speed("md4", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 302:
+ test_digest_speed("md5", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 303:
+ test_digest_speed("sha1", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 304:
+ test_digest_speed("sha256", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 305:
+ test_digest_speed("sha384", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 306:
+ test_digest_speed("sha512", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 307:
+ test_digest_speed("wp256", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 308:
+ test_digest_speed("wp384", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 309:
+ test_digest_speed("wp512", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 310:
+ test_digest_speed("tgr128", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 311:
+ test_digest_speed("tgr160", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 312:
+ test_digest_speed("tgr192", sec, generic_digest_speed_template);
+ if (mode > 300 && mode < 400) break;
+
+ case 399:
+ break;
+
case 1000:
test_available();
break;
@@ -1113,7 +1283,14 @@ static int __init init(void)
kfree(xbuf);
kfree(tvmem);
- return 0;
+
+ /* We intentionally return -EAGAIN to prevent keeping
+ * the module loaded. It does all its work from init()
+ * and doesn't offer any runtime functionality
+ * => we don't need it in memory, do we?
+ * -- mludvig
+ */
+ return -EAGAIN;
}
/*
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 1f683ba794ee..1fac5602f633 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -65,6 +65,11 @@ struct cipher_speed {
unsigned int blen;
};
+struct digest_speed {
+ unsigned int blen; /* buffer length */
+ unsigned int plen; /* per-update length */
+};
+
/*
* MD4 test vectors from RFC1320
*/
@@ -2975,4 +2980,35 @@ static struct cipher_speed des_speed_template[] = {
{ .klen = 0, .blen = 0, }
};
+/*
+ * Digest speed tests
+ */
+static struct digest_speed generic_digest_speed_template[] = {
+ { .blen = 16, .plen = 16, },
+ { .blen = 64, .plen = 16, },
+ { .blen = 64, .plen = 64, },
+ { .blen = 256, .plen = 16, },
+ { .blen = 256, .plen = 64, },
+ { .blen = 256, .plen = 256, },
+ { .blen = 1024, .plen = 16, },
+ { .blen = 1024, .plen = 256, },
+ { .blen = 1024, .plen = 1024, },
+ { .blen = 2048, .plen = 16, },
+ { .blen = 2048, .plen = 256, },
+ { .blen = 2048, .plen = 1024, },
+ { .blen = 2048, .plen = 2048, },
+ { .blen = 4096, .plen = 16, },
+ { .blen = 4096, .plen = 256, },
+ { .blen = 4096, .plen = 1024, },
+ { .blen = 4096, .plen = 4096, },
+ { .blen = 8192, .plen = 16, },
+ { .blen = 8192, .plen = 256, },
+ { .blen = 8192, .plen = 1024, },
+ { .blen = 8192, .plen = 4096, },
+ { .blen = 8192, .plen = 8192, },
+
+ /* End marker */
+ { .blen = 0, .plen = 0, }
+};
+
#endif /* _CRYPTO_TCRYPT_H */
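Editorial aside, not part of the patch: the do_test() dispatch added above relies on deliberate switch fall-through. Mode 300 matches the first case and never satisfies the "mode > 300 && mode < 400" guard, so it falls through every digest speed test; any specific mode 301..312 runs exactly one test and then breaks. A minimal, self-contained C sketch of the same idiom, with placeholder names:

	#include <stdio.h>

	static void speed(const char *algo)
	{
		printf("testing speed of %s\n", algo);
	}

	static void dispatch(int mode)
	{
		switch (mode) {
		case 300:		/* run all digest speed tests */
			/* fall through */
		case 301:
			speed("md4");
			if (mode > 300 && mode < 400) break;
		case 302:
			speed("md5");
			if (mode > 300 && mode < 400) break;
		case 303:
			speed("sha1");
			if (mode > 300 && mode < 400) break;
		}
	}

	int main(void)
	{
		dispatch(300);	/* prints md4, md5, sha1 */
		dispatch(302);	/* prints md5 only */
		return 0;
	}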
diff --git a/crypto/tea.c b/crypto/tea.c
index a6a02b30e470..5367adc82fc9 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -45,10 +45,10 @@ struct xtea_ctx {
u32 KEY[4];
};
-static int tea_setkey(void *ctx_arg, const u8 *in_key,
- unsigned int key_len, u32 *flags)
-{
- struct tea_ctx *ctx = ctx_arg;
+static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
+{
+ struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
if (key_len != 16)
@@ -66,12 +66,11 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key,
}
-static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
+static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
-
- struct tea_ctx *ctx = ctx_arg;
+ struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
@@ -95,11 +94,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
out[1] = cpu_to_le32(z);
}
-static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
+static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
- struct tea_ctx *ctx = ctx_arg;
+ struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
@@ -125,10 +124,10 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
out[1] = cpu_to_le32(z);
}
-static int xtea_setkey(void *ctx_arg, const u8 *in_key,
- unsigned int key_len, u32 *flags)
-{
- struct xtea_ctx *ctx = ctx_arg;
+static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
+{
+ struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
if (key_len != 16)
@@ -146,12 +145,11 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key,
}
-static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
+static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
-
- struct xtea_ctx *ctx = ctx_arg;
+ struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
@@ -168,10 +166,10 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
out[1] = cpu_to_le32(z);
}
-static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
+static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
u32 y, z, sum;
- struct tea_ctx *ctx = ctx_arg;
+ struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
@@ -191,12 +189,11 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
}
-static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
+static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
-
- struct xtea_ctx *ctx = ctx_arg;
+ struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
@@ -213,10 +210,10 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
out[1] = cpu_to_le32(z);
}
-static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
+static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
u32 y, z, sum;
- struct tea_ctx *ctx = ctx_arg;
+ struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
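Editorial note (not part of the patch): the mechanical change running through tea.c above and tgr192.c, twofish.c and wp512.c below is the crypto core now passing the struct crypto_tfm itself instead of an opaque void *ctx, with each algorithm recovering its private state via crypto_tfm_ctx(). A sketch of the before/after pattern, assuming the <linux/crypto.h> of this series and a placeholder struct my_alg_ctx:

	/* Before: the core handed the algorithm a raw context pointer. */
	static void my_encrypt_old(void *ctx_arg, u8 *dst, const u8 *src)
	{
		struct my_alg_ctx *ctx = ctx_arg;	/* no tfm visible here */
		/* ... */
	}

	/* After: the core passes the tfm; the context is derived from it. */
	static void my_encrypt_new(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		struct my_alg_ctx *ctx = crypto_tfm_ctx(tfm);	/* same memory, typed accessor */
		/* ... */
	}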
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 2d8e44f6fbe9..a0fadf3dd3e2 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -496,11 +496,10 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
tctx->c = c;
}
-static void tgr192_init(void *ctx)
+static void tgr192_init(struct crypto_tfm *tfm)
{
- struct tgr192_ctx *tctx = ctx;
+ struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
- memset (tctx->hash, 0, 64);
tctx->a = 0x0123456789abcdefULL;
tctx->b = 0xfedcba9876543210ULL;
tctx->c = 0xf096a5b4c3b2e187ULL;
@@ -511,9 +510,10 @@ static void tgr192_init(void *ctx)
/* Update the message digest with the contents
* of INBUF with length INLEN. */
-static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
+static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf,
+ unsigned int len)
{
- struct tgr192_ctx *tctx = ctx;
+ struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
if (tctx->count == 64) { /* flush the buffer */
tgr192_transform(tctx, tctx->hash);
@@ -527,7 +527,7 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
for (; len && tctx->count < 64; len--) {
tctx->hash[tctx->count++] = *inbuf++;
}
- tgr192_update(tctx, NULL, 0);
+ tgr192_update(tfm, NULL, 0);
if (!len) {
return;
}
@@ -549,15 +549,15 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
/* The routine terminates the computation */
-static void tgr192_final(void *ctx, u8 * out)
+static void tgr192_final(struct crypto_tfm *tfm, u8 * out)
{
- struct tgr192_ctx *tctx = ctx;
+ struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
__be64 *dst = (__be64 *)out;
__be64 *be64p;
__le32 *le32p;
u32 t, msb, lsb;
- tgr192_update(tctx, NULL, 0); /* flush */ ;
+ tgr192_update(tfm, NULL, 0); /* flush */ ;
msb = 0;
t = tctx->nblocks;
@@ -585,7 +585,7 @@ static void tgr192_final(void *ctx, u8 * out)
while (tctx->count < 64) {
tctx->hash[tctx->count++] = 0;
}
- tgr192_update(tctx, NULL, 0); /* flush */ ;
+ tgr192_update(tfm, NULL, 0); /* flush */ ;
memset(tctx->hash, 0, 56); /* fill next block with zeroes */
}
/* append the 64 bit count */
@@ -601,22 +601,20 @@ static void tgr192_final(void *ctx, u8 * out)
dst[2] = be64p[2] = cpu_to_be64(tctx->c);
}
-static void tgr160_final(void *ctx, u8 * out)
+static void tgr160_final(struct crypto_tfm *tfm, u8 * out)
{
- struct tgr192_ctx *wctx = ctx;
u8 D[64];
- tgr192_final(wctx, D);
+ tgr192_final(tfm, D);
memcpy(out, D, TGR160_DIGEST_SIZE);
memset(D, 0, TGR192_DIGEST_SIZE);
}
-static void tgr128_final(void *ctx, u8 * out)
+static void tgr128_final(struct crypto_tfm *tfm, u8 * out)
{
- struct tgr192_ctx *wctx = ctx;
u8 D[64];
- tgr192_final(wctx, D);
+ tgr192_final(tfm, D);
memcpy(out, D, TGR128_DIGEST_SIZE);
memset(D, 0, TGR192_DIGEST_SIZE);
}
@@ -627,6 +625,7 @@ static struct crypto_alg tgr192 = {
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tgr192_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 7,
.cra_list = LIST_HEAD_INIT(tgr192.cra_list),
.cra_u = {.digest = {
.dia_digestsize = TGR192_DIGEST_SIZE,
@@ -641,6 +640,7 @@ static struct crypto_alg tgr160 = {
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tgr192_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 7,
.cra_list = LIST_HEAD_INIT(tgr160.cra_list),
.cra_u = {.digest = {
.dia_digestsize = TGR160_DIGEST_SIZE,
@@ -655,6 +655,7 @@ static struct crypto_alg tgr128 = {
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tgr192_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 7,
.cra_list = LIST_HEAD_INIT(tgr128.cra_list),
.cra_u = {.digest = {
.dia_digestsize = TGR128_DIGEST_SIZE,
diff --git a/crypto/twofish.c b/crypto/twofish.c
index ddfd5a3fcc5f..ec2488242e2d 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish.c
@@ -643,11 +643,11 @@ struct twofish_ctx {
};
/* Perform the key setup. */
-static int twofish_setkey(void *cx, const u8 *key,
- unsigned int key_len, u32 *flags)
+static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int key_len, u32 *flags)
{
- struct twofish_ctx *ctx = cx;
+ struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
int i, j, k;
@@ -802,9 +802,9 @@ static int twofish_setkey(void *cx, const u8 *key,
}
/* Encrypt one block. in and out may be the same. */
-static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
+static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct twofish_ctx *ctx = cx;
+ struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
@@ -839,9 +839,9 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
}
/* Decrypt one block. in and out may be the same. */
-static void twofish_decrypt(void *cx, u8 *out, const u8 *in)
+static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct twofish_ctx *ctx = cx;
+ struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
diff --git a/crypto/wp512.c b/crypto/wp512.c
index b226a126cfae..727d05a19ff4 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -981,9 +981,9 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {
}
-static void wp512_init (void *ctx) {
+static void wp512_init(struct crypto_tfm *tfm) {
+ struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
int i;
- struct wp512_ctx *wctx = ctx;
memset(wctx->bitLength, 0, 32);
wctx->bufferBits = wctx->bufferPos = 0;
@@ -993,10 +993,10 @@ static void wp512_init (void *ctx) {
}
}
-static void wp512_update(void *ctx, const u8 *source, unsigned int len)
+static void wp512_update(struct crypto_tfm *tfm, const u8 *source,
+ unsigned int len)
{
-
- struct wp512_ctx *wctx = ctx;
+ struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
int sourcePos = 0;
unsigned int bits_len = len * 8; // convert to number of bits
int sourceGap = (8 - ((int)bits_len & 7)) & 7;
@@ -1054,9 +1054,9 @@ static void wp512_update(void *ctx, const u8 *source, unsigned int len)
}
-static void wp512_final(void *ctx, u8 *out)
+static void wp512_final(struct crypto_tfm *tfm, u8 *out)
{
- struct wp512_ctx *wctx = ctx;
+ struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
int i;
u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
@@ -1087,22 +1087,20 @@ static void wp512_final(void *ctx, u8 *out)
wctx->bufferPos = bufferPos;
}
-static void wp384_final(void *ctx, u8 *out)
+static void wp384_final(struct crypto_tfm *tfm, u8 *out)
{
- struct wp512_ctx *wctx = ctx;
u8 D[64];
- wp512_final (wctx, D);
+ wp512_final(tfm, D);
memcpy (out, D, WP384_DIGEST_SIZE);
memset (D, 0, WP512_DIGEST_SIZE);
}
-static void wp256_final(void *ctx, u8 *out)
+static void wp256_final(struct crypto_tfm *tfm, u8 *out)
{
- struct wp512_ctx *wctx = ctx;
u8 D[64];
- wp512_final (wctx, D);
+ wp512_final(tfm, D);
memcpy (out, D, WP256_DIGEST_SIZE);
memset (D, 0, WP512_DIGEST_SIZE);
}
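Another editorial aside: the .cra_alignmask values added above (3 for sha512/sha384, 7 for the Tiger variants) ask the crypto layer to present buffers aligned to (mask + 1) bytes, i.e. 4 and 8 bytes, so the word-sized loads in those transforms stay safe on strict-alignment machines. A tiny sketch of the convention, with a placeholder helper name:

	/* alignmask is "required alignment - 1": mask 7 -> 8-byte, mask 3 -> 4-byte */
	static inline int aligned_for(const void *p, unsigned int alignmask)
	{
		return ((unsigned long)p & alignmask) == 0;
	}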
diff --git a/drivers/Makefile b/drivers/Makefile
index 3c5170310bd0..fc2d744a4e4a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -74,4 +74,5 @@ obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
+obj-$(CONFIG_GENERIC_TIME) += clocksource/
obj-$(CONFIG_DMA_ENGINE) += dma/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 94b8d820c512..610d2cc02cf8 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -328,7 +328,7 @@ config ACPI_CONTAINER
config ACPI_HOTPLUG_MEMORY
tristate "Memory Hotplug"
depends on ACPI
- depends on MEMORY_HOTPLUG || X86_64
+ depends on MEMORY_HOTPLUG
default n
help
This driver adds supports for ACPI Memory Hotplug. This driver
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index e0a95ba72371..1012284ff4f7 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -57,6 +57,7 @@ MODULE_LICENSE("GPL");
static int acpi_memory_device_add(struct acpi_device *device);
static int acpi_memory_device_remove(struct acpi_device *device, int type);
+static int acpi_memory_device_start(struct acpi_device *device);
static struct acpi_driver acpi_memory_device_driver = {
.name = ACPI_MEMORY_DEVICE_DRIVER_NAME,
@@ -65,48 +66,79 @@ static struct acpi_driver acpi_memory_device_driver = {
.ops = {
.add = acpi_memory_device_add,
.remove = acpi_memory_device_remove,
+ .start = acpi_memory_device_start,
},
};
+struct acpi_memory_info {
+ struct list_head list;
+ u64 start_addr; /* Memory Range start physical addr */
+ u64 length; /* Memory Range length */
+ unsigned short caching; /* memory cache attribute */
+ unsigned short write_protect; /* memory read/write attribute */
+ unsigned int enabled:1;
+};
+
struct acpi_memory_device {
acpi_handle handle;
unsigned int state; /* State of the memory device */
- unsigned short caching; /* memory cache attribute */
- unsigned short write_protect; /* memory read/write attribute */
- u64 start_addr; /* Memory Range start physical addr */
- u64 length; /* Memory Range length */
+ struct list_head res_list;
};
+static acpi_status
+acpi_memory_get_resource(struct acpi_resource *resource, void *context)
+{
+ struct acpi_memory_device *mem_device = context;
+ struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *new;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(resource, &address64);
+ if (ACPI_FAILURE(status) ||
+ (address64.resource_type != ACPI_MEMORY_RANGE))
+ return AE_OK;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ /* Can we combine the resource range information? */
+ if ((info->caching == address64.info.mem.caching) &&
+ (info->write_protect == address64.info.mem.write_protect) &&
+ (info->start_addr + info->length == address64.minimum)) {
+ info->length += address64.address_length;
+ return AE_OK;
+ }
+ }
+
+ new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
+ if (!new)
+ return AE_ERROR;
+
+ INIT_LIST_HEAD(&new->list);
+ new->caching = address64.info.mem.caching;
+ new->write_protect = address64.info.mem.write_protect;
+ new->start_addr = address64.minimum;
+ new->length = address64.address_length;
+ list_add_tail(&new->list, &mem_device->res_list);
+
+ return AE_OK;
+}
+
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
acpi_status status;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_resource *resource = NULL;
- struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *n;
ACPI_FUNCTION_TRACE("acpi_memory_get_device_resources");
- /* Get the range from the _CRS */
- status = acpi_get_current_resources(mem_device->handle, &buffer);
- if (ACPI_FAILURE(status))
- return_VALUE(-EINVAL);
-
- resource = (struct acpi_resource *)buffer.pointer;
- status = acpi_resource_to_address64(resource, &address64);
- if (ACPI_SUCCESS(status)) {
- if (address64.resource_type == ACPI_MEMORY_RANGE) {
- /* Populate the structure */
- mem_device->caching = address64.info.mem.caching;
- mem_device->write_protect =
- address64.info.mem.write_protect;
- mem_device->start_addr = address64.minimum;
- mem_device->length = address64.address_length;
- }
+ status = acpi_walk_resources(mem_device->handle, METHOD_NAME__CRS,
+ acpi_memory_get_resource, mem_device);
+ if (ACPI_FAILURE(status)) {
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ return -EINVAL;
}
- acpi_os_free(buffer.pointer);
- return_VALUE(0);
+ return 0;
}
static int
@@ -181,7 +213,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
- int result;
+ int result, num_enabled = 0;
+ struct acpi_memory_info *info;
+ int node;
ACPI_FUNCTION_TRACE("acpi_memory_enable_device");
@@ -194,15 +228,35 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
return result;
}
+ node = acpi_get_node(mem_device->handle);
/*
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
+ * We don't have a memory-hot-add rollback function now.
+ * (i.e. memory-hot-remove function)
*/
- result = add_memory(mem_device->start_addr, mem_device->length);
- if (result) {
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ u64 start_pfn, end_pfn;
+
+ start_pfn = info->start_addr >> PAGE_SHIFT;
+ end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT;
+
+ if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) {
+ /* already enabled. try next area */
+ num_enabled++;
+ continue;
+ }
+
+ result = add_memory(node, info->start_addr, info->length);
+ if (result)
+ continue;
+ info->enabled = 1;
+ num_enabled++;
+ }
+ if (!num_enabled) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
mem_device->state = MEMORY_INVALID_STATE;
- return result;
+ return -EINVAL;
}
return result;
@@ -246,8 +300,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
int result;
- u64 start = mem_device->start_addr;
- u64 len = mem_device->length;
+ struct acpi_memory_info *info, *n;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
@@ -255,10 +308,13 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
* Ask the VM to offline this memory range.
* Note: Assume that this function returns zero on success
*/
- result = remove_memory(start, len);
- if (result) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n"));
- return_VALUE(result);
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
+ if (info->enabled) {
+ result = remove_memory(info->start_addr, info->length);
+ if (result)
+ return result;
+ }
+ kfree(info);
}
/* Power-off and eject the device */
@@ -356,6 +412,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
return_VALUE(-ENOMEM);
memset(mem_device, 0, sizeof(struct acpi_memory_device));
+ INIT_LIST_HEAD(&mem_device->res_list);
mem_device->handle = device->handle;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
@@ -391,6 +448,25 @@ static int acpi_memory_device_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
+static int acpi_memory_device_start (struct acpi_device *device)
+{
+ struct acpi_memory_device *mem_device;
+ int result = 0;
+
+ ACPI_FUNCTION_TRACE("acpi_memory_device_start");
+
+ mem_device = acpi_driver_data(device);
+
+ if (!acpi_memory_check_device(mem_device)) {
+ /* call add_memory func */
+ result = acpi_memory_enable_device(mem_device);
+ if (result)
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error in acpi_memory_enable_device\n"));
+ }
+ return_VALUE(result);
+}
+
/*
* Helper function to check for memory device
*/
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e2c1a16078c9..13d6d5bdea26 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -254,5 +254,18 @@ int acpi_get_pxm(acpi_handle h)
} while (ACPI_SUCCESS(status));
return -1;
}
-
EXPORT_SYMBOL(acpi_get_pxm);
+
+int acpi_get_node(acpi_handle *handle)
+{
+ int pxm, node = -1;
+
+ ACPI_FUNCTION_TRACE("acpi_get_node");
+
+ pxm = acpi_get_pxm(handle);
+ if (pxm >= 0)
+ node = acpi_map_pxm_to_node(pxm);
+
+ return_VALUE(node);
+}
+EXPORT_SYMBOL(acpi_get_node);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3b97a5eae9e8..8a74bf3efd8e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -206,11 +206,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,
static void acpi_safe_halt(void)
{
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
if (!need_resched())
safe_halt();
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
}
static atomic_t c3_cpu_count;
@@ -330,10 +330,10 @@ static void acpi_processor_idle(void)
* Invoke the current Cx state to put the processor to sleep.
*/
if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
if (need_resched()) {
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return;
}
@@ -369,9 +369,14 @@ static void acpi_processor_idle(void)
t2 = inl(acpi_fadt.xpm_tmr_blk.address);
/* Get end time (ticks) */
t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+
+#ifdef CONFIG_GENERIC_TIME
+ /* TSC halts in C2, so notify users */
+ mark_tsc_unstable();
+#endif
/* Re-enable interrupts */
local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -409,9 +414,13 @@ static void acpi_processor_idle(void)
ACPI_MTX_DO_NOT_LOCK);
}
+#ifdef CONFIG_GENERIC_TIME
+ /* TSC halts in C3, so notify users */
+ mark_tsc_unstable();
+#endif
/* Re-enable interrupts */
local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 889855d8d9f9..9e3e2a69c03a 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -180,8 +180,9 @@ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
amba_attr(id, "%08x\n", dev->periphid);
amba_attr(irq0, "%u\n", dev->irq[0]);
amba_attr(irq1, "%u\n", dev->irq[1]);
-amba_attr(resource, "\t%08lx\t%08lx\t%08lx\n",
- dev->res.start, dev->res.end, dev->res.flags);
+amba_attr(resource, "\t%016llx\t%016llx\t%016lx\n",
+ (unsigned long long)dev->res.start, (unsigned long long)dev->res.end,
+ dev->res.flags);
/**
* amba_device_register - register an AMBA device
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 4b6bf19c39c0..4048681f36d5 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2257,7 +2257,8 @@ static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_
}
PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at"
- " IO %lx, IRQ %u, MEM %p", pci_resource_start(pci_dev, 1),
+ " IO %llx, IRQ %u, MEM %p",
+ (unsigned long long)pci_resource_start(pci_dev, 1),
irq, bus_to_virt(pci_resource_start(pci_dev, 0)));
// check IO region
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 7f7ec288824d..d40605c1af73 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
@@ -754,7 +755,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
fs_kfree_skb (skb);
fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td);
- memset (td, 0x12, sizeof (struct FS_BPENTRY));
+ memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY));
kfree (td);
break;
default:
@@ -951,7 +952,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
it most likely that the chip will notice it. It also prevents us
from having to wait for completion. On the other hand, we may
need to wait for completion anyway, to see if it completed
- succesfully. */
+ successfully. */
switch (atm_vcc->qos.aal) {
case ATM_AAL2:
@@ -1657,9 +1658,10 @@ static int __devinit fs_init (struct fs_dev *dev)
func_enter ();
pci_dev = dev->pci_dev;
- printk (KERN_INFO "found a FireStream %d card, base %08lx, irq%d.\n",
+ printk (KERN_INFO "found a FireStream %d card, base %16llx, irq%d.\n",
IS_FS50(dev)?50:155,
- pci_resource_start(pci_dev, 0), dev->pci_dev->irq);
+ (unsigned long long)pci_resource_start(pci_dev, 0),
+ dev->pci_dev->irq);
if (fs_debug & FS_DEBUG_INIT)
my_hd ((unsigned char *) dev, sizeof (*dev));
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index dd712b24ec91..4bef76a2f3f2 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
+#include <linux/node.h>
#include "base.h"
@@ -57,13 +58,12 @@ static void __devinit register_cpu_control(struct cpu *cpu)
{
sysdev_create_file(&cpu->sysdev, &attr_online);
}
-void unregister_cpu(struct cpu *cpu, struct node *root)
+void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->sysdev.id;
- if (root)
- sysfs_remove_link(&root->sysdev.kobj,
- kobject_name(&cpu->sysdev.kobj));
+ unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
+
sysdev_remove_file(&cpu->sysdev, &attr_online);
sysdev_unregister(&cpu->sysdev);
@@ -109,23 +109,21 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*
* Initialize and register the CPU device.
*/
-int __devinit register_cpu(struct cpu *cpu, int num, struct node *root)
+int __devinit register_cpu(struct cpu *cpu, int num)
{
int error;
-
cpu->node_id = cpu_to_node(num);
cpu->sysdev.id = num;
cpu->sysdev.cls = &cpu_sysdev_class;
error = sysdev_register(&cpu->sysdev);
- if (!error && root)
- error = sysfs_create_link(&root->sysdev.kobj,
- &cpu->sysdev.kobj,
- kobject_name(&cpu->sysdev.kobj));
+
if (!error && !cpu->no_control)
register_cpu_control(cpu);
if (!error)
cpu_sys_devices[num] = &cpu->sysdev;
+ if (!error)
+ register_cpu_under_node(num, cpu_to_node(num));
#ifdef CONFIG_KEXEC
if (!error)
@@ -145,5 +143,13 @@ EXPORT_SYMBOL_GPL(get_cpu_sysdev);
int __init cpu_dev_init(void)
{
- return sysdev_class_register(&cpu_sysdev_class);
+ int err;
+
+ err = sysdev_class_register(&cpu_sysdev_class);
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ if (!err)
+ err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
+#endif
+
+ return err;
}
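Editorial sketch (an assumption, not from the patch): with the struct node * parameter dropped from register_cpu()/unregister_cpu(), architecture code no longer looks up the node itself; it simply registers the CPU, and the driver core links it under its NUMA node through register_cpu_under_node() in node.c below. With arch_cpus[] as a placeholder per-CPU array:

	static int __init arch_register_cpus(void)
	{
		int i, err = 0;

		for_each_present_cpu(i) {
			err = register_cpu(&arch_cpus[i], i);	/* no struct node * any more */
			if (err)
				break;
		}
		return err;
	}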
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index e2f64f91ed05..33c5cce1560b 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -7,6 +7,7 @@
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/poison.h>
/*
* Pool allocator ... wraps the dma_alloc_coherent page allocator, so
@@ -35,8 +36,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
};
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-#define POOL_POISON_FREED 0xa7 /* !inuse */
-#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
static DECLARE_MUTEX (pools_lock);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dd547af4681a..c6b7d9c4b651 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -306,11 +306,13 @@ static ssize_t
memory_probe_store(struct class *class, const char *buf, size_t count)
{
u64 phys_addr;
+ int nid;
int ret;
phys_addr = simple_strtoull(buf, NULL, 0);
- ret = add_memory(phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
+ nid = memory_add_physaddr_to_nid(phys_addr);
+ ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
if (ret)
count = ret;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c80c3aeed004..eae2bdc183bb 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -11,6 +11,7 @@
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
+#include <linux/cpu.h>
static struct sysdev_class node_class = {
set_kset_name("node"),
@@ -190,6 +191,66 @@ void unregister_node(struct node *node)
sysdev_unregister(&node->sysdev);
}
+struct node node_devices[MAX_NUMNODES];
+
+/*
+ * register cpu under node
+ */
+int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ if (node_online(nid)) {
+ struct sys_device *obj = get_cpu_sysdev(cpu);
+ if (!obj)
+ return 0;
+ return sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ &obj->kobj,
+ kobject_name(&obj->kobj));
+ }
+
+ return 0;
+}
+
+int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ if (node_online(nid)) {
+ struct sys_device *obj = get_cpu_sysdev(cpu);
+ if (obj)
+ sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ kobject_name(&obj->kobj));
+ }
+ return 0;
+}
+
+int register_one_node(int nid)
+{
+ int error = 0;
+ int cpu;
+
+ if (node_online(nid)) {
+ int p_node = parent_node(nid);
+ struct node *parent = NULL;
+
+ if (p_node != nid)
+ parent = &node_devices[p_node];
+
+ error = register_node(&node_devices[nid], nid, parent);
+
+ /* link cpu under this node */
+ for_each_present_cpu(cpu) {
+ if (cpu_to_node(cpu) == nid)
+ register_cpu_under_node(cpu, nid);
+ }
+ }
+
+ return error;
+
+}
+
+void unregister_one_node(int nid)
+{
+ unregister_node(&node_devices[nid]);
+}
+
static int __init register_node_type(void)
{
return sysdev_class_register(&node_class);
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
index 520679ce53a8..826093ef4c7e 100644
--- a/drivers/base/power/resume.c
+++ b/drivers/base/power/resume.c
@@ -53,8 +53,7 @@ void dpm_resume(void)
struct device * dev = to_device(entry);
get_device(dev);
- list_del_init(entry);
- list_add_tail(entry, &dpm_active);
+ list_move_tail(entry, &dpm_active);
up(&dpm_list_sem);
if (!dev->power.prev_state.event)
@@ -101,8 +100,7 @@ void dpm_power_up(void)
struct device * dev = to_device(entry);
get_device(dev);
- list_del_init(entry);
- list_add_tail(entry, &dpm_active);
+ list_move_tail(entry, &dpm_active);
resume_device(dev);
put_device(dev);
}
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index 1a1fe43a3057..69509e02f703 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -116,12 +116,10 @@ int device_suspend(pm_message_t state)
/* Check if the device got removed */
if (!list_empty(&dev->power.entry)) {
/* Move it to the dpm_off or dpm_off_irq list */
- if (!error) {
- list_del(&dev->power.entry);
- list_add(&dev->power.entry, &dpm_off);
- } else if (error == -EAGAIN) {
- list_del(&dev->power.entry);
- list_add(&dev->power.entry, &dpm_off_irq);
+ if (!error)
+ list_move(&dev->power.entry, &dpm_off);
+ else if (error == -EAGAIN) {
+ list_move(&dev->power.entry, &dpm_off_irq);
error = 0;
}
}
@@ -139,8 +137,7 @@ int device_suspend(pm_message_t state)
*/
while (!list_empty(&dpm_off_irq)) {
struct list_head * entry = dpm_off_irq.next;
- list_del(entry);
- list_add(entry, &dpm_off);
+ list_move(entry, &dpm_off);
}
dpm_resume();
}
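The resume.c and suspend.c hunks above are a pure cleanup: the open-coded unlink-and-relink pairs become the list.h helpers. Conceptually (a sketch, not the actual <linux/list.h> implementation):

	static inline void sketch_list_move(struct list_head *entry,
					    struct list_head *head)
	{
		list_del(entry);	/* unlink from wherever it currently sits */
		list_add(entry, head);	/* re-insert at the head of the target list */
	}

	/* list_move_tail() is the same idea with list_add_tail() */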
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8c52421cbc54..c2d621632383 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
return 0;
}
-static int topology_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -125,7 +125,7 @@ static int topology_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block topology_cpu_notifier =
+static struct notifier_block __cpuinitdata topology_cpu_notifier =
{
.notifier_call = topology_cpu_callback,
};
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9dc294a74953..18dd026f470d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -74,7 +74,6 @@
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
-#include <linux/kthread.h>
#include <asm/uaccess.h>
@@ -211,7 +210,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
{
struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
struct address_space *mapping = file->f_mapping;
- struct address_space_operations *aops = mapping->a_ops;
+ const struct address_space_operations *aops = mapping->a_ops;
pgoff_t index;
unsigned offset, bv_offs;
int len, ret;
@@ -579,6 +578,8 @@ static int loop_thread(void *data)
struct loop_device *lo = data;
struct bio *bio;
+ daemonize("loop%d", lo->lo_number);
+
/*
* loop can be used in an encrypted device,
* hence, it mustn't be stopped at all
@@ -591,6 +592,11 @@ static int loop_thread(void *data)
lo->lo_state = Lo_bound;
lo->lo_pending = 1;
+ /*
+ * complete it, we are running
+ */
+ complete(&lo->lo_done);
+
for (;;) {
int pending;
@@ -623,6 +629,7 @@ static int loop_thread(void *data)
break;
}
+ complete(&lo->lo_done);
return 0;
}
@@ -739,7 +746,6 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
unsigned lo_blocksize;
int lo_flags = 0;
int error;
- struct task_struct *tsk;
loff_t size;
/* This is safe, since we have a reference from open(). */
@@ -778,7 +784,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
error = -EINVAL;
if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- struct address_space_operations *aops = mapping->a_ops;
+ const struct address_space_operations *aops = mapping->a_ops;
/*
* If we can't read - sorry. If we only can't write - well,
* it's going to be read-only.
@@ -833,11 +839,10 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
set_blocksize(bdev, lo_blocksize);
- tsk = kthread_run(loop_thread, lo, "loop%d", lo->lo_number);
- if (IS_ERR(tsk)) {
- error = PTR_ERR(tsk);
+ error = kernel_thread(loop_thread, lo, CLONE_KERNEL);
+ if (error < 0)
goto out_putf;
- }
+ wait_for_completion(&lo->lo_done);
return 0;
out_putf:
@@ -893,9 +898,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- if (!lo->lo_thread)
- return -EINVAL;
-
if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
return -EBUSY;
@@ -909,7 +911,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
complete(&lo->lo_bh_done);
spin_unlock_irq(&lo->lo_lock);
- kthread_stop(lo->lo_thread);
+ wait_for_completion(&lo->lo_done);
lo->lo_backing_file = NULL;
@@ -922,7 +924,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
lo->lo_sizelimit = 0;
lo->lo_encrypt_key_size = 0;
lo->lo_flags = 0;
- lo->lo_thread = NULL;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -1287,6 +1288,7 @@ static int __init loop_init(void)
if (!lo->lo_queue)
goto out_mem4;
mutex_init(&lo->lo_ctl_mutex);
+ init_completion(&lo->lo_done);
init_completion(&lo->lo_bh_done);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 852b564e903a..1a9dee19efcf 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -707,7 +707,7 @@ static int pf_detect(void)
if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
pf_scratch, PI_PF, verbose, pf->name)) {
- if (!pf_probe(pf) && pf->disk) {
+ if (pf->disk && !pf_probe(pf)) {
pf->present = 1;
k++;
} else
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 940bfd7951e5..0378da04cfa2 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -191,7 +191,7 @@ static int ramdisk_set_page_dirty(struct page *page)
return 0;
}
-static struct address_space_operations ramdisk_aops = {
+static const struct address_space_operations ramdisk_aops = {
.readpage = ramdisk_readpage,
.prepare_write = ramdisk_prepare_write,
.commit_write = ramdisk_commit_write,
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 2ae08b343b93..8144ce9f4df0 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1694,9 +1694,10 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
DPRINTK("waiting for probe_comp\n");
wait_for_completion(&host->probe_comp);
- printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
+ printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
- pci_resource_start(pdev, 0), pdev->irq, host->major);
+ (unsigned long long)pci_resource_start(pdev, 0),
+ pdev->irq, host->major);
carm_host_id++;
pci_set_drvdata(pdev, host);
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index a71a240611e0..ed8dca84ff69 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -423,6 +423,9 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
nsh.len = skb->len;
s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
+ if (!s)
+ return -ENOMEM;
+
skb_reserve(s, NSHL);
memcpy(skb_put(s, skb->len), skb->data, skb->len);
if (skb->len & 0x0001)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 63f28d169b36..c40e487d9f5c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -62,6 +62,23 @@ config HW_CONSOLE
depends on VT && !S390 && !UML
default y
+config VT_HW_CONSOLE_BINDING
+ bool "Support for binding and unbinding console drivers"
+ depends on HW_CONSOLE
+ default n
+ ---help---
+ The virtual terminal is the device that interacts with the physical
+ terminal through console drivers. On these systems, at least one
+ console driver is loaded. In other configurations, additional console
+ drivers may be enabled, such as the framebuffer console. If more than
+ 1 console driver is enabled, setting this to 'y' will allow you to
+ select the console driver that will serve as the backend for the
+ virtual terminals.
+
+ See <file:Documentation/console/console.txt> for more
+ information. For framebuffer console users, please refer to
+ <file:Documentation/fb/fbcon.txt>.
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
---help---
@@ -670,20 +687,7 @@ config NWFLASH
If you're not sure, say N.
-config HW_RANDOM
- tristate "Intel/AMD/VIA HW Random Number Generator support"
- depends on (X86 || IA64) && PCI
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Intel i8xx-based motherboards,
- AMD 76x-based motherboards, and Via Nehemiah CPUs.
-
- Provides a character driver, used to read() entropy data.
-
- To compile this driver as a module, choose M here: the
- module will be called hw_random.
-
- If unsure, say N.
+source "drivers/char/hw_random/Kconfig"
config NVRAM
tristate "/dev/nvram support"
@@ -935,12 +939,36 @@ config MWAVE
config SCx200_GPIO
tristate "NatSemi SCx200 GPIO Support"
depends on SCx200
+ select NSC_GPIO
help
Give userspace access to the GPIO pins on the National
Semiconductor SCx200 processors.
If compiled as a module, it will be called scx200_gpio.
+config PC8736x_GPIO
+ tristate "NatSemi PC8736x GPIO Support"
+ depends on X86
+ default SCx200_GPIO # mostly N
+ select NSC_GPIO # needed for support routines
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip
+ has multiple functional units, including several managed by
+ the hwmon/pc87360 driver. Tested with the PC-87366.
+
+ If compiled as a module, it will be called pc8736x_gpio.
+
+config NSC_GPIO
+ tristate "NatSemi Base GPIO Support"
+ depends on X86_32
+ # selected by SCx200_GPIO and PC8736x_GPIO
+ # what about 2 selectors differing: m != y
+ help
+ Common support used (and needed) by the scx200_gpio and
+ pc8736x_gpio drivers. If those drivers are built as
+ modules, this one will be too, named nsc_gpio.
+
config CS5535_GPIO
tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
depends on X86_32
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fb919bfb2824..6e0f4469d8bb 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -75,13 +75,15 @@ endif
obj-$(CONFIG_TOSHIBA) += toshiba.o
obj-$(CONFIG_I8K) += i8k.o
obj-$(CONFIG_DS1620) += ds1620.o
-obj-$(CONFIG_HW_RANDOM) += hw_random.o
+obj-$(CONFIG_HW_RANDOM) += hw_random/
obj-$(CONFIG_FTAPE) += ftape/
obj-$(CONFIG_COBALT_LCD) += lcd.o
obj-$(CONFIG_PPDEV) += ppdev.o
obj-$(CONFIG_NWBUTTON) += nwbutton.o
obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
+obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 46685a540772..9826a399fa02 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -55,9 +55,9 @@ config AGP_AMD
X on AMD Irongate, 761, and 762 chipsets.
config AGP_AMD64
- tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
+ tristate "AMD Opteron/Athlon64 on-CPU GART support" if !IOMMU
depends on AGP && X86
- default y if GART_IOMMU
+ default y if IOMMU
help
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 1f776651ac64..51d0d562d01e 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -118,7 +118,7 @@ static int amd_create_gatt_pages(int nr_tables)
return retval;
}
-/* Since we don't need contigious memory we just try
+/* Since we don't need contiguous memory we just try
* to get the gatt table once
*/
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index ac3c33a2e37d..f690ee8cb732 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -15,11 +15,9 @@
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
+#include <asm/k8.h>
#include "agp.h"
-/* Will need to be increased if AMD64 ever goes >8-way. */
-#define MAX_HAMMER_GARTS 8
-
/* PTE bits. */
#define GPTE_VALID 1
#define GPTE_COHERENT 2
@@ -53,28 +51,12 @@
#define ULI_X86_64_HTT_FEA_REG 0x50
#define ULI_X86_64_ENU_SCR_REG 0x54
-static int nr_garts;
-static struct pci_dev * hammers[MAX_HAMMER_GARTS];
-
static struct resource *aperture_resource;
static int __initdata agp_try_unsupported = 1;
-#define for_each_nb() for(gart_iterator=0;gart_iterator<nr_garts;gart_iterator++)
-
-static void flush_amd64_tlb(struct pci_dev *dev)
-{
- u32 tmp;
-
- pci_read_config_dword (dev, AMD64_GARTCACHECTL, &tmp);
- tmp |= INVGART;
- pci_write_config_dword (dev, AMD64_GARTCACHECTL, tmp);
-}
-
static void amd64_tlbflush(struct agp_memory *temp)
{
- int gart_iterator;
- for_each_nb()
- flush_amd64_tlb(hammers[gart_iterator]);
+ k8_flush_garts();
}
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
@@ -153,7 +135,7 @@ static int amd64_fetch_size(void)
u32 temp;
struct aper_size_info_32 *values;
- dev = hammers[0];
+ dev = k8_northbridges[0];
if (dev==NULL)
return 0;
@@ -201,9 +183,6 @@ static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table)
tmp &= ~(DISGARTCPU | DISGARTIO);
pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
- /* keep CPU's coherent. */
- flush_amd64_tlb (hammer);
-
return aper_base;
}
@@ -222,13 +201,14 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
static int amd_8151_configure(void)
{
unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
- int gart_iterator;
+ int i;
/* Configure AGP regs in each x86-64 host bridge. */
- for_each_nb() {
+ for (i = 0; i < num_k8_northbridges; i++) {
agp_bridge->gart_bus_addr =
- amd64_configure(hammers[gart_iterator],gatt_bus);
+ amd64_configure(k8_northbridges[i], gatt_bus);
}
+ k8_flush_garts();
return 0;
}
@@ -236,12 +216,13 @@ static int amd_8151_configure(void)
static void amd64_cleanup(void)
{
u32 tmp;
- int gart_iterator;
- for_each_nb() {
+ int i;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ struct pci_dev *dev = k8_northbridges[i];
/* disable gart translation */
- pci_read_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, &tmp);
+ pci_read_config_dword (dev, AMD64_GARTAPERTURECTL, &tmp);
tmp &= ~AMD64_GARTEN;
- pci_write_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, tmp);
+ pci_write_config_dword (dev, AMD64_GARTAPERTURECTL, tmp);
}
}
@@ -311,7 +292,7 @@ static int __devinit aperture_valid(u64 aper, u32 size)
/*
* W*s centric BIOS sometimes only set up the aperture in the AGP
* bridge, not the northbridge. On AMD64 this is handled early
- * in aperture.c, but when GART_IOMMU is not enabled or we run
+ * in aperture.c, but when IOMMU is not enabled or we run
* on a 32bit kernel this needs to be redone.
* Unfortunately it is impossible to fix the aperture here because it's too late
* to allocate that much memory. But at least error out cleanly instead of
@@ -361,17 +342,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
{
- struct pci_dev *loop_dev = NULL;
- int i = 0;
-
- /* cache pci_devs of northbridges. */
- while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev))
- != NULL) {
- if (i == MAX_HAMMER_GARTS) {
- printk(KERN_ERR PFX "Too many northbridges for AGP\n");
- return -1;
- }
- if (fix_northbridge(loop_dev, pdev, cap_ptr) < 0) {
+ int i;
+
+ if (cache_k8_northbridges() < 0)
+ return -ENODEV;
+
+ i = 0;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ struct pci_dev *dev = k8_northbridges[i];
+ if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
printk(KERN_ERR PFX "No usable aperture found.\n");
#ifdef __x86_64__
/* should port this to i386 */
@@ -379,10 +358,8 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
#endif
return -1;
}
- hammers[i++] = loop_dev;
}
- nr_garts = i;
- return i == 0 ? -1 : 0;
+ return 0;
}
/* Handle AMD 8151 quirks */
@@ -450,7 +427,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
}
/* shadow x86-64 registers into ULi registers */
- pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea);
+ pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
if ((httfea & 0x7fff) >> (32 - 25))
@@ -513,7 +490,7 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev)
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
/* shadow x86-64 registers into NVIDIA registers */
- pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase);
+ pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
@@ -754,10 +731,6 @@ static struct pci_driver agp_amd64_pci_driver = {
int __init agp_amd64_init(void)
{
int err = 0;
- static struct pci_device_id amd64nb[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
- { },
- };
if (agp_off)
return -EINVAL;
@@ -774,7 +747,7 @@ int __init agp_amd64_init(void)
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd64nb))
+ if (!pci_dev_present(k8_nb_ids))
return -ENODEV;
/* Look for any AGP bridge */
@@ -802,7 +775,7 @@ static void __exit agp_amd64_cleanup(void)
/* On AMD64 the PCI driver needs to initialize this driver early
for the IOMMU, so it has to be called via a backdoor. */
-#ifndef CONFIG_GART_IOMMU
+#ifndef CONFIG_IOMMU
module_init(agp_amd64_init);
module_exit(agp_amd64_cleanup);
#endif
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 06fd10ba0c5e..160564345993 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -261,7 +261,7 @@ static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
#endif
/*
- *Since we don't need contigious memory we just try
+ *Since we don't need contiguous memory we just try
* to get the gatt table once
*/
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 86a966b65236..b788b0a3bbf3 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -177,7 +177,7 @@ static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
/*
- * Since we don't need contigious memory we just try
+ * Since we don't need contiguous memory we just try
* to get the gatt table once
*/
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index cfa7922cb431..d73be4c2db8a 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -329,9 +329,8 @@ static int __devinit agp_sgi_init(void)
static void __devexit agp_sgi_cleanup(void)
{
- if (sgi_tioca_agp_bridges)
- kfree(sgi_tioca_agp_bridges);
- sgi_tioca_agp_bridges=NULL;
+ kfree(sgi_tioca_agp_bridges);
+ sgi_tioca_agp_bridges = NULL;
}
module_init(agp_sgi_init);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 9275d5e52e6d..72fb60765c45 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -209,13 +209,16 @@ static int __init applicom_init(void)
RamIO = ioremap(dev->resource[0].start, LEN_RAM_IO);
if (!RamIO) {
- printk(KERN_INFO "ac.o: Failed to ioremap PCI memory space at 0x%lx\n", dev->resource[0].start);
+ printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
+ "space at 0x%llx\n",
+ (unsigned long long)dev->resource[0].start);
pci_disable_device(dev);
return -EIO;
}
- printk(KERN_INFO "Applicom %s found at mem 0x%lx, irq %d\n",
- applicom_pci_devnames[dev->device-1], dev->resource[0].start,
+ printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n",
+ applicom_pci_devnames[dev->device-1],
+ (unsigned long long)dev->resource[0].start,
dev->irq);
boardno = ac_register_board(dev->resource[0].start, RamIO,0);
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 6543b9a14c42..d117cc997192 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -43,7 +43,7 @@ typedef struct drm_mem_stats {
unsigned long bytes_freed;
} drm_mem_stats_t;
-static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(drm_mem_lock);
static unsigned long drm_ram_available = 0; /* In pages */
static unsigned long drm_ram_used = 0;
static drm_mem_stats_t drm_mem_stats[] =
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index b7f17457b424..78a81a4a99c5 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -557,7 +557,7 @@ via_init_dmablit(drm_device_t *dev)
blitq->num_outstanding = 0;
blitq->is_active = 0;
blitq->aborting = 0;
- blitq->blit_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&blitq->blit_lock);
for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
}
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 9cad8501d62c..dc0602ae8503 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -80,7 +80,7 @@ static int invalid_lilo_config;
/* The ISA boards do window flipping into the same spaces so its only sane
with a single lock. It's still pretty efficient */
-static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(epca_lock);
/* -----------------------------------------------------------------------
MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index ac626418b329..d69f2ad9a67d 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -117,12 +117,12 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
#endif /* not MODULE */
-#if defined(CONFIG_X86) || defined(CONFIG_S390)
+#if defined(CONFIG_X86_64) || defined(CONFIG_S390)
# define HAVE_MONOTONIC
# define TIMER_FREQ 1000000000ULL
#elif defined(CONFIG_IA64)
# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
-#elif defined(CONFIG_PPC64)
+#else
# define TIMER_FREQ (HZ*loops_per_jiffy)
#endif
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 8d97b3911293..afa26b65dac3 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -1320,11 +1320,12 @@ static struct tty_operations hvcs_ops = {
static int hvcs_alloc_index_list(int n)
{
int i;
+
hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL);
if (!hvcs_index_list)
return -ENOMEM;
hvcs_index_count = n;
- for(i = 0; i < hvcs_index_count; i++)
+ for (i = 0; i < hvcs_index_count; i++)
hvcs_index_list[i] = -1;
return 0;
}
@@ -1332,11 +1333,9 @@ static int hvcs_alloc_index_list(int n)
static void hvcs_free_index_list(void)
{
/* Paranoia check to be thorough. */
- if (hvcs_index_list) {
- kfree(hvcs_index_list);
- hvcs_index_list = NULL;
- hvcs_index_count = 0;
- }
+ kfree(hvcs_index_list);
+ hvcs_index_list = NULL;
+ hvcs_index_count = 0;
}
static int __init hvcs_module_init(void)
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
deleted file mode 100644
index 29dc87e59020..000000000000
--- a/drivers/char/hw_random.c
+++ /dev/null
@@ -1,698 +0,0 @@
-/*
- Added support for the AMD Geode LX RNG
- (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
-
- derived from
-
- Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
- (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
-
- derived from
-
- Hardware driver for the AMD 768 Random Number Generator (RNG)
- (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
-
- derived from
-
- Hardware driver for Intel i810 Random Number Generator (RNG)
- Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
- Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
-
- Please read Documentation/hw_random.txt for details on use.
-
- ----------------------------------------------------------
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/random.h>
-#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-
-#ifdef __i386__
-#include <asm/msr.h>
-#include <asm/cpufeature.h>
-#endif
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-
-/*
- * core module and version information
- */
-#define RNG_VERSION "1.0.0"
-#define RNG_MODULE_NAME "hw_random"
-#define RNG_DRIVER_NAME RNG_MODULE_NAME " hardware driver " RNG_VERSION
-#define PFX RNG_MODULE_NAME ": "
-
-
-/*
- * debugging macros
- */
-
-/* pr_debug() collapses to a no-op if DEBUG is not defined */
-#define DPRINTK(fmt, args...) pr_debug(PFX "%s: " fmt, __FUNCTION__ , ## args)
-
-
-#undef RNG_NDEBUG /* define to enable lightweight runtime checks */
-#ifdef RNG_NDEBUG
-#define assert(expr) \
- if(!(expr)) { \
- printk(KERN_DEBUG PFX "Assertion failed! %s,%s,%s," \
- "line=%d\n", #expr, __FILE__, __FUNCTION__, __LINE__); \
- }
-#else
-#define assert(expr)
-#endif
-
-#define RNG_MISCDEV_MINOR 183 /* official */
-
-static int rng_dev_open (struct inode *inode, struct file *filp);
-static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
- loff_t * offp);
-
-static int __init intel_init (struct pci_dev *dev);
-static void intel_cleanup(void);
-static unsigned int intel_data_present (void);
-static u32 intel_data_read (void);
-
-static int __init amd_init (struct pci_dev *dev);
-static void amd_cleanup(void);
-static unsigned int amd_data_present (void);
-static u32 amd_data_read (void);
-
-#ifdef __i386__
-static int __init via_init(struct pci_dev *dev);
-static void via_cleanup(void);
-static unsigned int via_data_present (void);
-static u32 via_data_read (void);
-#endif
-
-static int __init geode_init(struct pci_dev *dev);
-static void geode_cleanup(void);
-static unsigned int geode_data_present (void);
-static u32 geode_data_read (void);
-
-struct rng_operations {
- int (*init) (struct pci_dev *dev);
- void (*cleanup) (void);
- unsigned int (*data_present) (void);
- u32 (*data_read) (void);
- unsigned int n_bytes; /* number of bytes per ->data_read */
-};
-static struct rng_operations *rng_ops;
-
-static struct file_operations rng_chrdev_ops = {
- .owner = THIS_MODULE,
- .open = rng_dev_open,
- .read = rng_dev_read,
-};
-
-
-static struct miscdevice rng_miscdev = {
- RNG_MISCDEV_MINOR,
- RNG_MODULE_NAME,
- &rng_chrdev_ops,
-};
-
-enum {
- rng_hw_none,
- rng_hw_intel,
- rng_hw_amd,
-#ifdef __i386__
- rng_hw_via,
-#endif
- rng_hw_geode,
-};
-
-static struct rng_operations rng_vendor_ops[] = {
- /* rng_hw_none */
- { },
-
- /* rng_hw_intel */
- { intel_init, intel_cleanup, intel_data_present,
- intel_data_read, 1 },
-
- /* rng_hw_amd */
- { amd_init, amd_cleanup, amd_data_present, amd_data_read, 4 },
-
-#ifdef __i386__
- /* rng_hw_via */
- { via_init, via_cleanup, via_data_present, via_data_read, 1 },
-#endif
-
- /* rng_hw_geode */
- { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 }
-};
-
-/*
- * Data for PCI driver interface
- *
- * This data only exists for exporting the supported
- * PCI ids via MODULE_DEVICE_TABLE. We do not actually
- * register a pci_driver, because someone else might one day
- * want to register another driver on the same PCI id.
- */
-static struct pci_device_id rng_pci_tbl[] = {
- { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_amd },
- { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_amd },
-
- { 0x8086, 0x2418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
- { 0x8086, 0x2428, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
- { 0x8086, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
- { 0x8086, 0x2448, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
- { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
- { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
-
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode },
-
- { 0, }, /* terminate list */
-};
-MODULE_DEVICE_TABLE (pci, rng_pci_tbl);
-
-
-/***********************************************************************
- *
- * Intel RNG operations
- *
- */
-
-/*
- * RNG registers (offsets from rng_mem)
- */
-#define INTEL_RNG_HW_STATUS 0
-#define INTEL_RNG_PRESENT 0x40
-#define INTEL_RNG_ENABLED 0x01
-#define INTEL_RNG_STATUS 1
-#define INTEL_RNG_DATA_PRESENT 0x01
-#define INTEL_RNG_DATA 2
-
-/*
- * Magic address at which Intel PCI bridges locate the RNG
- */
-#define INTEL_RNG_ADDR 0xFFBC015F
-#define INTEL_RNG_ADDR_LEN 3
-
-/* token to our ioremap'd RNG register area */
-static void __iomem *rng_mem;
-
-static inline u8 intel_hwstatus (void)
-{
- assert (rng_mem != NULL);
- return readb (rng_mem + INTEL_RNG_HW_STATUS);
-}
-
-static inline u8 intel_hwstatus_set (u8 hw_status)
-{
- assert (rng_mem != NULL);
- writeb (hw_status, rng_mem + INTEL_RNG_HW_STATUS);
- return intel_hwstatus ();
-}
-
-static unsigned int intel_data_present(void)
-{
- assert (rng_mem != NULL);
-
- return (readb (rng_mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT) ?
- 1 : 0;
-}
-
-static u32 intel_data_read(void)
-{
- assert (rng_mem != NULL);
-
- return readb (rng_mem + INTEL_RNG_DATA);
-}
-
-static int __init intel_init (struct pci_dev *dev)
-{
- int rc;
- u8 hw_status;
-
- DPRINTK ("ENTER\n");
-
- rng_mem = ioremap (INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
- if (rng_mem == NULL) {
- printk (KERN_ERR PFX "cannot ioremap RNG Memory\n");
- rc = -EBUSY;
- goto err_out;
- }
-
- /* Check for Intel 82802 */
- hw_status = intel_hwstatus ();
- if ((hw_status & INTEL_RNG_PRESENT) == 0) {
- printk (KERN_ERR PFX "RNG not detected\n");
- rc = -ENODEV;
- goto err_out_free_map;
- }
-
- /* turn RNG h/w on, if it's off */
- if ((hw_status & INTEL_RNG_ENABLED) == 0)
- hw_status = intel_hwstatus_set (hw_status | INTEL_RNG_ENABLED);
- if ((hw_status & INTEL_RNG_ENABLED) == 0) {
- printk (KERN_ERR PFX "cannot enable RNG, aborting\n");
- rc = -EIO;
- goto err_out_free_map;
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_out_free_map:
- iounmap (rng_mem);
- rng_mem = NULL;
-err_out:
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
-}
-
-static void intel_cleanup(void)
-{
- u8 hw_status;
-
- hw_status = intel_hwstatus ();
- if (hw_status & INTEL_RNG_ENABLED)
- intel_hwstatus_set (hw_status & ~INTEL_RNG_ENABLED);
- else
- printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
- iounmap(rng_mem);
- rng_mem = NULL;
-}
-
-/***********************************************************************
- *
- * AMD RNG operations
- *
- */
-
-static u32 pmbase; /* PMxx I/O base */
-static struct pci_dev *amd_dev;
-
-static unsigned int amd_data_present (void)
-{
- return inl(pmbase + 0xF4) & 1;
-}
-
-
-static u32 amd_data_read (void)
-{
- return inl(pmbase + 0xF0);
-}
-
-static int __init amd_init (struct pci_dev *dev)
-{
- int rc;
- u8 rnen;
-
- DPRINTK ("ENTER\n");
-
- pci_read_config_dword(dev, 0x58, &pmbase);
-
- pmbase &= 0x0000FF00;
-
- if (pmbase == 0)
- {
- printk (KERN_ERR PFX "power management base not set\n");
- rc = -EIO;
- goto err_out;
- }
-
- pci_read_config_byte(dev, 0x40, &rnen);
- rnen |= (1 << 7); /* RNG on */
- pci_write_config_byte(dev, 0x40, rnen);
-
- pci_read_config_byte(dev, 0x41, &rnen);
- rnen |= (1 << 7); /* PMIO enable */
- pci_write_config_byte(dev, 0x41, rnen);
-
- pr_info( PFX "AMD768 system management I/O registers at 0x%X.\n",
- pmbase);
-
- amd_dev = dev;
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_out:
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
-}
-
-static void amd_cleanup(void)
-{
- u8 rnen;
-
- pci_read_config_byte(amd_dev, 0x40, &rnen);
- rnen &= ~(1 << 7); /* RNG off */
- pci_write_config_byte(amd_dev, 0x40, rnen);
-
- /* FIXME: twiddle pmio, also? */
-}
-
-#ifdef __i386__
-/***********************************************************************
- *
- * VIA RNG operations
- *
- */
-
-enum {
- VIA_STRFILT_CNT_SHIFT = 16,
- VIA_STRFILT_FAIL = (1 << 15),
- VIA_STRFILT_ENABLE = (1 << 14),
- VIA_RAWBITS_ENABLE = (1 << 13),
- VIA_RNG_ENABLE = (1 << 6),
- VIA_XSTORE_CNT_MASK = 0x0F,
-
- VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
- VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */
- VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF,
- VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */
- VIA_RNG_CHUNK_2_MASK = 0xFFFF,
- VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */
- VIA_RNG_CHUNK_1_MASK = 0xFF,
-};
-
-static u32 via_rng_datum;
-
-/*
- * Investigate using the 'rep' prefix to obtain 32 bits of random data
- * in one insn. The upside is potentially better performance. The
- * downside is that the instruction becomes no longer atomic. Due to
- * this, just like familiar issues with /dev/random itself, the worst
- * case of a 'rep xstore' could potentially pause a cpu for an
- * unreasonably long time. In practice, this condition would likely
- * only occur when the hardware is failing. (or so we hope :))
- *
- * Another possible performance boost may come from simply buffering
- * until we have 4 bytes, thus returning a u32 at a time,
- * instead of the current u8-at-a-time.
- */
-
-static inline u32 xstore(u32 *addr, u32 edx_in)
-{
- u32 eax_out;
-
- asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
- :"=m"(*addr), "=a"(eax_out)
- :"D"(addr), "d"(edx_in));
-
- return eax_out;
-}
-
-static unsigned int via_data_present(void)
-{
- u32 bytes_out;
-
- /* We choose the recommended 1-byte-per-instruction RNG rate,
- * for greater randomness at the expense of speed. Larger
- * values 2, 4, or 8 bytes-per-instruction yield greater
- * speed at lesser randomness.
- *
- * If you change this to another VIA_CHUNK_n, you must also
- * change the ->n_bytes values in rng_vendor_ops[] tables.
- * VIA_CHUNK_8 requires further code changes.
- *
- * A copy of MSR_VIA_RNG is placed in eax_out when xstore
- * completes.
- */
- via_rng_datum = 0; /* paranoia, not really necessary */
- bytes_out = xstore(&via_rng_datum, VIA_RNG_CHUNK_1) & VIA_XSTORE_CNT_MASK;
- if (bytes_out == 0)
- return 0;
-
- return 1;
-}
-
-static u32 via_data_read(void)
-{
- return via_rng_datum;
-}
-
-static int __init via_init(struct pci_dev *dev)
-{
- u32 lo, hi, old_lo;
-
- /* Control the RNG via MSR. Tread lightly and pay very close
- * close attention to values written, as the reserved fields
- * are documented to be "undefined and unpredictable"; but it
- * does not say to write them as zero, so I make a guess that
- * we restore the values we find in the register.
- */
- rdmsr(MSR_VIA_RNG, lo, hi);
-
- old_lo = lo;
- lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
- lo &= ~VIA_XSTORE_CNT_MASK;
- lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
- lo |= VIA_RNG_ENABLE;
-
- if (lo != old_lo)
- wrmsr(MSR_VIA_RNG, lo, hi);
-
- /* perhaps-unnecessary sanity check; remove after testing if
- unneeded */
- rdmsr(MSR_VIA_RNG, lo, hi);
- if ((lo & VIA_RNG_ENABLE) == 0) {
- printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void via_cleanup(void)
-{
- /* do nothing */
-}
-#endif
-
-/***********************************************************************
- *
- * AMD Geode RNG operations
- *
- */
-
-static void __iomem *geode_rng_base = NULL;
-
-#define GEODE_RNG_DATA_REG 0x50
-#define GEODE_RNG_STATUS_REG 0x54
-
-static u32 geode_data_read(void)
-{
- u32 val;
-
- assert(geode_rng_base != NULL);
- val = readl(geode_rng_base + GEODE_RNG_DATA_REG);
- return val;
-}
-
-static unsigned int geode_data_present(void)
-{
- u32 val;
-
- assert(geode_rng_base != NULL);
- val = readl(geode_rng_base + GEODE_RNG_STATUS_REG);
- return val;
-}
-
-static void geode_cleanup(void)
-{
- iounmap(geode_rng_base);
- geode_rng_base = NULL;
-}
-
-static int geode_init(struct pci_dev *dev)
-{
- unsigned long rng_base = pci_resource_start(dev, 0);
-
- if (rng_base == 0)
- return 1;
-
- geode_rng_base = ioremap(rng_base, 0x58);
-
- if (geode_rng_base == NULL) {
- printk(KERN_ERR PFX "Cannot ioremap RNG memory\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/***********************************************************************
- *
- * /dev/hwrandom character device handling (major 10, minor 183)
- *
- */
-
-static int rng_dev_open (struct inode *inode, struct file *filp)
-{
- /* enforce read-only access to this chrdev */
- if ((filp->f_mode & FMODE_READ) == 0)
- return -EINVAL;
- if (filp->f_mode & FMODE_WRITE)
- return -EINVAL;
-
- return 0;
-}
-
-
-static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
- loff_t * offp)
-{
- static DEFINE_SPINLOCK(rng_lock);
- unsigned int have_data;
- u32 data = 0;
- ssize_t ret = 0;
-
- while (size) {
- spin_lock(&rng_lock);
-
- have_data = 0;
- if (rng_ops->data_present()) {
- data = rng_ops->data_read();
- have_data = rng_ops->n_bytes;
- }
-
- spin_unlock (&rng_lock);
-
- while (have_data && size) {
- if (put_user((u8)data, buf++)) {
- ret = ret ? : -EFAULT;
- break;
- }
- size--;
- ret++;
- have_data--;
- data>>=8;
- }
-
- if (filp->f_flags & O_NONBLOCK)
- return ret ? : -EAGAIN;
-
- if(need_resched())
- schedule_timeout_interruptible(1);
- else
- udelay(200); /* FIXME: We could poll for 250uS ?? */
-
- if (signal_pending (current))
- return ret ? : -ERESTARTSYS;
- }
- return ret;
-}
-
-
-
-/*
- * rng_init_one - look for and attempt to init a single RNG
- */
-static int __init rng_init_one (struct pci_dev *dev)
-{
- int rc;
-
- DPRINTK ("ENTER\n");
-
- assert(rng_ops != NULL);
-
- rc = rng_ops->init(dev);
- if (rc)
- goto err_out;
-
- rc = misc_register (&rng_miscdev);
- if (rc) {
- printk (KERN_ERR PFX "misc device register failed\n");
- goto err_out_cleanup_hw;
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_out_cleanup_hw:
- rng_ops->cleanup();
-err_out:
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
-}
-
-
-
-MODULE_AUTHOR("The Linux Kernel team");
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
-MODULE_LICENSE("GPL");
-
-
-/*
- * rng_init - initialize RNG module
- */
-static int __init rng_init (void)
-{
- int rc;
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *ent;
-
- DPRINTK ("ENTER\n");
-
- /* Probe for Intel, AMD, Geode RNGs */
- for_each_pci_dev(pdev) {
- ent = pci_match_id(rng_pci_tbl, pdev);
- if (ent) {
- rng_ops = &rng_vendor_ops[ent->driver_data];
- goto match;
- }
- }
-
-#ifdef __i386__
- /* Probe for VIA RNG */
- if (cpu_has_xstore) {
- rng_ops = &rng_vendor_ops[rng_hw_via];
- pdev = NULL;
- goto match;
- }
-#endif
-
- DPRINTK ("EXIT, returning -ENODEV\n");
- return -ENODEV;
-
-match:
- rc = rng_init_one (pdev);
- if (rc)
- return rc;
-
- pr_info( RNG_DRIVER_NAME " loaded\n");
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
-
-
-/*
- * rng_init - shutdown RNG module
- */
-static void __exit rng_cleanup (void)
-{
- DPRINTK ("ENTER\n");
-
- misc_deregister (&rng_miscdev);
-
- if (rng_ops->cleanup)
- rng_ops->cleanup();
-
- DPRINTK ("EXIT\n");
-}
-
-
-module_init (rng_init);
-module_exit (rng_cleanup);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
new file mode 100644
index 000000000000..9f7635f75178
--- /dev/null
+++ b/drivers/char/hw_random/Kconfig
@@ -0,0 +1,90 @@
+#
+# Hardware Random Number Generator (RNG) configuration
+#
+
+config HW_RANDOM
+ bool "Hardware Random Number Generator Core support"
+ default y
+ ---help---
+ Hardware Random Number Generator Core infrastructure.
+
+ If unsure, say Y.
+
+config HW_RANDOM_INTEL
+ tristate "Intel HW Random Number Generator support"
+ depends on HW_RANDOM && (X86 || IA64) && PCI
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Intel i8xx-based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_AMD
+ tristate "AMD HW Random Number Generator support"
+ depends on HW_RANDOM && X86 && PCI
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on AMD 76x-based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called amd-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_GEODE
+ tristate "AMD Geode HW Random Number Generator support"
+ depends on HW_RANDOM && X86 && PCI
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the AMD Geode LX.
+
+ To compile this driver as a module, choose M here: the
+ module will be called geode-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_VIA
+ tristate "VIA HW Random Number Generator support"
+ depends on HW_RANDOM && X86_32
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on VIA based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called via-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_IXP4XX
+ tristate "Intel IXP4xx NPU HW Random Number Generator support"
+ depends on HW_RANDOM && ARCH_IXP4XX
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random
+ Number Generator hardware found on the Intel IXP4xx NPU.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ixp4xx-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OMAP
+ tristate "OMAP Random Number Generator support"
+ depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on OMAP16xx and OMAP24xx multimedia
+ processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
new file mode 100644
index 000000000000..e263ae96f940
--- /dev/null
+++ b/drivers/char/hw_random/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for HW Random Number Generator (RNG) device drivers.
+#
+
+obj-$(CONFIG_HW_RANDOM) += core.o
+obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
+obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
+obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
new file mode 100644
index 000000000000..71e4e0f3fd54
--- /dev/null
+++ b/drivers/char/hw_random/amd-rng.c
@@ -0,0 +1,152 @@
+/*
+ * RNG driver for AMD RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hw_random.h>
+#include <asm/io.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+static struct pci_dev *amd_pdev;
+
+
+static int amd_rng_data_present(struct hwrng *rng)
+{
+ u32 pmbase = (u32)rng->priv;
+
+ return !!(inl(pmbase + 0xF4) & 1);
+}
+
+static int amd_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ u32 pmbase = (u32)rng->priv;
+
+ *data = inl(pmbase + 0xF0);
+
+ return 4;
+}
+
+static int amd_rng_init(struct hwrng *rng)
+{
+ u8 rnen;
+
+ pci_read_config_byte(amd_pdev, 0x40, &rnen);
+ rnen |= (1 << 7); /* RNG on */
+ pci_write_config_byte(amd_pdev, 0x40, rnen);
+
+ pci_read_config_byte(amd_pdev, 0x41, &rnen);
+ rnen |= (1 << 7); /* PMIO enable */
+ pci_write_config_byte(amd_pdev, 0x41, rnen);
+
+ return 0;
+}
+
+static void amd_rng_cleanup(struct hwrng *rng)
+{
+ u8 rnen;
+
+ pci_read_config_byte(amd_pdev, 0x40, &rnen);
+ rnen &= ~(1 << 7); /* RNG off */
+ pci_write_config_byte(amd_pdev, 0x40, rnen);
+}
+
+
+static struct hwrng amd_rng = {
+ .name = "amd",
+ .init = amd_rng_init,
+ .cleanup = amd_rng_cleanup,
+ .data_present = amd_rng_data_present,
+ .data_read = amd_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ u32 pmbase;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ goto out;
+
+found:
+ err = pci_read_config_dword(pdev, 0x58, &pmbase);
+ if (err)
+ goto out;
+ err = -EIO;
+ pmbase &= 0x0000FF00;
+ if (pmbase == 0)
+ goto out;
+ amd_rng.priv = (unsigned long)pmbase;
+ amd_pdev = pdev;
+
+ printk(KERN_INFO "AMD768 RNG detected\n");
+ err = hwrng_register(&amd_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ goto out;
+ }
+out:
+ return err;
+}
+
+static void __exit mod_exit(void)
+{
+ hwrng_unregister(&amd_rng);
+}
+
+subsys_initcall(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("The Linux Kernel team");
+MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
new file mode 100644
index 000000000000..88b026639f10
--- /dev/null
+++ b/drivers/char/hw_random/core.c
@@ -0,0 +1,354 @@
+/*
+ Added support for the AMD Geode LX RNG
+ (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
+
+ derived from
+
+ Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+
+ derived from
+
+ Hardware driver for the AMD 768 Random Number Generator (RNG)
+ (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+
+ derived from
+
+ Hardware driver for Intel i810 Random Number Generator (RNG)
+ Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+
+ Added generic RNG API
+ Copyright 2006 Michael Buesch <mbuesch@freenet.de>
+ Copyright 2005 (c) MontaVista Software, Inc.
+
+ Please read Documentation/hw_random.txt for details on use.
+
+ ----------------------------------------------------------
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ */
+
+
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+
+
+#define RNG_MODULE_NAME "hw_random"
+#define PFX RNG_MODULE_NAME ": "
+#define RNG_MISCDEV_MINOR 183 /* official */
+
+
+static struct hwrng *current_rng;
+static LIST_HEAD(rng_list);
+static DEFINE_MUTEX(rng_mutex);
+
+
+static inline int hwrng_init(struct hwrng *rng)
+{
+ if (!rng->init)
+ return 0;
+ return rng->init(rng);
+}
+
+static inline void hwrng_cleanup(struct hwrng *rng)
+{
+ if (rng && rng->cleanup)
+ rng->cleanup(rng);
+}
+
+static inline int hwrng_data_present(struct hwrng *rng)
+{
+ if (!rng->data_present)
+ return 1;
+ return rng->data_present(rng);
+}
+
+static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
+{
+ return rng->data_read(rng, data);
+}
+
+
+static int rng_dev_open(struct inode *inode, struct file *filp)
+{
+ /* enforce read-only access to this chrdev */
+ if ((filp->f_mode & FMODE_READ) == 0)
+ return -EINVAL;
+ if (filp->f_mode & FMODE_WRITE)
+ return -EINVAL;
+ return 0;
+}
+
+static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+{
+ u32 data;
+ ssize_t ret = 0;
+ int i, err = 0;
+ int data_present;
+ int bytes_read;
+
+ while (size) {
+ err = -ERESTARTSYS;
+ if (mutex_lock_interruptible(&rng_mutex))
+ goto out;
+ if (!current_rng) {
+ mutex_unlock(&rng_mutex);
+ err = -ENODEV;
+ goto out;
+ }
+ if (filp->f_flags & O_NONBLOCK) {
+ data_present = hwrng_data_present(current_rng);
+ } else {
+ /* Some RNG require some time between data_reads to gather
+ * new entropy. Poll it.
+ */
+ for (i = 0; i < 20; i++) {
+ data_present = hwrng_data_present(current_rng);
+ if (data_present)
+ break;
+ udelay(10);
+ }
+ }
+ bytes_read = 0;
+ if (data_present)
+ bytes_read = hwrng_data_read(current_rng, &data);
+ mutex_unlock(&rng_mutex);
+
+ err = -EAGAIN;
+ if (!bytes_read && (filp->f_flags & O_NONBLOCK))
+ goto out;
+
+ err = -EFAULT;
+ while (bytes_read && size) {
+ if (put_user((u8)data, buf++))
+ goto out;
+ size--;
+ ret++;
+ bytes_read--;
+ data >>= 8;
+ }
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+ err = -ERESTARTSYS;
+ if (signal_pending(current))
+ goto out;
+ }
+out:
+ return ret ? : err;
+}
+
+
+static struct file_operations rng_chrdev_ops = {
+ .owner = THIS_MODULE,
+ .open = rng_dev_open,
+ .read = rng_dev_read,
+};
+
+static struct miscdevice rng_miscdev = {
+ .minor = RNG_MISCDEV_MINOR,
+ .name = RNG_MODULE_NAME,
+ .fops = &rng_chrdev_ops,
+};
+
+
+static ssize_t hwrng_attr_current_store(struct class_device *class,
+ const char *buf, size_t len)
+{
+ int err;
+ struct hwrng *rng;
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ err = -ENODEV;
+ list_for_each_entry(rng, &rng_list, list) {
+ if (strcmp(rng->name, buf) == 0) {
+ if (rng == current_rng) {
+ err = 0;
+ break;
+ }
+ err = hwrng_init(rng);
+ if (err)
+ break;
+ hwrng_cleanup(current_rng);
+ current_rng = rng;
+ err = 0;
+ break;
+ }
+ }
+ mutex_unlock(&rng_mutex);
+
+ return err ? : len;
+}
+
+static ssize_t hwrng_attr_current_show(struct class_device *class,
+ char *buf)
+{
+ int err;
+ ssize_t ret;
+ const char *name = "none";
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ if (current_rng)
+ name = current_rng->name;
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+ mutex_unlock(&rng_mutex);
+
+ return ret;
+}
+
+static ssize_t hwrng_attr_available_show(struct class_device *class,
+ char *buf)
+{
+ int err;
+ ssize_t ret = 0;
+ struct hwrng *rng;
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ buf[0] = '\0';
+ list_for_each_entry(rng, &rng_list, list) {
+ strncat(buf, rng->name, PAGE_SIZE - ret - 1);
+ ret += strlen(rng->name);
+ strncat(buf, " ", PAGE_SIZE - ret - 1);
+ ret++;
+ }
+ strncat(buf, "\n", PAGE_SIZE - ret - 1);
+ ret++;
+ mutex_unlock(&rng_mutex);
+
+ return ret;
+}
+
+static CLASS_DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
+ hwrng_attr_current_show,
+ hwrng_attr_current_store);
+static CLASS_DEVICE_ATTR(rng_available, S_IRUGO,
+ hwrng_attr_available_show,
+ NULL);
+
+
+static void unregister_miscdev(void)
+{
+ class_device_remove_file(rng_miscdev.class,
+ &class_device_attr_rng_available);
+ class_device_remove_file(rng_miscdev.class,
+ &class_device_attr_rng_current);
+ misc_deregister(&rng_miscdev);
+}
+
+static int register_miscdev(void)
+{
+ int err;
+
+ err = misc_register(&rng_miscdev);
+ if (err)
+ goto out;
+ err = class_device_create_file(rng_miscdev.class,
+ &class_device_attr_rng_current);
+ if (err)
+ goto err_misc_dereg;
+ err = class_device_create_file(rng_miscdev.class,
+ &class_device_attr_rng_available);
+ if (err)
+ goto err_remove_current;
+out:
+ return err;
+
+err_remove_current:
+ class_device_remove_file(rng_miscdev.class,
+ &class_device_attr_rng_current);
+err_misc_dereg:
+ misc_deregister(&rng_miscdev);
+ goto out;
+}
+
+int hwrng_register(struct hwrng *rng)
+{
+ int must_register_misc;
+ int err = -EINVAL;
+ struct hwrng *old_rng, *tmp;
+
+ if (rng->name == NULL ||
+ rng->data_read == NULL)
+ goto out;
+
+ mutex_lock(&rng_mutex);
+
+ /* Must not register two RNGs with the same name. */
+ err = -EEXIST;
+ list_for_each_entry(tmp, &rng_list, list) {
+ if (strcmp(tmp->name, rng->name) == 0)
+ goto out_unlock;
+ }
+
+ must_register_misc = (current_rng == NULL);
+ old_rng = current_rng;
+ if (!old_rng) {
+ err = hwrng_init(rng);
+ if (err)
+ goto out_unlock;
+ current_rng = rng;
+ }
+ err = 0;
+ if (must_register_misc) {
+ err = register_miscdev();
+ if (err) {
+ if (!old_rng) {
+ hwrng_cleanup(rng);
+ current_rng = NULL;
+ }
+ goto out_unlock;
+ }
+ }
+ INIT_LIST_HEAD(&rng->list);
+ list_add_tail(&rng->list, &rng_list);
+out_unlock:
+ mutex_unlock(&rng_mutex);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(hwrng_register);
+
+void hwrng_unregister(struct hwrng *rng)
+{
+ int err;
+
+ mutex_lock(&rng_mutex);
+
+ list_del(&rng->list);
+ if (current_rng == rng) {
+ hwrng_cleanup(rng);
+ if (list_empty(&rng_list)) {
+ current_rng = NULL;
+ } else {
+ current_rng = list_entry(rng_list.prev, struct hwrng, list);
+ err = hwrng_init(current_rng);
+ if (err)
+ current_rng = NULL;
+ }
+ }
+ if (list_empty(&rng_list))
+ unregister_miscdev();
+
+ mutex_unlock(&rng_mutex);
+}
+EXPORT_SYMBOL_GPL(hwrng_unregister);
+
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL");
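
The core above reduces each backend to a struct hwrng with a name, a mandatory data_read hook and optional init/cleanup/data_present hooks, registered through hwrng_register(). A minimal sketch of a hypothetical backend written only against the interfaces added in this hunk (the "demo" name and the constant data source are invented for illustration, not part of the patch):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/hw_random.h>

/* Hypothetical backend: returns a fixed pattern, for illustration only. */
static int demo_rng_data_present(struct hwrng *rng)
{
	return 1;			/* pretend data is always ready */
}

static int demo_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = 0x12345678;		/* stand-in for a hardware register read */
	return 4;			/* number of valid bytes placed in *data */
}

static struct hwrng demo_rng = {
	.name		= "demo",
	.data_present	= demo_rng_data_present,
	.data_read	= demo_rng_data_read,
};

static int __init demo_rng_init(void)
{
	return hwrng_register(&demo_rng);
}

static void __exit demo_rng_exit(void)
{
	hwrng_unregister(&demo_rng);
}

module_init(demo_rng_init);
module_exit(demo_rng_exit);
MODULE_LICENSE("GPL");

The rng_current and rng_available attributes created next to the misc device let userspace inspect the registered backends and switch between them at runtime.
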
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
new file mode 100644
index 000000000000..be61f22ee7bb
--- /dev/null
+++ b/drivers/char/hw_random/geode-rng.c
@@ -0,0 +1,128 @@
+/*
+ * RNG driver for AMD Geode RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hw_random.h>
+#include <asm/io.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+#define GEODE_RNG_DATA_REG 0x50
+#define GEODE_RNG_STATUS_REG 0x54
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+
+static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ *data = readl(mem + GEODE_RNG_DATA_REG);
+
+ return 4;
+}
+
+static int geode_rng_data_present(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ return !!(readl(mem + GEODE_RNG_STATUS_REG));
+}
+
+
+static struct hwrng geode_rng = {
+ .name = "geode",
+ .data_present = geode_rng_data_present,
+ .data_read = geode_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ void __iomem *mem;
+ unsigned long rng_base;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ goto out;
+
+found:
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+ goto out;
+ err = -ENOMEM;
+ mem = ioremap(rng_base, 0x58);
+ if (!mem)
+ goto out;
+ geode_rng.priv = (unsigned long)mem;
+
+ printk(KERN_INFO "AMD Geode RNG detected\n");
+ err = hwrng_register(&geode_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ goto out;
+ }
+out:
+ return err;
+}
+
+static void __exit mod_exit(void)
+{
+ void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+ hwrng_unregister(&geode_rng);
+ iounmap(mem);
+}
+
+subsys_initcall(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
new file mode 100644
index 000000000000..6594bd5645f4
--- /dev/null
+++ b/drivers/char/hw_random/intel-rng.c
@@ -0,0 +1,189 @@
+/*
+ * RNG driver for Intel RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hw_random.h>
+#include <asm/io.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+/*
+ * RNG registers
+ */
+#define INTEL_RNG_HW_STATUS 0
+#define INTEL_RNG_PRESENT 0x40
+#define INTEL_RNG_ENABLED 0x01
+#define INTEL_RNG_STATUS 1
+#define INTEL_RNG_DATA_PRESENT 0x01
+#define INTEL_RNG_DATA 2
+
+/*
+ * Magic address at which Intel PCI bridges locate the RNG
+ */
+#define INTEL_RNG_ADDR 0xFFBC015F
+#define INTEL_RNG_ADDR_LEN 3
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { 0x8086, 0x2418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0x8086, 0x2428, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0x8086, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0x8086, 0x2448, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+
+static inline u8 hwstatus_get(void __iomem *mem)
+{
+ return readb(mem + INTEL_RNG_HW_STATUS);
+}
+
+static inline u8 hwstatus_set(void __iomem *mem,
+ u8 hw_status)
+{
+ writeb(hw_status, mem + INTEL_RNG_HW_STATUS);
+ return hwstatus_get(mem);
+}
+
+static int intel_rng_data_present(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
+}
+
+static int intel_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ *data = readb(mem + INTEL_RNG_DATA);
+
+ return 1;
+}
+
+static int intel_rng_init(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ u8 hw_status;
+ int err = -EIO;
+
+ hw_status = hwstatus_get(mem);
+ /* turn RNG h/w on, if it's off */
+ if ((hw_status & INTEL_RNG_ENABLED) == 0)
+ hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
+ if ((hw_status & INTEL_RNG_ENABLED) == 0) {
+ printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
+ goto out;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static void intel_rng_cleanup(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ u8 hw_status;
+
+ hw_status = hwstatus_get(mem);
+ if (hw_status & INTEL_RNG_ENABLED)
+ hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
+ else
+ printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
+}
+
+
+static struct hwrng intel_rng = {
+ .name = "intel",
+ .init = intel_rng_init,
+ .cleanup = intel_rng_cleanup,
+ .data_present = intel_rng_data_present,
+ .data_read = intel_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err = -ENODEV;
+ void __iomem *mem;
+ u8 hw_status;
+
+ if (!pci_dev_present(pci_tbl))
+ goto out; /* Device not found. */
+
+ err = -ENOMEM;
+ mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
+ if (!mem)
+ goto out;
+ intel_rng.priv = (unsigned long)mem;
+
+ /* Check for Intel 82802 */
+ err = -ENODEV;
+ hw_status = hwstatus_get(mem);
+ if ((hw_status & INTEL_RNG_PRESENT) == 0)
+ goto err_unmap;
+
+ printk(KERN_INFO "Intel 82802 RNG detected\n");
+ err = hwrng_register(&intel_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ goto out;
+ }
+out:
+ return err;
+
+err_unmap:
+ iounmap(mem);
+ goto out;
+}
+
+static void __exit mod_exit(void)
+{
+ void __iomem *mem = (void __iomem *)intel_rng.priv;
+
+ hwrng_unregister(&intel_rng);
+ iounmap(mem);
+}
+
+subsys_initcall(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
new file mode 100644
index 000000000000..ef71022423c9
--- /dev/null
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -0,0 +1,73 @@
+/*
+ * drivers/char/rng/ixp4xx-rng.c
+ *
+ * RNG driver for Intel IXP4xx family of NPUs
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Fixes by Michael Buesch
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/hw_random.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+
+static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+ void __iomem * rng_base = (void __iomem *)rng->priv;
+
+ *buffer = __raw_readl(rng_base);
+
+ return 4;
+}
+
+static struct hwrng ixp4xx_rng_ops = {
+ .name = "ixp4xx",
+ .data_read = ixp4xx_rng_data_read,
+};
+
+static int __init ixp4xx_rng_init(void)
+{
+ void __iomem * rng_base;
+ int err;
+
+ rng_base = ioremap(0x70002100, 4);
+ if (!rng_base)
+ return -ENOMEM;
+ ixp4xx_rng_ops.priv = (unsigned long)rng_base;
+ err = hwrng_register(&ixp4xx_rng_ops);
+ if (err)
+ iounmap(rng_base);
+
+ return err;
+}
+
+static void __exit ixp4xx_rng_exit(void)
+{
+ void __iomem * rng_base = (void __iomem *)ixp4xx_rng_ops.priv;
+
+ hwrng_unregister(&ixp4xx_rng_ops);
+ iounmap(rng_base);
+}
+
+subsys_initcall(ixp4xx_rng_init);
+module_exit(ixp4xx_rng_exit);
+
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
new file mode 100644
index 000000000000..819516b35a79
--- /dev/null
+++ b/drivers/char/hw_random/omap-rng.c
@@ -0,0 +1,208 @@
+/*
+ * driver/char/hw_random/omap-rng.c
+ *
+ * RNG driver for TI OMAP CPU family
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Mostly based on original driver:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * TODO:
+ *
+ * - Make status updated be interrupt driven so we don't poll
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#define RNG_OUT_REG 0x00 /* Output register */
+#define RNG_STAT_REG 0x04 /* Status register
+ [0] = STAT_BUSY */
+#define RNG_ALARM_REG 0x24 /* Alarm register
+ [7:0] = ALARM_COUNTER */
+#define RNG_CONFIG_REG 0x28 /* Configuration register
+ [11:6] = RESET_COUNT
+ [5:3] = RING2_DELAY
+ [2:0] = RING1_DELAY */
+#define RNG_REV_REG 0x3c /* Revision register
+ [7:0] = REV_NB */
+#define RNG_MASK_REG 0x40 /* Mask and reset register
+ [2] = IT_EN
+ [1] = SOFTRESET
+ [0] = AUTOIDLE */
+#define RNG_SYSSTATUS 0x44 /* System status
+ [0] = RESETDONE */
+
+static void __iomem *rng_base;
+static struct clk *rng_ick;
+static struct device *rng_dev;
+
+static u32 omap_rng_read_reg(int reg)
+{
+ return __raw_readl(rng_base + reg);
+}
+
+static void omap_rng_write_reg(int reg, u32 val)
+{
+ __raw_writel(val, rng_base + reg);
+}
+
+/* REVISIT: Does the status bit really work on 16xx? */
+static int omap_rng_data_present(struct hwrng *rng)
+{
+ return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+}
+
+static int omap_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ *data = omap_rng_read_reg(RNG_OUT_REG);
+
+ return 4;
+}
+
+static struct hwrng omap_rng_ops = {
+ .name = "omap",
+ .data_present = omap_rng_data_present,
+ .data_read = omap_rng_data_read,
+};
+
+static int __init omap_rng_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res, *mem;
+ int ret;
+
+ /*
+ * A bit ugly, and it will never actually happen but there can
+ * be only one RNG and this catches any bork
+ */
+ BUG_ON(rng_dev);
+
+ if (cpu_is_omap24xx()) {
+ rng_ick = clk_get(NULL, "rng_ick");
+ if (IS_ERR(rng_ick)) {
+ dev_err(dev, "Could not get rng_ick\n");
+ ret = PTR_ERR(rng_ick);
+ return ret;
+ }
+ else {
+ clk_use(rng_ick);
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res)
+ return -ENOENT;
+
+ mem = request_mem_region(res->start, res->end - res->start + 1,
+ pdev->name);
+ if (mem == NULL)
+ return -EBUSY;
+
+ dev_set_drvdata(dev, mem);
+ rng_base = (u32 __iomem *)io_p2v(res->start);
+
+ ret = hwrng_register(&omap_rng_ops);
+ if (ret) {
+ release_resource(mem);
+ rng_base = NULL;
+ return ret;
+ }
+
+ dev_info(dev, "OMAP Random Number Generator ver. %02x\n",
+ omap_rng_read_reg(RNG_REV_REG));
+ omap_rng_write_reg(RNG_MASK_REG, 0x1);
+
+ rng_dev = dev;
+
+ return 0;
+}
+
+static int __exit omap_rng_remove(struct device *dev)
+{
+ struct resource *mem = dev_get_drvdata(dev);
+
+ hwrng_unregister(&omap_rng_ops);
+
+ omap_rng_write_reg(RNG_MASK_REG, 0x0);
+
+ if (cpu_is_omap24xx()) {
+ clk_unuse(rng_ick);
+ clk_put(rng_ick);
+ }
+
+ release_resource(mem);
+ rng_base = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int omap_rng_suspend(struct device *dev, pm_message_t message, u32 level)
+{
+ omap_rng_write_reg(RNG_MASK_REG, 0x0);
+
+ return 0;
+}
+
+static int omap_rng_resume(struct device *dev, pm_message_t message, u32 level)
+{
+ omap_rng_write_reg(RNG_MASK_REG, 0x1);
+
+ return 1;
+}
+
+#else
+
+#define omap_rng_suspend NULL
+#define omap_rng_resume NULL
+
+#endif
+
+
+static struct device_driver omap_rng_driver = {
+ .name = "omap_rng",
+ .bus = &platform_bus_type,
+ .probe = omap_rng_probe,
+ .remove = __exit_p(omap_rng_remove),
+ .suspend = omap_rng_suspend,
+ .resume = omap_rng_resume
+};
+
+static int __init omap_rng_init(void)
+{
+ if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
+ return -ENODEV;
+
+ return driver_register(&omap_rng_driver);
+}
+
+static void __exit omap_rng_exit(void)
+{
+ driver_unregister(&omap_rng_driver);
+}
+
+module_init(omap_rng_init);
+module_exit(omap_rng_exit);
+
+MODULE_AUTHOR("Deepak Saxena (and others)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
new file mode 100644
index 000000000000..0e786b617bb8
--- /dev/null
+++ b/drivers/char/hw_random/via-rng.c
@@ -0,0 +1,183 @@
+/*
+ * RNG driver for VIA RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hw_random.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+
+enum {
+ VIA_STRFILT_CNT_SHIFT = 16,
+ VIA_STRFILT_FAIL = (1 << 15),
+ VIA_STRFILT_ENABLE = (1 << 14),
+ VIA_RAWBITS_ENABLE = (1 << 13),
+ VIA_RNG_ENABLE = (1 << 6),
+ VIA_XSTORE_CNT_MASK = 0x0F,
+
+ VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
+ VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF,
+ VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_2_MASK = 0xFFFF,
+ VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_1_MASK = 0xFF,
+};
+
+/*
+ * Investigate using the 'rep' prefix to obtain 32 bits of random data
+ * in one insn. The upside is potentially better performance. The
+ * downside is that the instruction becomes no longer atomic. Due to
+ * this, just like familiar issues with /dev/random itself, the worst
+ * case of a 'rep xstore' could potentially pause a cpu for an
+ * unreasonably long time. In practice, this condition would likely
+ * only occur when the hardware is failing. (or so we hope :))
+ *
+ * Another possible performance boost may come from simply buffering
+ * until we have 4 bytes, thus returning a u32 at a time,
+ * instead of the current u8-at-a-time.
+ */
+
+static inline u32 xstore(u32 *addr, u32 edx_in)
+{
+ u32 eax_out;
+
+ asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
+ :"=m"(*addr), "=a"(eax_out)
+ :"D"(addr), "d"(edx_in));
+
+ return eax_out;
+}
+
+static int via_rng_data_present(struct hwrng *rng)
+{
+ u32 bytes_out;
+ u32 *via_rng_datum = (u32 *)(&rng->priv);
+
+ /* We choose the recommended 1-byte-per-instruction RNG rate,
+ * for greater randomness at the expense of speed. Larger
+ * values 2, 4, or 8 bytes-per-instruction yield greater
+ * speed at lesser randomness.
+ *
+ * If you change this to another VIA_CHUNK_n, you must also
+ * change the ->n_bytes values in rng_vendor_ops[] tables.
+ * VIA_CHUNK_8 requires further code changes.
+ *
+ * A copy of MSR_VIA_RNG is placed in eax_out when xstore
+ * completes.
+ */
+
+ *via_rng_datum = 0; /* paranoia, not really necessary */
+ bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+ bytes_out &= VIA_XSTORE_CNT_MASK;
+ if (bytes_out == 0)
+ return 0;
+ return 1;
+}
+
+static int via_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ u32 via_rng_datum = (u32)rng->priv;
+
+ *data = via_rng_datum;
+
+ return 1;
+}
+
+static int via_rng_init(struct hwrng *rng)
+{
+ u32 lo, hi, old_lo;
+
+ /* Control the RNG via MSR. Tread lightly and pay very close
+ * close attention to values written, as the reserved fields
+ * are documented to be "undefined and unpredictable"; but it
+ * does not say to write them as zero, so I make a guess that
+ * we restore the values we find in the register.
+ */
+ rdmsr(MSR_VIA_RNG, lo, hi);
+
+ old_lo = lo;
+ lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
+ lo &= ~VIA_XSTORE_CNT_MASK;
+ lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
+ lo |= VIA_RNG_ENABLE;
+
+ if (lo != old_lo)
+ wrmsr(MSR_VIA_RNG, lo, hi);
+
+ /* perhaps-unnecessary sanity check; remove after testing if
+ unneeded */
+ rdmsr(MSR_VIA_RNG, lo, hi);
+ if ((lo & VIA_RNG_ENABLE) == 0) {
+ printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+
+static struct hwrng via_rng = {
+ .name = "via",
+ .init = via_rng_init,
+ .data_present = via_rng_data_present,
+ .data_read = via_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err;
+
+ if (!cpu_has_xstore)
+ return -ENODEV;
+ printk(KERN_INFO "VIA RNG detected\n");
+ err = hwrng_register(&via_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ goto out;
+ }
+out:
+ return err;
+}
+
+static void __exit mod_exit(void)
+{
+ hwrng_unregister(&via_rng);
+}
+
+subsys_initcall(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
+MODULE_LICENSE("GPL");
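
Userspace keeps reading raw bytes from the same misc character device (minor 183) regardless of which backend is current. A plain reader, sketched under the assumption that the node is named /dev/hwrng (older setups may expose it as /dev/hwrandom):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	/* Node name is an assumption; adjust if the system uses /dev/hwrandom. */
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* blocks until the RNG has data */
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}
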
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9f2f8fdec69a..ad26f4b997c5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -57,8 +57,7 @@ static int ipmi_init_msghandler(void);
static int initialized = 0;
#ifdef CONFIG_PROC_FS
-struct proc_dir_entry *proc_ipmi_root = NULL;
-EXPORT_SYMBOL(proc_ipmi_root);
+static struct proc_dir_entry *proc_ipmi_root = NULL;
#endif /* CONFIG_PROC_FS */
#define MAX_EVENTS_IN_QUEUE 25
@@ -936,11 +935,8 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
if (val) {
/* Deliver any queued events. */
- list_for_each_entry_safe(msg, msg2, &intf->waiting_events,
- link) {
- list_del(&msg->link);
- list_add_tail(&msg->link, &msgs);
- }
+ list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
+ list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
}
@@ -3677,7 +3673,7 @@ static void send_panic_events(char *str)
}
#endif /* CONFIG_IPMI_PANIC_EVENT */
-static int has_paniced = 0;
+static int has_panicked = 0;
static int panic_event(struct notifier_block *this,
unsigned long event,
@@ -3686,9 +3682,9 @@ static int panic_event(struct notifier_block *this,
int i;
ipmi_smi_t intf;
- if (has_paniced)
+ if (has_panicked)
return NOTIFY_DONE;
- has_paniced = 1;
+ has_panicked = 1;
/* For every registered interface, set it to run to completion. */
for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
@@ -3742,11 +3738,8 @@ static int ipmi_init_msghandler(void)
proc_ipmi_root->owner = THIS_MODULE;
#endif /* CONFIG_PROC_FS */
- init_timer(&ipmi_timer);
- ipmi_timer.data = 0;
- ipmi_timer.function = ipmi_timeout;
- ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
- add_timer(&ipmi_timer);
+ setup_timer(&ipmi_timer, ipmi_timeout, 0);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 02a7dd7a8a55..bd4f2248b758 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -55,23 +55,6 @@
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
-#ifdef CONFIG_HIGH_RES_TIMERS
-#include <linux/hrtime.h>
-# if defined(schedule_next_int)
-/* Old high-res timer code, do translations. */
-# define get_arch_cycles(a) quick_update_jiffies_sub(a)
-# define arch_cycles_per_jiffy cycles_per_jiffies
-# endif
-static inline void add_usec_to_timer(struct timer_list *t, long v)
-{
- t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
- while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
- {
- t->expires++;
- t->arch_cycle_expires -= arch_cycles_per_jiffy;
- }
-}
-#endif
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi_smi.h>
@@ -243,8 +226,6 @@ static int register_xaction_notifier(struct notifier_block * nb)
return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
-static void si_restart_short_timer(struct smi_info *smi_info);
-
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
@@ -768,7 +749,6 @@ static void sender(void *send_info,
&& (smi_info->curr_msg == NULL))
{
start_next_msg(smi_info);
- si_restart_short_timer(smi_info);
}
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
@@ -809,7 +789,7 @@ static int ipmi_thread(void *data)
/* do nothing */
}
else if (smi_result == SI_SM_CALL_WITH_DELAY)
- udelay(1);
+ schedule();
else
schedule_timeout_interruptible(1);
}
@@ -833,37 +813,6 @@ static void request_events(void *send_info)
static int initialized = 0;
-/* Must be called with interrupts off and with the si_lock held. */
-static void si_restart_short_timer(struct smi_info *smi_info)
-{
-#if defined(CONFIG_HIGH_RES_TIMERS)
- unsigned long flags;
- unsigned long jiffies_now;
- unsigned long seq;
-
- if (del_timer(&(smi_info->si_timer))) {
- /* If we don't delete the timer, then it will go off
- immediately, anyway. So we only process if we
- actually delete the timer. */
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- jiffies_now = jiffies;
- smi_info->si_timer.expires = jiffies_now;
- smi_info->si_timer.arch_cycle_expires
- = get_arch_cycles(jiffies_now);
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
-
- add_timer(&(smi_info->si_timer));
- spin_lock_irqsave(&smi_info->count_lock, flags);
- smi_info->timeout_restarts++;
- spin_unlock_irqrestore(&smi_info->count_lock, flags);
- }
-#endif
-}
-
static void smi_timeout(unsigned long data)
{
struct smi_info *smi_info = (struct smi_info *) data;
@@ -904,31 +853,15 @@ static void smi_timeout(unsigned long data)
/* If the state machine asks for a short delay, then shorten
the timer timeout. */
if (smi_result == SI_SM_CALL_WITH_DELAY) {
-#if defined(CONFIG_HIGH_RES_TIMERS)
- unsigned long seq;
-#endif
spin_lock_irqsave(&smi_info->count_lock, flags);
smi_info->short_timeouts++;
spin_unlock_irqrestore(&smi_info->count_lock, flags);
-#if defined(CONFIG_HIGH_RES_TIMERS)
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- smi_info->si_timer.expires = jiffies;
- smi_info->si_timer.arch_cycle_expires
- = get_arch_cycles(smi_info->si_timer.expires);
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
- add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
-#else
smi_info->si_timer.expires = jiffies + 1;
-#endif
} else {
spin_lock_irqsave(&smi_info->count_lock, flags);
smi_info->long_timeouts++;
spin_unlock_irqrestore(&smi_info->count_lock, flags);
smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-#if defined(CONFIG_HIGH_RES_TIMERS)
- smi_info->si_timer.arch_cycle_expires = 0;
-#endif
}
do_add_timer:
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 8f8867170973..1a0a19c53605 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -949,9 +949,10 @@ static int wdog_reboot_handler(struct notifier_block *this,
/* Disable the WDT if we are shutting down. */
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
panic_halt_ipmi_set_timeout();
- } else {
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/* Set a long timer to let the reboot happens, but
- reboot if it hangs. */
+ reboot if it hangs, but only if the watchdog
+ timer was already running. */
timeout = 120;
pretimeout = 0;
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
@@ -973,16 +974,17 @@ static int wdog_panic_handler(struct notifier_block *this,
{
static int panic_event_handled = 0;
- /* On a panic, if we have a panic timeout, make sure that the thing
- reboots, even if it hangs during that panic. */
- if (watchdog_user && !panic_event_handled) {
- /* Make sure the panic doesn't hang, and make sure we
- do this only once. */
+ /* On a panic, if we have a panic timeout, make sure to extend
+ the watchdog timer to a reasonable value to complete the
+ panic, if the watchdog timer is running. Plus the
+ pretimeout is meaningless at panic time. */
+ if (watchdog_user && !panic_event_handled &&
+ ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ /* Make sure we do this only once. */
panic_event_handled = 1;
timeout = 255;
pretimeout = 0;
- ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
panic_halt_ipmi_set_timeout();
}
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index ef20c1fc9c4c..216c79256de3 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -42,13 +42,12 @@
#include <linux/devfs_fs_kernel.h>
#include <linux/device.h>
#include <linux/wait.h>
+#include <linux/eisa.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_PCI
#include <linux/pci.h>
-#endif
/*****************************************************************************/
@@ -137,6 +136,10 @@ static stlconf_t stli_brdconf[] = {
static int stli_nrbrds = ARRAY_SIZE(stli_brdconf);
+/* stli_lock must NOT be taken holding brd_lock */
+static spinlock_t stli_lock; /* TTY logic lock */
+static spinlock_t brd_lock; /* Board logic lock */
+
/*
* There is some experimental EISA board detection code in this driver.
* By default it is disabled, but for those that want to try it out,
@@ -173,14 +176,6 @@ static char *stli_serialname = "ttyE";
static struct tty_driver *stli_serial;
-/*
- * We will need to allocate a temporary write buffer for chars that
- * come direct from user space. The problem is that a copy from user
- * space might cause a page fault (typically on a system that is
- * swapping!). All ports will share one buffer - since if the system
- * is already swapping a shared buffer won't make things any worse.
- */
-static char *stli_tmpwritebuf;
#define STLI_TXBUFSIZE 4096
@@ -419,7 +414,7 @@ static int stli_eisamempsize = ARRAY_SIZE(stli_eisamemprobeaddrs);
#endif
static struct pci_device_id istallion_pci_tbl[] = {
- { PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_ECRA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_ECRA), },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, istallion_pci_tbl);
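
PCI_DEVICE() fills in the vendor and device fields and wildcards the subsystem IDs, so the new table entry above is equivalent to the open-coded initializer it replaces. For a hypothetical device:

    #include <linux/pci.h>

    #define MY_VENDOR_ID	0x1234		/* illustrative values only */
    #define MY_DEVICE_ID	0x5678

    static struct pci_device_id my_pci_tbl[] = {
    	{ PCI_DEVICE(MY_VENDOR_ID, MY_DEVICE_ID) },	/* expands to vendor, device,
    							   PCI_ANY_ID subvendor/subdevice */
    	{ 0 }
    };
    MODULE_DEVICE_TABLE(pci, my_pci_tbl);
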
@@ -682,7 +677,7 @@ static int stli_startbrd(stlibrd_t *brdp);
static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp);
static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp);
static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
-static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp);
+static void stli_brdpoll(stlibrd_t *brdp, cdkhdr_t __iomem *hdrp);
static void stli_poll(unsigned long arg);
static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp);
static int stli_initopen(stlibrd_t *brdp, stliport_t *portp);
@@ -693,7 +688,8 @@ static void stli_dohangup(void *arg);
static int stli_setport(stliport_t *portp);
static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
-static void stli_dodelaycmd(stliport_t *portp, volatile cdkctrl_t *cp);
+static void __stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
+static void stli_dodelaycmd(stliport_t *portp, cdkctrl_t __iomem *cp);
static void stli_mkasyport(stliport_t *portp, asyport_t *pp, struct termios *tiosp);
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts);
static long stli_mktiocm(unsigned long sigvalue);
@@ -799,18 +795,8 @@ static struct class *istallion_class;
static int __init istallion_module_init(void)
{
- unsigned long flags;
-
-#ifdef DEBUG
- printk("init_module()\n");
-#endif
-
- save_flags(flags);
- cli();
stli_init();
- restore_flags(flags);
-
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -819,33 +805,24 @@ static void __exit istallion_module_exit(void)
{
stlibrd_t *brdp;
stliport_t *portp;
- unsigned long flags;
int i, j;
-#ifdef DEBUG
- printk("cleanup_module()\n");
-#endif
-
printk(KERN_INFO "Unloading %s: version %s\n", stli_drvtitle,
stli_drvversion);
- save_flags(flags);
- cli();
-
-/*
- * Free up all allocated resources used by the ports. This includes
- * memory and interrupts.
- */
+ /*
+ * Free up all allocated resources used by the ports. This includes
+ * memory and interrupts.
+ */
if (stli_timeron) {
stli_timeron = 0;
- del_timer(&stli_timerlist);
+ del_timer_sync(&stli_timerlist);
}
i = tty_unregister_driver(stli_serial);
if (i) {
printk("STALLION: failed to un-register tty driver, "
"errno=%d\n", -i);
- restore_flags(flags);
return;
}
put_tty_driver(stli_serial);
@@ -859,16 +836,15 @@ static void __exit istallion_module_exit(void)
printk("STALLION: failed to un-register serial memory device, "
"errno=%d\n", -i);
- kfree(stli_tmpwritebuf);
kfree(stli_txcookbuf);
for (i = 0; (i < stli_nrbrds); i++) {
- if ((brdp = stli_brds[i]) == (stlibrd_t *) NULL)
+ if ((brdp = stli_brds[i]) == NULL)
continue;
for (j = 0; (j < STL_MAXPORTS); j++) {
portp = brdp->ports[j];
- if (portp != (stliport_t *) NULL) {
- if (portp->tty != (struct tty_struct *) NULL)
+ if (portp != NULL) {
+ if (portp->tty != NULL)
tty_hangup(portp->tty);
kfree(portp);
}
@@ -878,10 +854,8 @@ static void __exit istallion_module_exit(void)
if (brdp->iosize > 0)
release_region(brdp->iobase, brdp->iosize);
kfree(brdp);
- stli_brds[i] = (stlibrd_t *) NULL;
+ stli_brds[i] = NULL;
}
-
- restore_flags(flags);
}
module_init(istallion_module_init);
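
del_timer_sync(), as used in the unload path above, waits for a handler that is currently running on another CPU to finish before returning, which del_timer() does not. A sketch of the usual shutdown ordering (hypothetical names):

    #include <linux/timer.h>

    static struct timer_list my_poll_timer;
    static int my_timer_running;

    static void my_stop_polling(void)
    {
    	if (my_timer_running) {
    		my_timer_running = 0;
    		/* Wait for any handler executing on another CPU to return,
    		   then remove the pending timer. */
    		del_timer_sync(&my_poll_timer);
    	}
    	/* Only now is it safe to free data the handler dereferences. */
    }
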
@@ -895,19 +869,15 @@ module_exit(istallion_module_exit);
static void stli_argbrds(void)
{
- stlconf_t conf;
- stlibrd_t *brdp;
- int i;
-
-#ifdef DEBUG
- printk("stli_argbrds()\n");
-#endif
+ stlconf_t conf;
+ stlibrd_t *brdp;
+ int i;
for (i = stli_nrbrds; i < ARRAY_SIZE(stli_brdsp); i++) {
memset(&conf, 0, sizeof(conf));
if (stli_parsebrd(&conf, stli_brdsp[i]) == 0)
continue;
- if ((brdp = stli_allocbrd()) == (stlibrd_t *) NULL)
+ if ((brdp = stli_allocbrd()) == NULL)
continue;
stli_nrbrds = i + 1;
brdp->brdnr = i;
@@ -926,9 +896,9 @@ static void stli_argbrds(void)
static unsigned long stli_atol(char *str)
{
- unsigned long val;
- int base, c;
- char *sp;
+ unsigned long val;
+ int base, c;
+ char *sp;
val = 0;
sp = str;
@@ -962,15 +932,11 @@ static unsigned long stli_atol(char *str)
static int stli_parsebrd(stlconf_t *confp, char **argp)
{
- char *sp;
- int i;
-
-#ifdef DEBUG
- printk("stli_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
-#endif
+ char *sp;
+ int i;
- if ((argp[0] == (char *) NULL) || (*argp[0] == 0))
- return(0);
+ if (argp[0] == NULL || *argp[0] == 0)
+ return 0;
for (sp = argp[0], i = 0; ((*sp != 0) && (i < 25)); sp++, i++)
*sp = TOLOWER(*sp);
@@ -985,9 +951,9 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
}
confp->brdtype = stli_brdstr[i].type;
- if ((argp[1] != (char *) NULL) && (*argp[1] != 0))
+ if (argp[1] != NULL && *argp[1] != 0)
confp->ioaddr1 = stli_atol(argp[1]);
- if ((argp[2] != (char *) NULL) && (*argp[2] != 0))
+ if (argp[2] != NULL && *argp[2] != 0)
confp->memaddr = stli_atol(argp[2]);
return(1);
}
@@ -998,34 +964,29 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
static int stli_open(struct tty_struct *tty, struct file *filp)
{
- stlibrd_t *brdp;
- stliport_t *portp;
- unsigned int minordev;
- int brdnr, portnr, rc;
-
-#ifdef DEBUG
- printk("stli_open(tty=%x,filp=%x): device=%s\n", (int) tty,
- (int) filp, tty->name);
-#endif
+ stlibrd_t *brdp;
+ stliport_t *portp;
+ unsigned int minordev;
+ int brdnr, portnr, rc;
minordev = tty->index;
brdnr = MINOR2BRD(minordev);
if (brdnr >= stli_nrbrds)
- return(-ENODEV);
+ return -ENODEV;
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (brdp == NULL)
+ return -ENODEV;
if ((brdp->state & BST_STARTED) == 0)
- return(-ENODEV);
+ return -ENODEV;
portnr = MINOR2PORT(minordev);
if ((portnr < 0) || (portnr > brdp->nrports))
- return(-ENODEV);
+ return -ENODEV;
portp = brdp->ports[portnr];
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
+ if (portp == NULL)
+ return -ENODEV;
if (portp->devnr < 1)
- return(-ENODEV);
+ return -ENODEV;
/*
@@ -1037,8 +998,8 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
if (portp->flags & ASYNC_CLOSING) {
interruptible_sleep_on(&portp->close_wait);
if (portp->flags & ASYNC_HUP_NOTIFY)
- return(-EAGAIN);
- return(-ERESTARTSYS);
+ return -EAGAIN;
+ return -ERESTARTSYS;
}
/*
@@ -1054,7 +1015,7 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_INITIALIZING, &portp->state));
if (signal_pending(current))
- return(-ERESTARTSYS);
+ return -ERESTARTSYS;
if ((portp->flags & ASYNC_INITIALIZED) == 0) {
set_bit(ST_INITIALIZING, &portp->state);
@@ -1065,7 +1026,7 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
clear_bit(ST_INITIALIZING, &portp->state);
wake_up_interruptible(&portp->raw_wait);
if (rc < 0)
- return(rc);
+ return rc;
}
/*
@@ -1077,8 +1038,8 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
if (portp->flags & ASYNC_CLOSING) {
interruptible_sleep_on(&portp->close_wait);
if (portp->flags & ASYNC_HUP_NOTIFY)
- return(-EAGAIN);
- return(-ERESTARTSYS);
+ return -EAGAIN;
+ return -ERESTARTSYS;
}
/*
@@ -1088,38 +1049,33 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
*/
if (!(filp->f_flags & O_NONBLOCK)) {
if ((rc = stli_waitcarrier(brdp, portp, filp)) != 0)
- return(rc);
+ return rc;
}
portp->flags |= ASYNC_NORMAL_ACTIVE;
- return(0);
+ return 0;
}
/*****************************************************************************/
static void stli_close(struct tty_struct *tty, struct file *filp)
{
- stlibrd_t *brdp;
- stliport_t *portp;
- unsigned long flags;
-
-#ifdef DEBUG
- printk("stli_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
-#endif
+ stlibrd_t *brdp;
+ stliport_t *portp;
+ unsigned long flags;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&stli_lock, flags);
if (tty_hung_up_p(filp)) {
- restore_flags(flags);
+ spin_unlock_irqrestore(&stli_lock, flags);
return;
}
if ((tty->count == 1) && (portp->refcount != 1))
portp->refcount = 1;
if (portp->refcount-- > 1) {
- restore_flags(flags);
+ spin_unlock_irqrestore(&stli_lock, flags);
return;
}
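
The conversion pattern used throughout these hunks: save_flags()/cli()/restore_flags(), which only ever disabled interrupts on the local CPU, becomes a real spinlock taken with interrupts disabled. A minimal sketch of the idiom with a file-scope lock, as this driver now uses (DEFINE_SPINLOCK shown for brevity; a runtime spin_lock_init() works as well):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);	/* protects the port/tty bookkeeping */

    struct my_port { int refcount; };

    static void my_drop_refcount(struct my_port *port)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&my_lock, flags);	/* SMP-safe and IRQ-safe */
    	port->refcount--;
    	spin_unlock_irqrestore(&my_lock, flags);
    }
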
@@ -1134,6 +1090,8 @@ static void stli_close(struct tty_struct *tty, struct file *filp)
if (tty == stli_txcooktty)
stli_flushchars(tty);
tty->closing = 1;
+ spin_unlock_irqrestore(&stli_lock, flags);
+
if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, portp->closing_wait);
@@ -1157,7 +1115,7 @@ static void stli_close(struct tty_struct *tty, struct file *filp)
stli_flushbuffer(tty);
tty->closing = 0;
- portp->tty = (struct tty_struct *) NULL;
+ portp->tty = NULL;
if (portp->openwaitcnt) {
if (portp->close_delay)
@@ -1167,7 +1125,6 @@ static void stli_close(struct tty_struct *tty, struct file *filp)
portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&portp->close_wait);
- restore_flags(flags);
}
/*****************************************************************************/
@@ -1182,45 +1139,41 @@ static void stli_close(struct tty_struct *tty, struct file *filp)
static int stli_initopen(stlibrd_t *brdp, stliport_t *portp)
{
- struct tty_struct *tty;
- asynotify_t nt;
- asyport_t aport;
- int rc;
-
-#ifdef DEBUG
- printk("stli_initopen(brdp=%x,portp=%x)\n", (int) brdp, (int) portp);
-#endif
+ struct tty_struct *tty;
+ asynotify_t nt;
+ asyport_t aport;
+ int rc;
if ((rc = stli_rawopen(brdp, portp, 0, 1)) < 0)
- return(rc);
+ return rc;
memset(&nt, 0, sizeof(asynotify_t));
nt.data = (DT_TXLOW | DT_TXEMPTY | DT_RXBUSY | DT_RXBREAK);
nt.signal = SG_DCD;
if ((rc = stli_cmdwait(brdp, portp, A_SETNOTIFY, &nt,
sizeof(asynotify_t), 0)) < 0)
- return(rc);
+ return rc;
tty = portp->tty;
- if (tty == (struct tty_struct *) NULL)
- return(-ENODEV);
+ if (tty == NULL)
+ return -ENODEV;
stli_mkasyport(portp, &aport, tty->termios);
if ((rc = stli_cmdwait(brdp, portp, A_SETPORT, &aport,
sizeof(asyport_t), 0)) < 0)
- return(rc);
+ return rc;
set_bit(ST_GETSIGS, &portp->state);
if ((rc = stli_cmdwait(brdp, portp, A_GETSIGNALS, &portp->asig,
sizeof(asysigs_t), 1)) < 0)
- return(rc);
+ return rc;
if (test_and_clear_bit(ST_GETSIGS, &portp->state))
portp->sigs = stli_mktiocm(portp->asig.sigvalue);
stli_mkasysigs(&portp->asig, 1, 1);
if ((rc = stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
sizeof(asysigs_t), 0)) < 0)
- return(rc);
+ return rc;
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -1234,22 +1187,15 @@ static int stli_initopen(stlibrd_t *brdp, stliport_t *portp)
static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait)
{
- volatile cdkhdr_t *hdrp;
- volatile cdkctrl_t *cp;
- volatile unsigned char *bits;
- unsigned long flags;
- int rc;
-
-#ifdef DEBUG
- printk("stli_rawopen(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
- (int) brdp, (int) portp, (int) arg, wait);
-#endif
+ cdkhdr_t __iomem *hdrp;
+ cdkctrl_t __iomem *cp;
+ unsigned char __iomem *bits;
+ unsigned long flags;
+ int rc;
/*
* Send a message to the slave to open this port.
*/
- save_flags(flags);
- cli();
/*
* Slave is already closing this port. This can happen if a hangup
@@ -1260,7 +1206,6 @@ static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, i
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_CLOSING, &portp->state));
if (signal_pending(current)) {
- restore_flags(flags);
return -ERESTARTSYS;
}
@@ -1269,19 +1214,20 @@ static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, i
* memory. Once the message is in set the service bits to say that
* this port wants service.
*/
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- cp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
- cp->openarg = arg;
- cp->open = 1;
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((volatile unsigned char *) hdrp) + brdp->slaveoffset +
+ cp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
+ writel(arg, &cp->openarg);
+ writeb(1, &cp->open);
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
portp->portidx;
- *bits |= portp->portbit;
+ writeb(readb(bits) | portp->portbit, bits);
EBRDDISABLE(brdp);
if (wait == 0) {
- restore_flags(flags);
- return(0);
+ spin_unlock_irqrestore(&brd_lock, flags);
+ return 0;
}
/*
@@ -1290,15 +1236,16 @@ static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, i
*/
rc = 0;
set_bit(ST_OPENING, &portp->state);
+ spin_unlock_irqrestore(&brd_lock, flags);
+
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_OPENING, &portp->state));
if (signal_pending(current))
rc = -ERESTARTSYS;
- restore_flags(flags);
if ((rc == 0) && (portp->rc != 0))
rc = -EIO;
- return(rc);
+ return rc;
}
/*****************************************************************************/
@@ -1311,19 +1258,11 @@ static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, i
static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait)
{
- volatile cdkhdr_t *hdrp;
- volatile cdkctrl_t *cp;
- volatile unsigned char *bits;
- unsigned long flags;
- int rc;
-
-#ifdef DEBUG
- printk("stli_rawclose(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
- (int) brdp, (int) portp, (int) arg, wait);
-#endif
-
- save_flags(flags);
- cli();
+ cdkhdr_t __iomem *hdrp;
+ cdkctrl_t __iomem *cp;
+ unsigned char __iomem *bits;
+ unsigned long flags;
+ int rc;
/*
* Slave is already closing this port. This can happen if a hangup
@@ -1333,7 +1272,6 @@ static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg,
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_CLOSING, &portp->state));
if (signal_pending(current)) {
- restore_flags(flags);
return -ERESTARTSYS;
}
}
@@ -1341,21 +1279,22 @@ static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg,
/*
* Write the close command into shared memory.
*/
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- cp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
- cp->closearg = arg;
- cp->close = 1;
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((volatile unsigned char *) hdrp) + brdp->slaveoffset +
+ cp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
+ writel(arg, &cp->closearg);
+ writeb(1, &cp->close);
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
portp->portidx;
- *bits |= portp->portbit;
+ writeb(readb(bits) | portp->portbit, bits);
EBRDDISABLE(brdp);
set_bit(ST_CLOSING, &portp->state);
- if (wait == 0) {
- restore_flags(flags);
- return(0);
- }
+ spin_unlock_irqrestore(&brd_lock, flags);
+
+ if (wait == 0)
+ return 0;
/*
* Slave is in action, so now we must wait for the open acknowledgment
@@ -1366,11 +1305,10 @@ static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg,
!test_bit(ST_CLOSING, &portp->state));
if (signal_pending(current))
rc = -ERESTARTSYS;
- restore_flags(flags);
if ((rc == 0) && (portp->rc != 0))
rc = -EIO;
- return(rc);
+ return rc;
}
/*****************************************************************************/
@@ -1384,36 +1322,21 @@ static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg,
static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback)
{
- unsigned long flags;
-
-#ifdef DEBUG
- printk("stli_cmdwait(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
- "copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
- (int) arg, size, copyback);
-#endif
-
- save_flags(flags);
- cli();
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_CMDING, &portp->state));
- if (signal_pending(current)) {
- restore_flags(flags);
+ if (signal_pending(current))
return -ERESTARTSYS;
- }
stli_sendcmd(brdp, portp, cmd, arg, size, copyback);
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_CMDING, &portp->state));
- if (signal_pending(current)) {
- restore_flags(flags);
+ if (signal_pending(current))
return -ERESTARTSYS;
- }
- restore_flags(flags);
if (portp->rc != 0)
- return(-EIO);
- return(0);
+ return -EIO;
+ return 0;
}
/*****************************************************************************/
@@ -1425,22 +1348,18 @@ static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, v
static int stli_setport(stliport_t *portp)
{
- stlibrd_t *brdp;
- asyport_t aport;
-
-#ifdef DEBUG
- printk("stli_setport(portp=%x)\n", (int) portp);
-#endif
+ stlibrd_t *brdp;
+ asyport_t aport;
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
- if (portp->tty == (struct tty_struct *) NULL)
- return(-ENODEV);
- if ((portp->brdnr < 0) && (portp->brdnr >= stli_nrbrds))
- return(-ENODEV);
+ if (portp == NULL)
+ return -ENODEV;
+ if (portp->tty == NULL)
+ return -ENODEV;
+ if (portp->brdnr < 0 && portp->brdnr >= stli_nrbrds)
+ return -ENODEV;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (brdp == NULL)
+ return -ENODEV;
stli_mkasyport(portp, &aport, portp->tty->termios);
return(stli_cmdwait(brdp, portp, A_SETPORT, &aport, sizeof(asyport_t), 0));
@@ -1455,13 +1374,8 @@ static int stli_setport(stliport_t *portp)
static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp)
{
- unsigned long flags;
- int rc, doclocal;
-
-#ifdef DEBUG
- printk("stli_waitcarrier(brdp=%x,portp=%x,filp=%x)\n",
- (int) brdp, (int) portp, (int) filp);
-#endif
+ unsigned long flags;
+ int rc, doclocal;
rc = 0;
doclocal = 0;
@@ -1469,11 +1383,11 @@ static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *fil
if (portp->tty->termios->c_cflag & CLOCAL)
doclocal++;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&stli_lock, flags);
portp->openwaitcnt++;
if (! tty_hung_up_p(filp))
portp->refcount--;
+ spin_unlock_irqrestore(&stli_lock, flags);
for (;;) {
stli_mkasysigs(&portp->asig, 1, 1);
@@ -1499,12 +1413,13 @@ static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *fil
interruptible_sleep_on(&portp->open_wait);
}
+ spin_lock_irqsave(&stli_lock, flags);
if (! tty_hung_up_p(filp))
portp->refcount++;
portp->openwaitcnt--;
- restore_flags(flags);
+ spin_unlock_irqrestore(&stli_lock, flags);
- return(rc);
+ return rc;
}
/*****************************************************************************/
@@ -1517,46 +1432,38 @@ static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *fil
static int stli_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
- volatile cdkasy_t *ap;
- volatile cdkhdr_t *hdrp;
- volatile unsigned char *bits;
- unsigned char *shbuf, *chbuf;
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned int len, stlen, head, tail, size;
- unsigned long flags;
-
-#ifdef DEBUG
- printk("stli_write(tty=%x,buf=%x,count=%d)\n",
- (int) tty, (int) buf, count);
-#endif
+ cdkasy_t __iomem *ap;
+ cdkhdr_t __iomem *hdrp;
+ unsigned char __iomem *bits;
+ unsigned char __iomem *shbuf;
+ unsigned char *chbuf;
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned int len, stlen, head, tail, size;
+ unsigned long flags;
- if ((tty == (struct tty_struct *) NULL) ||
- (stli_tmpwritebuf == (char *) NULL))
- return(0);
if (tty == stli_txcooktty)
stli_flushchars(tty);
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return(0);
+ if (portp == NULL)
+ return 0;
if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return(0);
+ return 0;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(0);
+ if (brdp == NULL)
+ return 0;
chbuf = (unsigned char *) buf;
/*
* All data is now local, shove as much as possible into shared memory.
*/
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- ap = (volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr);
- head = (unsigned int) ap->txq.head;
- tail = (unsigned int) ap->txq.tail;
- if (tail != ((unsigned int) ap->txq.tail))
- tail = (unsigned int) ap->txq.tail;
+ ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
+ head = (unsigned int) readw(&ap->txq.head);
+ tail = (unsigned int) readw(&ap->txq.tail);
+ if (tail != ((unsigned int) readw(&ap->txq.tail)))
+ tail = (unsigned int) readw(&ap->txq.tail);
size = portp->txsize;
if (head >= tail) {
len = size - (head - tail) - 1;
@@ -1568,11 +1475,11 @@ static int stli_write(struct tty_struct *tty, const unsigned char *buf, int coun
len = MIN(len, count);
count = 0;
- shbuf = (char *) EBRDGETMEMPTR(brdp, portp->txoffset);
+ shbuf = (char __iomem *) EBRDGETMEMPTR(brdp, portp->txoffset);
while (len > 0) {
stlen = MIN(len, stlen);
- memcpy((shbuf + head), chbuf, stlen);
+ memcpy_toio(shbuf + head, chbuf, stlen);
chbuf += stlen;
len -= stlen;
count += stlen;
@@ -1583,20 +1490,19 @@ static int stli_write(struct tty_struct *tty, const unsigned char *buf, int coun
}
}
- ap = (volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr);
- ap->txq.head = head;
+ ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
+ writew(head, &ap->txq.head);
if (test_bit(ST_TXBUSY, &portp->state)) {
- if (ap->changed.data & DT_TXEMPTY)
- ap->changed.data &= ~DT_TXEMPTY;
+ if (readl(&ap->changed.data) & DT_TXEMPTY)
+ writel(readl(&ap->changed.data) & ~DT_TXEMPTY, &ap->changed.data);
}
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((volatile unsigned char *) hdrp) + brdp->slaveoffset +
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
portp->portidx;
- *bits |= portp->portbit;
+ writeb(readb(bits) | portp->portbit, bits);
set_bit(ST_TXBUSY, &portp->state);
EBRDDISABLE(brdp);
-
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
return(count);
}
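
The transmit path shows the other half of the __iomem conversion: queue indices are fetched with readw(), data is copied with memcpy_toio(), and the service bit is set with a readb()/writeb() read-modify-write instead of dereferencing a volatile pointer. The core of the idiom, as a sketch (the real code also handles ring-buffer wrap and the double tail read):

    #include <linux/types.h>
    #include <asm/io.h>

    struct my_txq {
    	u16 head;
    	u16 tail;
    };

    static unsigned int my_copy_to_ring(struct my_txq __iomem *q,
    				    unsigned char __iomem *ring, unsigned int size,
    				    const unsigned char *buf, unsigned int len)
    {
    	unsigned int head = readw(&q->head);
    	unsigned int tail = readw(&q->tail);
    	unsigned int room = (head >= tail) ? size - (head - tail) - 1
    					   : tail - head - 1;

    	if (len > room)
    		len = room;
    	memcpy_toio(ring + head, buf, len);	/* sketch: ignores wrap at end of ring */
    	writew((head + len) % size, &q->head);	/* publish the new head index */
    	return len;
    }
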
@@ -1613,14 +1519,8 @@ static int stli_write(struct tty_struct *tty, const unsigned char *buf, int coun
static void stli_putchar(struct tty_struct *tty, unsigned char ch)
{
-#ifdef DEBUG
- printk("stli_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return;
if (tty != stli_txcooktty) {
- if (stli_txcooktty != (struct tty_struct *) NULL)
+ if (stli_txcooktty != NULL)
stli_flushchars(stli_txcooktty);
stli_txcooktty = tty;
}
@@ -1640,29 +1540,26 @@ static void stli_putchar(struct tty_struct *tty, unsigned char ch)
static void stli_flushchars(struct tty_struct *tty)
{
- volatile cdkhdr_t *hdrp;
- volatile unsigned char *bits;
- volatile cdkasy_t *ap;
- struct tty_struct *cooktty;
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned int len, stlen, head, tail, size, count, cooksize;
- unsigned char *buf, *shbuf;
- unsigned long flags;
-
-#ifdef DEBUG
- printk("stli_flushchars(tty=%x)\n", (int) tty);
-#endif
+ cdkhdr_t __iomem *hdrp;
+ unsigned char __iomem *bits;
+ cdkasy_t __iomem *ap;
+ struct tty_struct *cooktty;
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned int len, stlen, head, tail, size, count, cooksize;
+ unsigned char *buf;
+ unsigned char __iomem *shbuf;
+ unsigned long flags;
cooksize = stli_txcooksize;
cooktty = stli_txcooktty;
stli_txcooksize = 0;
stli_txcookrealsize = 0;
- stli_txcooktty = (struct tty_struct *) NULL;
+ stli_txcooktty = NULL;
- if (tty == (struct tty_struct *) NULL)
+ if (tty == NULL)
return;
- if (cooktty == (struct tty_struct *) NULL)
+ if (cooktty == NULL)
return;
if (tty != cooktty)
tty = cooktty;
@@ -1670,23 +1567,22 @@ static void stli_flushchars(struct tty_struct *tty)
return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
return;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- ap = (volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr);
- head = (unsigned int) ap->txq.head;
- tail = (unsigned int) ap->txq.tail;
- if (tail != ((unsigned int) ap->txq.tail))
- tail = (unsigned int) ap->txq.tail;
+ ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
+ head = (unsigned int) readw(&ap->txq.head);
+ tail = (unsigned int) readw(&ap->txq.tail);
+ if (tail != ((unsigned int) readw(&ap->txq.tail)))
+ tail = (unsigned int) readw(&ap->txq.tail);
size = portp->txsize;
if (head >= tail) {
len = size - (head - tail) - 1;
@@ -1703,7 +1599,7 @@ static void stli_flushchars(struct tty_struct *tty)
while (len > 0) {
stlen = MIN(len, stlen);
- memcpy((shbuf + head), buf, stlen);
+ memcpy_toio(shbuf + head, buf, stlen);
buf += stlen;
len -= stlen;
count += stlen;
@@ -1714,73 +1610,66 @@ static void stli_flushchars(struct tty_struct *tty)
}
}
- ap = (volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr);
- ap->txq.head = head;
+ ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
+ writew(head, &ap->txq.head);
if (test_bit(ST_TXBUSY, &portp->state)) {
- if (ap->changed.data & DT_TXEMPTY)
- ap->changed.data &= ~DT_TXEMPTY;
+ if (readl(&ap->changed.data) & DT_TXEMPTY)
+ writel(readl(&ap->changed.data) & ~DT_TXEMPTY, &ap->changed.data);
}
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((volatile unsigned char *) hdrp) + brdp->slaveoffset +
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
portp->portidx;
- *bits |= portp->portbit;
+ writeb(readb(bits) | portp->portbit, bits);
set_bit(ST_TXBUSY, &portp->state);
EBRDDISABLE(brdp);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
static int stli_writeroom(struct tty_struct *tty)
{
- volatile cdkasyrq_t *rp;
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned int head, tail, len;
- unsigned long flags;
-
-#ifdef DEBUG
- printk("stli_writeroom(tty=%x)\n", (int) tty);
-#endif
+ cdkasyrq_t __iomem *rp;
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned int head, tail, len;
+ unsigned long flags;
- if (tty == (struct tty_struct *) NULL)
- return(0);
if (tty == stli_txcooktty) {
if (stli_txcookrealsize != 0) {
len = stli_txcookrealsize - stli_txcooksize;
- return(len);
+ return len;
}
}
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return(0);
+ if (portp == NULL)
+ return 0;
if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return(0);
+ return 0;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(0);
+ if (brdp == NULL)
+ return 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- rp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->txq;
- head = (unsigned int) rp->head;
- tail = (unsigned int) rp->tail;
- if (tail != ((unsigned int) rp->tail))
- tail = (unsigned int) rp->tail;
+ rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->txq;
+ head = (unsigned int) readw(&rp->head);
+ tail = (unsigned int) readw(&rp->tail);
+ if (tail != ((unsigned int) readw(&rp->tail)))
+ tail = (unsigned int) readw(&rp->tail);
len = (head >= tail) ? (portp->txsize - (head - tail)) : (tail - head);
len--;
EBRDDISABLE(brdp);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
if (tty == stli_txcooktty) {
stli_txcookrealsize = len;
len -= stli_txcooksize;
}
- return(len);
+ return len;
}
/*****************************************************************************/
@@ -1795,44 +1684,37 @@ static int stli_writeroom(struct tty_struct *tty)
static int stli_charsinbuffer(struct tty_struct *tty)
{
- volatile cdkasyrq_t *rp;
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned int head, tail, len;
- unsigned long flags;
-
-#ifdef DEBUG
- printk("stli_charsinbuffer(tty=%x)\n", (int) tty);
-#endif
+ cdkasyrq_t __iomem *rp;
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned int head, tail, len;
+ unsigned long flags;
- if (tty == (struct tty_struct *) NULL)
- return(0);
if (tty == stli_txcooktty)
stli_flushchars(tty);
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return(0);
+ if (portp == NULL)
+ return 0;
if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return(0);
+ return 0;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(0);
+ if (brdp == NULL)
+ return 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- rp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->txq;
- head = (unsigned int) rp->head;
- tail = (unsigned int) rp->tail;
- if (tail != ((unsigned int) rp->tail))
- tail = (unsigned int) rp->tail;
+ rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->txq;
+ head = (unsigned int) readw(&rp->head);
+ tail = (unsigned int) readw(&rp->tail);
+ if (tail != ((unsigned int) readw(&rp->tail)))
+ tail = (unsigned int) readw(&rp->tail);
len = (head >= tail) ? (head - tail) : (portp->txsize - (tail - head));
if ((len == 0) && test_bit(ST_TXBUSY, &portp->state))
len = 1;
EBRDDISABLE(brdp);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
- return(len);
+ return len;
}
/*****************************************************************************/
@@ -1843,12 +1725,8 @@ static int stli_charsinbuffer(struct tty_struct *tty)
static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp)
{
- struct serial_struct sio;
- stlibrd_t *brdp;
-
-#ifdef DEBUG
- printk("stli_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
-#endif
+ struct serial_struct sio;
+ stlibrd_t *brdp;
memset(&sio, 0, sizeof(struct serial_struct));
sio.type = PORT_UNKNOWN;
@@ -1863,7 +1741,7 @@ static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp)
sio.hub6 = 0;
brdp = stli_brds[portp->brdnr];
- if (brdp != (stlibrd_t *) NULL)
+ if (brdp != NULL)
sio.port = brdp->iobase;
return copy_to_user(sp, &sio, sizeof(struct serial_struct)) ?
@@ -1880,12 +1758,8 @@ static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp)
static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp)
{
- struct serial_struct sio;
- int rc;
-
-#ifdef DEBUG
- printk("stli_setserial(portp=%p,sp=%p)\n", portp, sp);
-#endif
+ struct serial_struct sio;
+ int rc;
if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
return -EFAULT;
@@ -1894,7 +1768,7 @@ static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp)
(sio.close_delay != portp->close_delay) ||
((sio.flags & ~ASYNC_USR_MASK) !=
(portp->flags & ~ASYNC_USR_MASK)))
- return(-EPERM);
+ return -EPERM;
}
portp->flags = (portp->flags & ~ASYNC_USR_MASK) |
@@ -1905,8 +1779,8 @@ static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp)
portp->custom_divisor = sio.custom_divisor;
if ((rc = stli_setport(portp)) < 0)
- return(rc);
- return(0);
+ return rc;
+ return 0;
}
/*****************************************************************************/
@@ -1917,19 +1791,19 @@ static int stli_tiocmget(struct tty_struct *tty, struct file *file)
stlibrd_t *brdp;
int rc;
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return(0);
+ if (portp == NULL)
+ return -ENODEV;
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
+ return 0;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(0);
+ if (brdp == NULL)
+ return 0;
if (tty->flags & (1 << TTY_IO_ERROR))
- return(-EIO);
+ return -EIO;
if ((rc = stli_cmdwait(brdp, portp, A_GETSIGNALS,
&portp->asig, sizeof(asysigs_t), 1)) < 0)
- return(rc);
+ return rc;
return stli_mktiocm(portp->asig.sigvalue);
}
@@ -1941,15 +1815,15 @@ static int stli_tiocmset(struct tty_struct *tty, struct file *file,
stlibrd_t *brdp;
int rts = -1, dtr = -1;
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return(0);
+ if (portp == NULL)
+ return -ENODEV;
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
+ return 0;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(0);
+ if (brdp == NULL)
+ return 0;
if (tty->flags & (1 << TTY_IO_ERROR))
- return(-EIO);
+ return -EIO;
if (set & TIOCM_RTS)
rts = 1;
@@ -1968,32 +1842,25 @@ static int stli_tiocmset(struct tty_struct *tty, struct file *file,
static int stli_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
{
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned int ival;
- int rc;
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned int ival;
+ int rc;
void __user *argp = (void __user *)arg;
-#ifdef DEBUG
- printk("stli_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
- (int) tty, (int) file, cmd, (int) arg);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return(-ENODEV);
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return(0);
+ if (portp == NULL)
+ return -ENODEV;
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
+ return 0;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(0);
+ if (brdp == NULL)
+ return 0;
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != COM_GETPORTSTATS) && (cmd != COM_CLRPORTSTATS)) {
if (tty->flags & (1 << TTY_IO_ERROR))
- return(-EIO);
+ return -EIO;
}
rc = 0;
@@ -2040,7 +1907,7 @@ static int stli_ioctl(struct tty_struct *tty, struct file *file, unsigned int cm
break;
}
- return(rc);
+ return rc;
}
/*****************************************************************************/
@@ -2052,24 +1919,20 @@ static int stli_ioctl(struct tty_struct *tty, struct file *file, unsigned int cm
static void stli_settermios(struct tty_struct *tty, struct termios *old)
{
- stliport_t *portp;
- stlibrd_t *brdp;
- struct termios *tiosp;
- asyport_t aport;
-
-#ifdef DEBUG
- printk("stli_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
-#endif
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ struct termios *tiosp;
+ asyport_t aport;
- if (tty == (struct tty_struct *) NULL)
+ if (tty == NULL)
return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
return;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
return;
tiosp = tty->termios;
@@ -2102,18 +1965,9 @@ static void stli_settermios(struct tty_struct *tty, struct termios *old)
static void stli_throttle(struct tty_struct *tty)
{
- stliport_t *portp;
-
-#ifdef DEBUG
- printk("stli_throttle(tty=%x)\n", (int) tty);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
+ stliport_t *portp = tty->driver_data;
+ if (portp == NULL)
return;
- portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return;
-
set_bit(ST_RXSTOP, &portp->state);
}
@@ -2127,88 +1981,30 @@ static void stli_throttle(struct tty_struct *tty)
static void stli_unthrottle(struct tty_struct *tty)
{
- stliport_t *portp;
-
-#ifdef DEBUG
- printk("stli_unthrottle(tty=%x)\n", (int) tty);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return;
- portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ stliport_t *portp = tty->driver_data;
+ if (portp == NULL)
return;
-
clear_bit(ST_RXSTOP, &portp->state);
}
/*****************************************************************************/
/*
- * Stop the transmitter. Basically to do this we will just turn TX
- * interrupts off.
+ * Stop the transmitter.
*/
static void stli_stop(struct tty_struct *tty)
{
- stlibrd_t *brdp;
- stliport_t *portp;
- asyctrl_t actrl;
-
-#ifdef DEBUG
- printk("stli_stop(tty=%x)\n", (int) tty);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return;
- portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return;
-
- memset(&actrl, 0, sizeof(asyctrl_t));
- actrl.txctrl = CT_STOPFLOW;
-#if 0
- stli_cmdwait(brdp, portp, A_PORTCTRL, &actrl, sizeof(asyctrl_t), 0);
-#endif
}
/*****************************************************************************/
/*
- * Start the transmitter again. Just turn TX interrupts back on.
+ * Start the transmitter again.
*/
static void stli_start(struct tty_struct *tty)
{
- stliport_t *portp;
- stlibrd_t *brdp;
- asyctrl_t actrl;
-
-#ifdef DEBUG
- printk("stli_start(tty=%x)\n", (int) tty);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return;
- portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
- return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return;
-
- memset(&actrl, 0, sizeof(asyctrl_t));
- actrl.txctrl = CT_STARTFLOW;
-#if 0
- stli_cmdwait(brdp, portp, A_PORTCTRL, &actrl, sizeof(asyctrl_t), 0);
-#endif
}
/*****************************************************************************/
@@ -2224,22 +2020,9 @@ static void stli_start(struct tty_struct *tty)
static void stli_dohangup(void *arg)
{
- stliport_t *portp;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_dohangup(portp=%x)\n", (int) arg);
-#endif
-
- /*
- * FIXME: There's a module removal race here: tty_hangup
- * calls schedule_work which will call into this
- * driver later.
- */
- portp = (stliport_t *) arg;
- if (portp != (stliport_t *) NULL) {
- if (portp->tty != (struct tty_struct *) NULL) {
- tty_hangup(portp->tty);
- }
+ stliport_t *portp = (stliport_t *) arg;
+ if (portp->tty != NULL) {
+ tty_hangup(portp->tty);
}
}
@@ -2254,31 +2037,25 @@ static void stli_dohangup(void *arg)
static void stli_hangup(struct tty_struct *tty)
{
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned long flags;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_hangup(tty=%x)\n", (int) tty);
-#endif
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned long flags;
- if (tty == (struct tty_struct *) NULL)
- return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
return;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
return;
portp->flags &= ~ASYNC_INITIALIZED;
- save_flags(flags);
- cli();
- if (! test_bit(ST_CLOSING, &portp->state))
+ if (!test_bit(ST_CLOSING, &portp->state))
stli_rawclose(brdp, portp, 0, 0);
+
+ spin_lock_irqsave(&stli_lock, flags);
if (tty->termios->c_cflag & HUPCL) {
stli_mkasysigs(&portp->asig, 0, 0);
if (test_bit(ST_CMDING, &portp->state)) {
@@ -2290,14 +2067,15 @@ static void stli_hangup(struct tty_struct *tty)
&portp->asig, sizeof(asysigs_t), 0);
}
}
- restore_flags(flags);
clear_bit(ST_TXBUSY, &portp->state);
clear_bit(ST_RXSTOP, &portp->state);
set_bit(TTY_IO_ERROR, &tty->flags);
- portp->tty = (struct tty_struct *) NULL;
+ portp->tty = NULL;
portp->flags &= ~ASYNC_NORMAL_ACTIVE;
portp->refcount = 0;
+ spin_unlock_irqrestore(&stli_lock, flags);
+
wake_up_interruptible(&portp->open_wait);
}
@@ -2312,29 +2090,22 @@ static void stli_hangup(struct tty_struct *tty)
static void stli_flushbuffer(struct tty_struct *tty)
{
- stliport_t *portp;
- stlibrd_t *brdp;
- unsigned long ftype, flags;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_flushbuffer(tty=%x)\n", (int) tty);
-#endif
+ stliport_t *portp;
+ stlibrd_t *brdp;
+ unsigned long ftype, flags;
- if (tty == (struct tty_struct *) NULL)
- return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
return;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
if (tty == stli_txcooktty) {
- stli_txcooktty = (struct tty_struct *) NULL;
+ stli_txcooktty = NULL;
stli_txcooksize = 0;
stli_txcookrealsize = 0;
}
@@ -2346,15 +2117,10 @@ static void stli_flushbuffer(struct tty_struct *tty)
ftype |= FLUSHRX;
clear_bit(ST_DOFLUSHRX, &portp->state);
}
- stli_sendcmd(brdp, portp, A_FLUSH, &ftype,
- sizeof(unsigned long), 0);
+ __stli_sendcmd(brdp, portp, A_FLUSH, &ftype, sizeof(u32), 0);
}
- restore_flags(flags);
-
- wake_up_interruptible(&tty->write_wait);
- if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- tty->ldisc.write_wakeup)
- (tty->ldisc.write_wakeup)(tty);
+ spin_unlock_irqrestore(&brd_lock, flags);
+ tty_wakeup(tty);
}
/*****************************************************************************/
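
tty_wakeup() replaces the open-coded write-wakeup sequence: it checks TTY_DO_WRITE_WAKEUP, calls the line discipline's write_wakeup hook with proper ldisc referencing, and wakes write_wait. Equivalent usage (sketch):

    #include <linux/tty.h>

    static void my_tx_space_available(struct tty_struct *tty)
    {
    	/* Old style:
    	 *	if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup)
    	 *		tty->ldisc.write_wakeup(tty);
    	 *	wake_up_interruptible(&tty->write_wait);
    	 * New style, one helper that does all of the above safely: */
    	tty_wakeup(tty);
    }
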
@@ -2364,55 +2130,31 @@ static void stli_breakctl(struct tty_struct *tty, int state)
stlibrd_t *brdp;
stliport_t *portp;
long arg;
- /* long savestate, savetime; */
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_breakctl(tty=%x,state=%d)\n", (int) tty, state);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
return;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
return;
-/*
- * Due to a bug in the tty send_break() code we need to preserve
- * the current process state and timeout...
- savetime = current->timeout;
- savestate = current->state;
- */
-
arg = (state == -1) ? BREAKON : BREAKOFF;
stli_cmdwait(brdp, portp, A_BREAK, &arg, sizeof(long), 0);
-
-/*
- *
- current->timeout = savetime;
- current->state = savestate;
- */
}
/*****************************************************************************/
static void stli_waituntilsent(struct tty_struct *tty, int timeout)
{
- stliport_t *portp;
- unsigned long tend;
+ stliport_t *portp;
+ unsigned long tend;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_waituntilsent(tty=%x,timeout=%x)\n", (int) tty, timeout);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
+ if (tty == NULL)
return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
if (timeout == 0)
@@ -2436,19 +2178,13 @@ static void stli_sendxchar(struct tty_struct *tty, char ch)
stliport_t *portp;
asyctrl_t actrl;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
-#endif
-
- if (tty == (struct tty_struct *) NULL)
- return;
portp = tty->driver_data;
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
return;
- if ((portp->brdnr < 0) || (portp->brdnr >= stli_nrbrds))
+ if (portp->brdnr < 0 || portp->brdnr >= stli_nrbrds)
return;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
return;
memset(&actrl, 0, sizeof(asyctrl_t));
@@ -2460,7 +2196,6 @@ static void stli_sendxchar(struct tty_struct *tty, char ch)
actrl.txctrl = CT_SENDCHR;
actrl.tximdch = ch;
}
-
stli_cmdwait(brdp, portp, A_PORTCTRL, &actrl, sizeof(asyctrl_t), 0);
}
@@ -2476,17 +2211,17 @@ static void stli_sendxchar(struct tty_struct *tty, char ch)
static int stli_portinfo(stlibrd_t *brdp, stliport_t *portp, int portnr, char *pos)
{
- char *sp, *uart;
- int rc, cnt;
+ char *sp, *uart;
+ int rc, cnt;
rc = stli_portcmdstats(portp);
uart = "UNKNOWN";
if (brdp->state & BST_STARTED) {
switch (stli_comstats.hwid) {
- case 0: uart = "2681"; break;
- case 1: uart = "SC26198"; break;
- default: uart = "CD1400"; break;
+ case 0: uart = "2681"; break;
+ case 1: uart = "SC26198"; break;
+ default:uart = "CD1400"; break;
}
}
@@ -2537,17 +2272,11 @@ static int stli_portinfo(stlibrd_t *brdp, stliport_t *portp, int portnr, char *p
static int stli_readproc(char *page, char **start, off_t off, int count, int *eof, void *data)
{
- stlibrd_t *brdp;
- stliport_t *portp;
- int brdnr, portnr, totalport;
- int curoff, maxoff;
- char *pos;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
- "data=%x\n", (int) page, (int) start, (int) off, count,
- (int) eof, (int) data);
-#endif
+ stlibrd_t *brdp;
+ stliport_t *portp;
+ int brdnr, portnr, totalport;
+ int curoff, maxoff;
+ char *pos;
pos = page;
totalport = 0;
@@ -2568,7 +2297,7 @@ static int stli_readproc(char *page, char **start, off_t off, int count, int *eo
*/
for (brdnr = 0; (brdnr < stli_nrbrds); brdnr++) {
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
continue;
if (brdp->state == 0)
continue;
@@ -2583,7 +2312,7 @@ static int stli_readproc(char *page, char **start, off_t off, int count, int *eo
for (portnr = 0; (portnr < brdp->nrports); portnr++,
totalport++) {
portp = brdp->ports[portnr];
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
continue;
if (off >= (curoff += MAXLINE))
continue;
@@ -2610,49 +2339,54 @@ stli_readdone:
* a poll routine that does not have user context. Therefore you cannot
* copy back directly into user space, or to the kernel stack of a
* process. This routine does not sleep, so can be called from anywhere.
+ *
+ * The caller must hold the brd_lock (see also stli_sendcmd the usual
+ * entry point)
*/
-static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback)
+static void __stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback)
{
- volatile cdkhdr_t *hdrp;
- volatile cdkctrl_t *cp;
- volatile unsigned char *bits;
- unsigned long flags;
+ cdkhdr_t __iomem *hdrp;
+ cdkctrl_t __iomem *cp;
+ unsigned char __iomem *bits;
+ unsigned long flags;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_sendcmd(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
- "copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
- (int) arg, size, copyback);
-#endif
-
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
if (test_bit(ST_CMDING, &portp->state)) {
printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n",
(int) cmd);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
return;
}
EBRDENABLE(brdp);
- cp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
+ cp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
if (size > 0) {
- memcpy((void *) &(cp->args[0]), arg, size);
+ memcpy_toio((void __iomem *) &(cp->args[0]), arg, size);
if (copyback) {
portp->argp = arg;
portp->argsize = size;
}
}
- cp->status = 0;
- cp->cmd = cmd;
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((volatile unsigned char *) hdrp) + brdp->slaveoffset +
+ writel(0, &cp->status);
+ writel(cmd, &cp->cmd);
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
portp->portidx;
- *bits |= portp->portbit;
+ writeb(readb(bits) | portp->portbit, bits);
set_bit(ST_CMDING, &portp->state);
EBRDDISABLE(brdp);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
+}
+
+static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&brd_lock, flags);
+ __stli_sendcmd(brdp, portp, cmd, arg, size, copyback);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
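
The __stli_sendcmd/stli_sendcmd split follows the usual kernel naming convention for lock handling: the double-underscore variant does the work and is documented for callers that already hold the relevant lock, while the plain-named wrapper takes the lock around it. The general form of the pattern (hypothetical names):

    #include <linux/spinlock.h>

    struct my_dev;

    static DEFINE_SPINLOCK(my_hw_lock);

    /* Caller must hold my_hw_lock. */
    static void __my_send(struct my_dev *dev, unsigned long cmd)
    {
    	/* ... touch the shared-memory mailbox here ... */
    }

    static void my_send(struct my_dev *dev, unsigned long cmd)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&my_hw_lock, flags);
    	__my_send(dev, cmd);
    	spin_unlock_irqrestore(&my_hw_lock, flags);
    }
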
@@ -2667,28 +2401,23 @@ static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd,
static void stli_read(stlibrd_t *brdp, stliport_t *portp)
{
- volatile cdkasyrq_t *rp;
- volatile char *shbuf;
+ cdkasyrq_t __iomem *rp;
+ char __iomem *shbuf;
struct tty_struct *tty;
- unsigned int head, tail, size;
- unsigned int len, stlen;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_read(brdp=%x,portp=%d)\n",
- (int) brdp, (int) portp);
-#endif
+ unsigned int head, tail, size;
+ unsigned int len, stlen;
if (test_bit(ST_RXSTOP, &portp->state))
return;
tty = portp->tty;
- if (tty == (struct tty_struct *) NULL)
+ if (tty == NULL)
return;
- rp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->rxq;
- head = (unsigned int) rp->head;
- if (head != ((unsigned int) rp->head))
- head = (unsigned int) rp->head;
- tail = (unsigned int) rp->tail;
+ rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->rxq;
+ head = (unsigned int) readw(&rp->head);
+ if (head != ((unsigned int) readw(&rp->head)))
+ head = (unsigned int) readw(&rp->head);
+ tail = (unsigned int) readw(&rp->tail);
size = portp->rxsize;
if (head >= tail) {
len = head - tail;
@@ -2699,12 +2428,15 @@ static void stli_read(stlibrd_t *brdp, stliport_t *portp)
}
len = tty_buffer_request_room(tty, len);
- /* FIXME : iomap ? */
- shbuf = (volatile char *) EBRDGETMEMPTR(brdp, portp->rxoffset);
+
+ shbuf = (char __iomem *) EBRDGETMEMPTR(brdp, portp->rxoffset);
while (len > 0) {
+ unsigned char *cptr;
+
stlen = MIN(len, stlen);
- tty_insert_flip_string(tty, (char *)(shbuf + tail), stlen);
+ tty_prepare_flip_string(tty, &cptr, stlen);
+ memcpy_fromio(cptr, shbuf + tail, stlen);
len -= stlen;
tail += stlen;
if (tail >= size) {
@@ -2712,8 +2444,8 @@ static void stli_read(stlibrd_t *brdp, stliport_t *portp)
stlen = head;
}
}
- rp = &((volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr))->rxq;
- rp->tail = tail;
+ rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->rxq;
+ writew(tail, &rp->tail);
if (head != tail)
set_bit(ST_RXING, &portp->state);
@@ -2729,9 +2461,9 @@ static void stli_read(stlibrd_t *brdp, stliport_t *portp)
* difficult to deal with them here.
*/
-static void stli_dodelaycmd(stliport_t *portp, volatile cdkctrl_t *cp)
+static void stli_dodelaycmd(stliport_t *portp, cdkctrl_t __iomem *cp)
{
- int cmd;
+ int cmd;
if (test_bit(ST_DOSIGS, &portp->state)) {
if (test_bit(ST_DOFLUSHTX, &portp->state) &&
@@ -2746,10 +2478,10 @@ static void stli_dodelaycmd(stliport_t *portp, volatile cdkctrl_t *cp)
clear_bit(ST_DOFLUSHTX, &portp->state);
clear_bit(ST_DOFLUSHRX, &portp->state);
clear_bit(ST_DOSIGS, &portp->state);
- memcpy((void *) &(cp->args[0]), (void *) &portp->asig,
+ memcpy_toio((void __iomem *) &(cp->args[0]), (void *) &portp->asig,
sizeof(asysigs_t));
- cp->status = 0;
- cp->cmd = cmd;
+ writel(0, &cp->status);
+ writel(cmd, &cp->cmd);
set_bit(ST_CMDING, &portp->state);
} else if (test_bit(ST_DOFLUSHTX, &portp->state) ||
test_bit(ST_DOFLUSHRX, &portp->state)) {
@@ -2757,9 +2489,9 @@ static void stli_dodelaycmd(stliport_t *portp, volatile cdkctrl_t *cp)
cmd |= ((test_bit(ST_DOFLUSHRX, &portp->state)) ? FLUSHRX : 0);
clear_bit(ST_DOFLUSHTX, &portp->state);
clear_bit(ST_DOFLUSHRX, &portp->state);
- memcpy((void *) &(cp->args[0]), (void *) &cmd, sizeof(int));
- cp->status = 0;
- cp->cmd = A_FLUSH;
+ memcpy_toio((void __iomem *) &(cp->args[0]), (void *) &cmd, sizeof(int));
+ writel(0, &cp->status);
+ writel(A_FLUSH, &cp->cmd);
set_bit(ST_CMDING, &portp->state);
}
}
@@ -2779,30 +2511,25 @@ static void stli_dodelaycmd(stliport_t *portp, volatile cdkctrl_t *cp)
static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp)
{
- volatile cdkasy_t *ap;
- volatile cdkctrl_t *cp;
- struct tty_struct *tty;
- asynotify_t nt;
- unsigned long oldsigs;
- int rc, donerx;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_hostcmd(brdp=%x,channr=%d)\n",
- (int) brdp, channr);
-#endif
-
- ap = (volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr);
+ cdkasy_t __iomem *ap;
+ cdkctrl_t __iomem *cp;
+ struct tty_struct *tty;
+ asynotify_t nt;
+ unsigned long oldsigs;
+ int rc, donerx;
+
+ ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
cp = &ap->ctrl;
/*
* Check if we are waiting for an open completion message.
*/
if (test_bit(ST_OPENING, &portp->state)) {
- rc = (int) cp->openarg;
- if ((cp->open == 0) && (rc != 0)) {
+ rc = readl(&cp->openarg);
+ if (readb(&cp->open) == 0 && rc != 0) {
if (rc > 0)
rc--;
- cp->openarg = 0;
+ writel(0, &cp->openarg);
portp->rc = rc;
clear_bit(ST_OPENING, &portp->state);
wake_up_interruptible(&portp->raw_wait);
@@ -2813,11 +2540,11 @@ static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp)
* Check if we are waiting for a close completion message.
*/
if (test_bit(ST_CLOSING, &portp->state)) {
- rc = (int) cp->closearg;
- if ((cp->close == 0) && (rc != 0)) {
+ rc = (int) readl(&cp->closearg);
+ if (readb(&cp->close) == 0 && rc != 0) {
if (rc > 0)
rc--;
- cp->closearg = 0;
+ writel(0, &cp->closearg);
portp->rc = rc;
clear_bit(ST_CLOSING, &portp->state);
wake_up_interruptible(&portp->raw_wait);
@@ -2829,16 +2556,16 @@ static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp)
* need to copy out the command results associated with this command.
*/
if (test_bit(ST_CMDING, &portp->state)) {
- rc = cp->status;
- if ((cp->cmd == 0) && (rc != 0)) {
+ rc = readl(&cp->status);
+ if (readl(&cp->cmd) == 0 && rc != 0) {
if (rc > 0)
rc--;
- if (portp->argp != (void *) NULL) {
- memcpy(portp->argp, (void *) &(cp->args[0]),
+ if (portp->argp != NULL) {
+ memcpy_fromio(portp->argp, (void __iomem *) &(cp->args[0]),
portp->argsize);
- portp->argp = (void *) NULL;
+ portp->argp = NULL;
}
- cp->status = 0;
+ writel(0, &cp->status);
portp->rc = rc;
clear_bit(ST_CMDING, &portp->state);
stli_dodelaycmd(portp, cp);
@@ -2877,18 +2604,15 @@ static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp)
if (nt.data & DT_TXEMPTY)
clear_bit(ST_TXBUSY, &portp->state);
if (nt.data & (DT_TXEMPTY | DT_TXLOW)) {
- if (tty != (struct tty_struct *) NULL) {
- if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- tty->ldisc.write_wakeup) {
- (tty->ldisc.write_wakeup)(tty);
- EBRDENABLE(brdp);
- }
+ if (tty != NULL) {
+ tty_wakeup(tty);
+ EBRDENABLE(brdp);
wake_up_interruptible(&tty->write_wait);
}
}
if ((nt.data & DT_RXBREAK) && (portp->rxmarkmsk & BRKINT)) {
- if (tty != (struct tty_struct *) NULL) {
+ if (tty != NULL) {
tty_insert_flip_char(tty, 0, TTY_BREAK);
if (portp->flags & ASYNC_SAK) {
do_SAK(tty);
@@ -2932,14 +2656,14 @@ static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp)
* at the cdk header structure.
*/
-static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp)
+static void stli_brdpoll(stlibrd_t *brdp, cdkhdr_t __iomem *hdrp)
{
- stliport_t *portp;
- unsigned char hostbits[(STL_MAXCHANS / 8) + 1];
- unsigned char slavebits[(STL_MAXCHANS / 8) + 1];
- unsigned char *slavep;
- int bitpos, bitat, bitsize;
- int channr, nrdevs, slavebitchange;
+ stliport_t *portp;
+ unsigned char hostbits[(STL_MAXCHANS / 8) + 1];
+ unsigned char slavebits[(STL_MAXCHANS / 8) + 1];
+ unsigned char __iomem *slavep;
+ int bitpos, bitat, bitsize;
+ int channr, nrdevs, slavebitchange;
bitsize = brdp->bitsize;
nrdevs = brdp->nrdevs;
@@ -2951,7 +2675,7 @@ static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp)
* 8 service bits at a time in the inner loop, so we can bypass
* the lot if none of them want service.
*/
- memcpy(&hostbits[0], (((unsigned char *) hdrp) + brdp->hostoffset),
+ memcpy_fromio(&hostbits[0], (((unsigned char __iomem *) hdrp) + brdp->hostoffset),
bitsize);
memset(&slavebits[0], 0, bitsize);
@@ -2978,11 +2702,11 @@ static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp)
* service may initiate more slave requests.
*/
if (slavebitchange) {
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- slavep = ((unsigned char *) hdrp) + brdp->slaveoffset;
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ slavep = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset;
for (bitpos = 0; (bitpos < bitsize); bitpos++) {
- if (slavebits[bitpos])
- slavep[bitpos] &= ~slavebits[bitpos];
+ if (slavebits[bitpos])
+ writeb(readb(slavep + bitpos) & ~slavebits[bitpos], slavep + bitpos);
}
}
}
@@ -3000,9 +2724,9 @@ static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp)
static void stli_poll(unsigned long arg)
{
- volatile cdkhdr_t *hdrp;
- stlibrd_t *brdp;
- int brdnr;
+ cdkhdr_t __iomem *hdrp;
+ stlibrd_t *brdp;
+ int brdnr;
stli_timerlist.expires = STLI_TIMEOUT;
add_timer(&stli_timerlist);
@@ -3012,16 +2736,18 @@ static void stli_poll(unsigned long arg)
*/
for (brdnr = 0; (brdnr < stli_nrbrds); brdnr++) {
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
continue;
if ((brdp->state & BST_STARTED) == 0)
continue;
+ spin_lock(&brd_lock);
EBRDENABLE(brdp);
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- if (hdrp->hostreq)
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ if (readb(&hdrp->hostreq))
stli_brdpoll(brdp, hdrp);
EBRDDISABLE(brdp);
+ spin_unlock(&brd_lock);
}
}
@@ -3034,11 +2760,6 @@ static void stli_poll(unsigned long arg)
static void stli_mkasyport(stliport_t *portp, asyport_t *pp, struct termios *tiosp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_mkasyport(portp=%x,pp=%x,tiosp=%d)\n",
- (int) portp, (int) pp, (int) tiosp);
-#endif
-
memset(pp, 0, sizeof(asyport_t));
/*
@@ -3157,11 +2878,6 @@ static void stli_mkasyport(stliport_t *portp, asyport_t *pp, struct termios *tio
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_mkasysigs(sp=%x,dtr=%d,rts=%d)\n",
- (int) sp, dtr, rts);
-#endif
-
memset(sp, 0, sizeof(asysigs_t));
if (dtr >= 0) {
sp->signal |= SG_DTR;
@@ -3182,13 +2898,7 @@ static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts)
static long stli_mktiocm(unsigned long sigvalue)
{
- long tiocm;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_mktiocm(sigvalue=%x)\n", (int) sigvalue);
-#endif
-
- tiocm = 0;
+ long tiocm = 0;
tiocm |= ((sigvalue & SG_DCD) ? TIOCM_CD : 0);
tiocm |= ((sigvalue & SG_CTS) ? TIOCM_CTS : 0);
tiocm |= ((sigvalue & SG_RI) ? TIOCM_RI : 0);
@@ -3210,10 +2920,6 @@ static int stli_initports(stlibrd_t *brdp)
stliport_t *portp;
int i, panelnr, panelport;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_initports(brdp=%x)\n", (int) brdp);
-#endif
-
for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) {
portp = kzalloc(sizeof(stliport_t), GFP_KERNEL);
if (!portp) {
@@ -3240,7 +2946,7 @@ static int stli_initports(stlibrd_t *brdp)
brdp->ports[i] = portp;
}
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -3253,10 +2959,6 @@ static void stli_ecpinit(stlibrd_t *brdp)
{
unsigned long memconf;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpinit(brdp=%d)\n", (int) brdp);
-#endif
-
outb(ECP_ATSTOP, (brdp->iobase + ECP_ATCONFR));
udelay(10);
outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
@@ -3270,9 +2972,6 @@ static void stli_ecpinit(stlibrd_t *brdp)
static void stli_ecpenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpenable(brdp=%x)\n", (int) brdp);
-#endif
outb(ECP_ATENABLE, (brdp->iobase + ECP_ATCONFR));
}
@@ -3280,9 +2979,6 @@ static void stli_ecpenable(stlibrd_t *brdp)
static void stli_ecpdisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpdisable(brdp=%x)\n", (int) brdp);
-#endif
outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
}
@@ -3290,13 +2986,8 @@ static void stli_ecpdisable(stlibrd_t *brdp)
static char *stli_ecpgetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
{
- void *ptr;
- unsigned char val;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
- (int) offset);
-#endif
+ void *ptr;
+ unsigned char val;
if (offset > brdp->memsize) {
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
@@ -3316,10 +3007,6 @@ static char *stli_ecpgetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
static void stli_ecpreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpreset(brdp=%x)\n", (int) brdp);
-#endif
-
outb(ECP_ATSTOP, (brdp->iobase + ECP_ATCONFR));
udelay(10);
outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
@@ -3330,9 +3017,6 @@ static void stli_ecpreset(stlibrd_t *brdp)
static void stli_ecpintr(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpintr(brdp=%x)\n", (int) brdp);
-#endif
outb(0x1, brdp->iobase);
}
@@ -3346,10 +3030,6 @@ static void stli_ecpeiinit(stlibrd_t *brdp)
{
unsigned long memconf;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpeiinit(brdp=%x)\n", (int) brdp);
-#endif
-
outb(0x1, (brdp->iobase + ECP_EIBRDENAB));
outb(ECP_EISTOP, (brdp->iobase + ECP_EICONFR));
udelay(10);
@@ -3383,11 +3063,6 @@ static char *stli_ecpeigetmemptr(stlibrd_t *brdp, unsigned long offset, int line
void *ptr;
unsigned char val;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecpeigetmemptr(brdp=%x,offset=%x,line=%d)\n",
- (int) brdp, (int) offset, line);
-#endif
-
if (offset > brdp->memsize) {
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
@@ -3437,8 +3112,8 @@ static void stli_ecpmcdisable(stlibrd_t *brdp)
static char *stli_ecpmcgetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
{
- void *ptr;
- unsigned char val;
+ void *ptr;
+ unsigned char val;
if (offset > brdp->memsize) {
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
@@ -3472,10 +3147,6 @@ static void stli_ecpmcreset(stlibrd_t *brdp)
static void stli_ecppciinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecppciinit(brdp=%x)\n", (int) brdp);
-#endif
-
outb(ECP_PCISTOP, (brdp->iobase + ECP_PCICONFR));
udelay(10);
outb(0, (brdp->iobase + ECP_PCICONFR));
@@ -3489,11 +3160,6 @@ static char *stli_ecppcigetmemptr(stlibrd_t *brdp, unsigned long offset, int lin
void *ptr;
unsigned char val;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_ecppcigetmemptr(brdp=%x,offset=%x,line=%d)\n",
- (int) brdp, (int) offset, line);
-#endif
-
if (offset > brdp->memsize) {
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), board=%d\n",
@@ -3528,10 +3194,6 @@ static void stli_onbinit(stlibrd_t *brdp)
{
unsigned long memconf;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbinit(brdp=%d)\n", (int) brdp);
-#endif
-
outb(ONB_ATSTOP, (brdp->iobase + ONB_ATCONFR));
udelay(10);
outb(ONB_ATDISABLE, (brdp->iobase + ONB_ATCONFR));
@@ -3547,9 +3209,6 @@ static void stli_onbinit(stlibrd_t *brdp)
static void stli_onbenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbenable(brdp=%x)\n", (int) brdp);
-#endif
outb((brdp->enabval | ONB_ATENABLE), (brdp->iobase + ONB_ATCONFR));
}
@@ -3557,9 +3216,6 @@ static void stli_onbenable(stlibrd_t *brdp)
static void stli_onbdisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbdisable(brdp=%x)\n", (int) brdp);
-#endif
outb((brdp->enabval | ONB_ATDISABLE), (brdp->iobase + ONB_ATCONFR));
}
@@ -3569,11 +3225,6 @@ static char *stli_onbgetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
{
void *ptr;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
- (int) offset);
-#endif
-
if (offset > brdp->memsize) {
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
@@ -3589,11 +3240,6 @@ static char *stli_onbgetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
static void stli_onbreset(stlibrd_t *brdp)
{
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbreset(brdp=%x)\n", (int) brdp);
-#endif
-
outb(ONB_ATSTOP, (brdp->iobase + ONB_ATCONFR));
udelay(10);
outb(ONB_ATDISABLE, (brdp->iobase + ONB_ATCONFR));
@@ -3610,10 +3256,6 @@ static void stli_onbeinit(stlibrd_t *brdp)
{
unsigned long memconf;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbeinit(brdp=%d)\n", (int) brdp);
-#endif
-
outb(0x1, (brdp->iobase + ONB_EIBRDENAB));
outb(ONB_EISTOP, (brdp->iobase + ONB_EICONFR));
udelay(10);
@@ -3632,9 +3274,6 @@ static void stli_onbeinit(stlibrd_t *brdp)
static void stli_onbeenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbeenable(brdp=%x)\n", (int) brdp);
-#endif
outb(ONB_EIENABLE, (brdp->iobase + ONB_EICONFR));
}
@@ -3642,9 +3281,6 @@ static void stli_onbeenable(stlibrd_t *brdp)
static void stli_onbedisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbedisable(brdp=%x)\n", (int) brdp);
-#endif
outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
}
@@ -3652,13 +3288,8 @@ static void stli_onbedisable(stlibrd_t *brdp)
static char *stli_onbegetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
{
- void *ptr;
- unsigned char val;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_onbegetmemptr(brdp=%x,offset=%x,line=%d)\n",
- (int) brdp, (int) offset, line);
-#endif
+ void *ptr;
+ unsigned char val;
if (offset > brdp->memsize) {
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
@@ -3681,11 +3312,6 @@ static char *stli_onbegetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
static void stli_onbereset(stlibrd_t *brdp)
{
-
-#ifdef DEBUG
- printk(KERN_ERR "stli_onbereset(brdp=%x)\n", (int) brdp);
-#endif
-
outb(ONB_EISTOP, (brdp->iobase + ONB_EICONFR));
udelay(10);
outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
@@ -3700,11 +3326,6 @@ static void stli_onbereset(stlibrd_t *brdp)
static void stli_bbyinit(stlibrd_t *brdp)
{
-
-#ifdef DEBUG
- printk(KERN_ERR "stli_bbyinit(brdp=%d)\n", (int) brdp);
-#endif
-
outb(BBY_ATSTOP, (brdp->iobase + BBY_ATCONFR));
udelay(10);
outb(0, (brdp->iobase + BBY_ATCONFR));
@@ -3717,24 +3338,13 @@ static void stli_bbyinit(stlibrd_t *brdp)
static char *stli_bbygetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
{
- void *ptr;
- unsigned char val;
+ void *ptr;
+ unsigned char val;
-#ifdef DEBUG
- printk(KERN_ERR "stli_bbygetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
- (int) offset);
-#endif
+ BUG_ON(offset > brdp->memsize);
- if (offset > brdp->memsize) {
- printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- val = 0;
- } else {
- ptr = brdp->membase + (offset % BBY_PAGESIZE);
- val = (unsigned char) (offset / BBY_PAGESIZE);
- }
+ ptr = brdp->membase + (offset % BBY_PAGESIZE);
+ val = (unsigned char) (offset / BBY_PAGESIZE);
outb(val, (brdp->iobase + BBY_ATCONFR));
return(ptr);
}
@@ -3743,11 +3353,6 @@ static char *stli_bbygetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
static void stli_bbyreset(stlibrd_t *brdp)
{
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_bbyreset(brdp=%x)\n", (int) brdp);
-#endif
-
outb(BBY_ATSTOP, (brdp->iobase + BBY_ATCONFR));
udelay(10);
outb(0, (brdp->iobase + BBY_ATCONFR));
@@ -3762,11 +3367,6 @@ static void stli_bbyreset(stlibrd_t *brdp)
static void stli_stalinit(stlibrd_t *brdp)
{
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_stalinit(brdp=%d)\n", (int) brdp);
-#endif
-
outb(0x1, brdp->iobase);
mdelay(1000);
}
@@ -3775,36 +3375,18 @@ static void stli_stalinit(stlibrd_t *brdp)
static char *stli_stalgetmemptr(stlibrd_t *brdp, unsigned long offset, int line)
{
- void *ptr;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_stalgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
- (int) offset);
-#endif
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- } else {
- ptr = brdp->membase + (offset % STAL_PAGESIZE);
- }
- return(ptr);
+ BUG_ON(offset > brdp->memsize);
+ return brdp->membase + (offset % STAL_PAGESIZE);
}
/*****************************************************************************/
static void stli_stalreset(stlibrd_t *brdp)
{
- volatile unsigned long *vecp;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_stalreset(brdp=%x)\n", (int) brdp);
-#endif
+ u32 __iomem *vecp;
- vecp = (volatile unsigned long *) (brdp->membase + 0x30);
- *vecp = 0xffff0000;
+ vecp = (u32 __iomem *) (brdp->membase + 0x30);
+ writel(0xffff0000, vecp);
outb(0, brdp->iobase);
mdelay(1000);
}
@@ -3818,15 +3400,11 @@ static void stli_stalreset(stlibrd_t *brdp)
static int stli_initecp(stlibrd_t *brdp)
{
- cdkecpsig_t sig;
- cdkecpsig_t *sigsp;
- unsigned int status, nxtid;
- char *name;
- int panelnr, nrports;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_initecp(brdp=%x)\n", (int) brdp);
-#endif
+ cdkecpsig_t sig;
+ cdkecpsig_t __iomem *sigsp;
+ unsigned int status, nxtid;
+ char *name;
+ int panelnr, nrports;
if (!request_region(brdp->iobase, brdp->iosize, "istallion"))
return -EIO;
@@ -3834,7 +3412,7 @@ static int stli_initecp(stlibrd_t *brdp)
if ((brdp->iobase == 0) || (brdp->memaddr == 0))
{
release_region(brdp->iobase, brdp->iosize);
- return(-ENODEV);
+ return -ENODEV;
}
brdp->iosize = ECP_IOSIZE;
@@ -3903,7 +3481,7 @@ static int stli_initecp(stlibrd_t *brdp)
default:
release_region(brdp->iobase, brdp->iosize);
- return(-EINVAL);
+ return -EINVAL;
}
/*
@@ -3915,10 +3493,10 @@ static int stli_initecp(stlibrd_t *brdp)
EBRDINIT(brdp);
brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
- if (brdp->membase == (void *) NULL)
+ if (brdp->membase == NULL)
{
release_region(brdp->iobase, brdp->iosize);
- return(-ENOMEM);
+ return -ENOMEM;
}
/*
@@ -3927,23 +3505,14 @@ static int stli_initecp(stlibrd_t *brdp)
* this is, and what it is connected to.
*/
EBRDENABLE(brdp);
- sigsp = (cdkecpsig_t *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
+ sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
memcpy(&sig, sigsp, sizeof(cdkecpsig_t));
EBRDDISABLE(brdp);
-#if 0
- printk("%s(%d): sig-> magic=%x rom=%x panel=%x,%x,%x,%x,%x,%x,%x,%x\n",
- __FILE__, __LINE__, (int) sig.magic, sig.romver, sig.panelid[0],
- (int) sig.panelid[1], (int) sig.panelid[2],
- (int) sig.panelid[3], (int) sig.panelid[4],
- (int) sig.panelid[5], (int) sig.panelid[6],
- (int) sig.panelid[7]);
-#endif
-
- if (sig.magic != ECP_MAGIC)
+ if (sig.magic != cpu_to_le32(ECP_MAGIC))
{
release_region(brdp->iobase, brdp->iosize);
- return(-ENODEV);
+ return -ENODEV;
}
/*
@@ -3967,7 +3536,7 @@ static int stli_initecp(stlibrd_t *brdp)
brdp->state |= BST_FOUND;
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -3979,20 +3548,16 @@ static int stli_initecp(stlibrd_t *brdp)
static int stli_initonb(stlibrd_t *brdp)
{
- cdkonbsig_t sig;
- cdkonbsig_t *sigsp;
- char *name;
- int i;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_initonb(brdp=%x)\n", (int) brdp);
-#endif
+ cdkonbsig_t sig;
+ cdkonbsig_t __iomem *sigsp;
+ char *name;
+ int i;
/*
* Do a basic sanity check on the IO and memory addresses.
*/
- if ((brdp->iobase == 0) || (brdp->memaddr == 0))
- return(-ENODEV);
+ if (brdp->iobase == 0 || brdp->memaddr == 0)
+ return -ENODEV;
brdp->iosize = ONB_IOSIZE;
@@ -4010,7 +3575,6 @@ static int stli_initonb(stlibrd_t *brdp)
case BRD_ONBOARD2:
case BRD_ONBOARD2_32:
case BRD_ONBOARDRS:
- brdp->membase = (void *) brdp->memaddr;
brdp->memsize = ONB_MEMSIZE;
brdp->pagesize = ONB_ATPAGESIZE;
brdp->init = stli_onbinit;
@@ -4028,7 +3592,6 @@ static int stli_initonb(stlibrd_t *brdp)
break;
case BRD_ONBOARDE:
- brdp->membase = (void *) brdp->memaddr;
brdp->memsize = ONB_EIMEMSIZE;
brdp->pagesize = ONB_EIPAGESIZE;
brdp->init = stli_onbeinit;
@@ -4044,7 +3607,6 @@ static int stli_initonb(stlibrd_t *brdp)
case BRD_BRUMBY4:
case BRD_BRUMBY8:
case BRD_BRUMBY16:
- brdp->membase = (void *) brdp->memaddr;
brdp->memsize = BBY_MEMSIZE;
brdp->pagesize = BBY_PAGESIZE;
brdp->init = stli_bbyinit;
@@ -4058,7 +3620,6 @@ static int stli_initonb(stlibrd_t *brdp)
break;
case BRD_STALLION:
- brdp->membase = (void *) brdp->memaddr;
brdp->memsize = STAL_MEMSIZE;
brdp->pagesize = STAL_PAGESIZE;
brdp->init = stli_stalinit;
@@ -4073,7 +3634,7 @@ static int stli_initonb(stlibrd_t *brdp)
default:
release_region(brdp->iobase, brdp->iosize);
- return(-EINVAL);
+ return -EINVAL;
}
/*
@@ -4085,10 +3646,10 @@ static int stli_initonb(stlibrd_t *brdp)
EBRDINIT(brdp);
brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
- if (brdp->membase == (void *) NULL)
+ if (brdp->membase == NULL)
{
release_region(brdp->iobase, brdp->iosize);
- return(-ENOMEM);
+ return -ENOMEM;
}
/*
@@ -4097,21 +3658,17 @@ static int stli_initonb(stlibrd_t *brdp)
* this is, and how many ports.
*/
EBRDENABLE(brdp);
- sigsp = (cdkonbsig_t *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
- memcpy(&sig, sigsp, sizeof(cdkonbsig_t));
+ sigsp = (cdkonbsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
+ memcpy_fromio(&sig, sigsp, sizeof(cdkonbsig_t));
EBRDDISABLE(brdp);
-#if 0
- printk("%s(%d): sig-> magic=%x:%x:%x:%x romver=%x amask=%x:%x:%x\n",
- __FILE__, __LINE__, sig.magic0, sig.magic1, sig.magic2,
- sig.magic3, sig.romver, sig.amask0, sig.amask1, sig.amask2);
-#endif
-
- if ((sig.magic0 != ONB_MAGIC0) || (sig.magic1 != ONB_MAGIC1) ||
- (sig.magic2 != ONB_MAGIC2) || (sig.magic3 != ONB_MAGIC3))
+ if (sig.magic0 != cpu_to_le16(ONB_MAGIC0) ||
+ sig.magic1 != cpu_to_le16(ONB_MAGIC1) ||
+ sig.magic2 != cpu_to_le16(ONB_MAGIC2) ||
+ sig.magic3 != cpu_to_le16(ONB_MAGIC3))
{
release_region(brdp->iobase, brdp->iosize);
- return(-ENODEV);
+ return -ENODEV;
}
/*
@@ -4132,7 +3689,7 @@ static int stli_initonb(stlibrd_t *brdp)
brdp->state |= BST_FOUND;
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4145,31 +3702,25 @@ static int stli_initonb(stlibrd_t *brdp)
static int stli_startbrd(stlibrd_t *brdp)
{
- volatile cdkhdr_t *hdrp;
- volatile cdkmem_t *memp;
- volatile cdkasy_t *ap;
- unsigned long flags;
- stliport_t *portp;
- int portnr, nrdevs, i, rc;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_startbrd(brdp=%x)\n", (int) brdp);
-#endif
-
- rc = 0;
-
- save_flags(flags);
- cli();
+ cdkhdr_t __iomem *hdrp;
+ cdkmem_t __iomem *memp;
+ cdkasy_t __iomem *ap;
+ unsigned long flags;
+ stliport_t *portp;
+ int portnr, nrdevs, i, rc = 0;
+ u32 memoff;
+
+ spin_lock_irqsave(&brd_lock, flags);
EBRDENABLE(brdp);
- hdrp = (volatile cdkhdr_t *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
+ hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
nrdevs = hdrp->nrdevs;
#if 0
printk("%s(%d): CDK version %d.%d.%d --> "
"nrdevs=%d memp=%x hostp=%x slavep=%x\n",
- __FILE__, __LINE__, hdrp->ver_release, hdrp->ver_modification,
- hdrp->ver_fix, nrdevs, (int) hdrp->memp, (int) hdrp->hostp,
- (int) hdrp->slavep);
+ __FILE__, __LINE__, readb(&hdrp->ver_release), readb(&hdrp->ver_modification),
+ readb(&hdrp->ver_fix), nrdevs, (int) readl(&hdrp->memp), readl(&hdrp->hostp),
+ readl(&hdrp->slavep));
#endif
if (nrdevs < (brdp->nrports + 1)) {
@@ -4181,14 +3732,14 @@ static int stli_startbrd(stlibrd_t *brdp)
brdp->hostoffset = hdrp->hostp - CDK_CDKADDR;
brdp->slaveoffset = hdrp->slavep - CDK_CDKADDR;
brdp->bitsize = (nrdevs + 7) / 8;
- memp = (volatile cdkmem_t *) hdrp->memp;
- if (((unsigned long) memp) > brdp->memsize) {
+ memoff = readl(&hdrp->memp);
+ if (memoff > brdp->memsize) {
printk(KERN_ERR "STALLION: corrupted shared memory region?\n");
rc = -EIO;
goto stli_donestartup;
}
- memp = (volatile cdkmem_t *) EBRDGETMEMPTR(brdp, (unsigned long) memp);
- if (memp->dtype != TYP_ASYNCTRL) {
+ memp = (cdkmem_t __iomem *) EBRDGETMEMPTR(brdp, memoff);
+ if (readw(&memp->dtype) != TYP_ASYNCTRL) {
printk(KERN_ERR "STALLION: no slave control device found\n");
goto stli_donestartup;
}
@@ -4200,19 +3751,19 @@ static int stli_startbrd(stlibrd_t *brdp)
* change pages while reading memory map.
*/
for (i = 1, portnr = 0; (i < nrdevs); i++, portnr++, memp++) {
- if (memp->dtype != TYP_ASYNC)
+ if (readw(&memp->dtype) != TYP_ASYNC)
break;
portp = brdp->ports[portnr];
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
break;
portp->devnr = i;
- portp->addr = memp->offset;
+ portp->addr = readl(&memp->offset);
portp->reqbit = (unsigned char) (0x1 << (i * 8 / nrdevs));
portp->portidx = (unsigned char) (i / 8);
portp->portbit = (unsigned char) (0x1 << (i % 8));
}
- hdrp->slavereq = 0xff;
+ writeb(0xff, &hdrp->slavereq);
/*
* For each port setup a local copy of the RX and TX buffer offsets
@@ -4221,22 +3772,22 @@ static int stli_startbrd(stlibrd_t *brdp)
*/
for (i = 1, portnr = 0; (i < nrdevs); i++, portnr++) {
portp = brdp->ports[portnr];
- if (portp == (stliport_t *) NULL)
+ if (portp == NULL)
break;
if (portp->addr == 0)
break;
- ap = (volatile cdkasy_t *) EBRDGETMEMPTR(brdp, portp->addr);
- if (ap != (volatile cdkasy_t *) NULL) {
- portp->rxsize = ap->rxq.size;
- portp->txsize = ap->txq.size;
- portp->rxoffset = ap->rxq.offset;
- portp->txoffset = ap->txq.offset;
+ ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
+ if (ap != NULL) {
+ portp->rxsize = readw(&ap->rxq.size);
+ portp->txsize = readw(&ap->txq.size);
+ portp->rxoffset = readl(&ap->rxq.offset);
+ portp->txoffset = readl(&ap->txq.offset);
}
}
stli_donestartup:
EBRDDISABLE(brdp);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
if (rc == 0)
brdp->state |= BST_STARTED;
@@ -4247,7 +3798,7 @@ stli_donestartup:
add_timer(&stli_timerlist);
}
- return(rc);
+ return rc;
}
/*****************************************************************************/
@@ -4258,10 +3809,6 @@ stli_donestartup:
static int __init stli_brdinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_brdinit(brdp=%x)\n", (int) brdp);
-#endif
-
stli_brds[brdp->brdnr] = brdp;
switch (brdp->brdtype) {
@@ -4289,11 +3836,11 @@ static int __init stli_brdinit(stlibrd_t *brdp)
case BRD_ECHPCI:
printk(KERN_ERR "STALLION: %s board type not supported in "
"this driver\n", stli_brdnames[brdp->brdtype]);
- return(ENODEV);
+ return -ENODEV;
default:
printk(KERN_ERR "STALLION: board=%d is unknown board "
"type=%d\n", brdp->brdnr, brdp->brdtype);
- return(ENODEV);
+ return -ENODEV;
}
if ((brdp->state & BST_FOUND) == 0) {
@@ -4301,7 +3848,7 @@ static int __init stli_brdinit(stlibrd_t *brdp)
"io=%x mem=%x\n",
stli_brdnames[brdp->brdtype], brdp->brdnr,
brdp->iobase, (int) brdp->memaddr);
- return(ENODEV);
+ return -ENODEV;
}
stli_initports(brdp);
@@ -4309,7 +3856,7 @@ static int __init stli_brdinit(stlibrd_t *brdp)
"nrpanels=%d nrports=%d\n", stli_brdnames[brdp->brdtype],
brdp->brdnr, brdp->iobase, (int) brdp->memaddr,
brdp->nrpanels, brdp->nrports);
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4321,14 +3868,10 @@ static int __init stli_brdinit(stlibrd_t *brdp)
static int stli_eisamemprobe(stlibrd_t *brdp)
{
- cdkecpsig_t ecpsig, *ecpsigp;
- cdkonbsig_t onbsig, *onbsigp;
+ cdkecpsig_t ecpsig, __iomem *ecpsigp;
+ cdkonbsig_t onbsig, __iomem *onbsigp;
int i, foundit;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_eisamemprobe(brdp=%x)\n", (int) brdp);
-#endif
-
/*
* First up we reset the board, to get it into a known state. There
* are only 2 board types here we need to worry about. Don't use the
@@ -4352,7 +3895,7 @@ static int stli_eisamemprobe(stlibrd_t *brdp)
mdelay(1);
stli_onbeenable(brdp);
} else {
- return(-ENODEV);
+ return -ENODEV;
}
foundit = 0;
@@ -4364,25 +3907,24 @@ static int stli_eisamemprobe(stlibrd_t *brdp)
*/
for (i = 0; (i < stli_eisamempsize); i++) {
brdp->memaddr = stli_eisamemprobeaddrs[i];
- brdp->membase = (void *) brdp->memaddr;
brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
- if (brdp->membase == (void *) NULL)
+ if (brdp->membase == NULL)
continue;
if (brdp->brdtype == BRD_ECPE) {
- ecpsigp = (cdkecpsig_t *) stli_ecpeigetmemptr(brdp,
+ ecpsigp = (cdkecpsig_t __iomem *) stli_ecpeigetmemptr(brdp,
CDK_SIGADDR, __LINE__);
- memcpy(&ecpsig, ecpsigp, sizeof(cdkecpsig_t));
- if (ecpsig.magic == ECP_MAGIC)
+ memcpy_fromio(&ecpsig, ecpsigp, sizeof(cdkecpsig_t));
+ if (ecpsig.magic == cpu_to_le32(ECP_MAGIC))
foundit = 1;
} else {
- onbsigp = (cdkonbsig_t *) stli_onbegetmemptr(brdp,
+ onbsigp = (cdkonbsig_t __iomem *) stli_onbegetmemptr(brdp,
CDK_SIGADDR, __LINE__);
- memcpy(&onbsig, onbsigp, sizeof(cdkonbsig_t));
- if ((onbsig.magic0 == ONB_MAGIC0) &&
- (onbsig.magic1 == ONB_MAGIC1) &&
- (onbsig.magic2 == ONB_MAGIC2) &&
- (onbsig.magic3 == ONB_MAGIC3))
+ memcpy_fromio(&onbsig, onbsigp, sizeof(cdkonbsig_t));
+ if ((onbsig.magic0 == cpu_to_le16(ONB_MAGIC0)) &&
+ (onbsig.magic1 == cpu_to_le16(ONB_MAGIC1)) &&
+ (onbsig.magic2 == cpu_to_le16(ONB_MAGIC2)) &&
+ (onbsig.magic3 == cpu_to_le16(ONB_MAGIC3)))
foundit = 1;
}
@@ -4406,9 +3948,9 @@ static int stli_eisamemprobe(stlibrd_t *brdp)
printk(KERN_ERR "STALLION: failed to probe shared memory "
"region for %s in EISA slot=%d\n",
stli_brdnames[brdp->brdtype], (brdp->iobase >> 12));
- return(-ENODEV);
+ return -ENODEV;
}
- return(0);
+ return 0;
}
static int stli_getbrdnr(void)
@@ -4439,22 +3981,16 @@ static int stli_getbrdnr(void)
static int stli_findeisabrds(void)
{
- stlibrd_t *brdp;
- unsigned int iobase, eid;
- int i;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_findeisabrds()\n");
-#endif
+ stlibrd_t *brdp;
+ unsigned int iobase, eid;
+ int i;
/*
- * Firstly check if this is an EISA system. Do this by probing for
- * the system board EISA ID. If this is not an EISA system then
+ * Firstly check if this is an EISA system. If it is not then
* don't bother going any further!
*/
- outb(0xff, 0xc80);
- if (inb(0xc80) == 0xff)
- return(0);
+ if (!EISA_bus)
+ return 0;
/*
* Looks like an EISA system, so go searching for EISA boards.
@@ -4472,7 +4008,7 @@ static int stli_findeisabrds(void)
*/
for (i = 0; (i < STL_MAXBRDS); i++) {
brdp = stli_brds[i];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
continue;
if (brdp->iobase == iobase)
break;
@@ -4484,10 +4020,10 @@ static int stli_findeisabrds(void)
* We have found a Stallion board and it is not configured already.
* Allocate a board structure and initialize it.
*/
- if ((brdp = stli_allocbrd()) == (stlibrd_t *) NULL)
- return(-ENOMEM);
+ if ((brdp = stli_allocbrd()) == NULL)
+ return -ENOMEM;
if ((brdp->brdnr = stli_getbrdnr()) < 0)
- return(-ENOMEM);
+ return -ENOMEM;
eid = inb(iobase + 0xc82);
if (eid == ECP_EISAID)
brdp->brdtype = BRD_ECPE;
@@ -4502,7 +4038,7 @@ static int stli_findeisabrds(void)
stli_brdinit(brdp);
}
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4523,32 +4059,18 @@ static int stli_findeisabrds(void)
static int stli_initpcibrd(int brdtype, struct pci_dev *devp)
{
- stlibrd_t *brdp;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n",
- brdtype, dev->bus->number, dev->devfn);
-#endif
+ stlibrd_t *brdp;
if (pci_enable_device(devp))
- return(-EIO);
- if ((brdp = stli_allocbrd()) == (stlibrd_t *) NULL)
- return(-ENOMEM);
+ return -EIO;
+ if ((brdp = stli_allocbrd()) == NULL)
+ return -ENOMEM;
if ((brdp->brdnr = stli_getbrdnr()) < 0) {
printk(KERN_INFO "STALLION: too many boards found, "
"maximum supported %d\n", STL_MAXBRDS);
- return(0);
+ return 0;
}
brdp->brdtype = brdtype;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "%s(%d): BAR[]=%lx,%lx,%lx,%lx\n", __FILE__, __LINE__,
- pci_resource_start(devp, 0),
- pci_resource_start(devp, 1),
- pci_resource_start(devp, 2),
- pci_resource_start(devp, 3));
-#endif
-
/*
* We have all resources from the board, so let's set up the actual
* board structure now.
@@ -4557,7 +4079,7 @@ static int stli_initpcibrd(int brdtype, struct pci_dev *devp)
brdp->memaddr = pci_resource_start(devp, 2);
stli_brdinit(brdp);
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4569,20 +4091,12 @@ static int stli_initpcibrd(int brdtype, struct pci_dev *devp)
static int stli_findpcibrds(void)
{
- struct pci_dev *dev = NULL;
- int rc;
-
-#ifdef DEBUG
- printk("stli_findpcibrds()\n");
-#endif
+ struct pci_dev *dev = NULL;
- while ((dev = pci_find_device(PCI_VENDOR_ID_STALLION,
- PCI_DEVICE_ID_ECRA, dev))) {
- if ((rc = stli_initpcibrd(BRD_ECPPCI, dev)))
- return(rc);
+ while ((dev = pci_get_device(PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_ECRA, dev))) {
+ stli_initpcibrd(BRD_ECPPCI, dev);
}
-
- return(0);
+ return 0;
}
#endif
@@ -4595,17 +4109,16 @@ static int stli_findpcibrds(void)
static stlibrd_t *stli_allocbrd(void)
{
- stlibrd_t *brdp;
+ stlibrd_t *brdp;
brdp = kzalloc(sizeof(stlibrd_t), GFP_KERNEL);
if (!brdp) {
printk(KERN_ERR "STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlibrd_t));
+ "(size=%Zd)\n", sizeof(stlibrd_t));
return NULL;
}
-
brdp->magic = STLI_BOARDMAGIC;
- return(brdp);
+ return brdp;
}
/*****************************************************************************/
@@ -4617,13 +4130,9 @@ static stlibrd_t *stli_allocbrd(void)
static int stli_initbrds(void)
{
- stlibrd_t *brdp, *nxtbrdp;
- stlconf_t *confp;
- int i, j;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_initbrds()\n");
-#endif
+ stlibrd_t *brdp, *nxtbrdp;
+ stlconf_t *confp;
+ int i, j;
if (stli_nrbrds > STL_MAXBRDS) {
printk(KERN_INFO "STALLION: too many boards in configuration "
@@ -4638,11 +4147,9 @@ static int stli_initbrds(void)
*/
for (i = 0; (i < stli_nrbrds); i++) {
confp = &stli_brdconf[i];
-#ifdef MODULE
stli_parsebrd(confp, stli_brdsp[i]);
-#endif
- if ((brdp = stli_allocbrd()) == (stlibrd_t *) NULL)
- return(-ENOMEM);
+ if ((brdp = stli_allocbrd()) == NULL)
+ return -ENOMEM;
brdp->brdnr = i;
brdp->brdtype = confp->brdtype;
brdp->iobase = confp->ioaddr1;
@@ -4654,9 +4161,7 @@ static int stli_initbrds(void)
* Static configuration table done, so now use dynamic methods to
* see if any more boards should be configured.
*/
-#ifdef MODULE
stli_argbrds();
-#endif
if (STLI_EISAPROBE)
stli_findeisabrds();
#ifdef CONFIG_PCI
@@ -4672,11 +4177,11 @@ static int stli_initbrds(void)
if (stli_nrbrds > 1) {
for (i = 0; (i < stli_nrbrds); i++) {
brdp = stli_brds[i];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
continue;
for (j = i + 1; (j < stli_nrbrds); j++) {
nxtbrdp = stli_brds[j];
- if (nxtbrdp == (stlibrd_t *) NULL)
+ if (nxtbrdp == NULL)
continue;
if ((brdp->membase >= nxtbrdp->membase) &&
(brdp->membase <= (nxtbrdp->membase +
@@ -4691,7 +4196,7 @@ static int stli_initbrds(void)
if (stli_shared == 0) {
for (i = 0; (i < stli_nrbrds); i++) {
brdp = stli_brds[i];
- if (brdp == (stlibrd_t *) NULL)
+ if (brdp == NULL)
continue;
if (brdp->state & BST_FOUND) {
EBRDENABLE(brdp);
@@ -4701,7 +4206,7 @@ static int stli_initbrds(void)
}
}
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4714,48 +4219,55 @@ static int stli_initbrds(void)
static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp)
{
- unsigned long flags;
- void *memptr;
- stlibrd_t *brdp;
- int brdnr, size, n;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_memread(fp=%x,buf=%x,count=%x,offp=%x)\n",
- (int) fp, (int) buf, count, (int) offp);
-#endif
+ unsigned long flags;
+ void *memptr;
+ stlibrd_t *brdp;
+ int brdnr, size, n;
+ void *p;
+ loff_t off = *offp;
brdnr = iminor(fp->f_dentry->d_inode);
if (brdnr >= stli_nrbrds)
- return(-ENODEV);
+ return -ENODEV;
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (brdp == NULL)
+ return -ENODEV;
if (brdp->state == 0)
- return(-ENODEV);
- if (fp->f_pos >= brdp->memsize)
- return(0);
+ return -ENODEV;
+ if (off >= brdp->memsize || off + count < off)
+ return 0;
- size = MIN(count, (brdp->memsize - fp->f_pos));
+ size = MIN(count, (brdp->memsize - off));
+
+ /*
+ * Copy the data a page at a time
+ */
+
+ p = (void *)__get_free_page(GFP_KERNEL);
+ if(p == NULL)
+ return -ENOMEM;
- save_flags(flags);
- cli();
- EBRDENABLE(brdp);
while (size > 0) {
- memptr = (void *) EBRDGETMEMPTR(brdp, fp->f_pos);
- n = MIN(size, (brdp->pagesize - (((unsigned long) fp->f_pos) % brdp->pagesize)));
- if (copy_to_user(buf, memptr, n)) {
+ spin_lock_irqsave(&brd_lock, flags);
+ EBRDENABLE(brdp);
+ memptr = (void *) EBRDGETMEMPTR(brdp, off);
+ n = MIN(size, (brdp->pagesize - (((unsigned long) off) % brdp->pagesize)));
+ n = MIN(n, PAGE_SIZE);
+ memcpy_fromio(p, memptr, n);
+ EBRDDISABLE(brdp);
+ spin_unlock_irqrestore(&brd_lock, flags);
+ if (copy_to_user(buf, p, n)) {
count = -EFAULT;
goto out;
}
- fp->f_pos += n;
+ off += n;
buf += n;
size -= n;
}
out:
- EBRDDISABLE(brdp);
- restore_flags(flags);
-
- return(count);
+ *offp = off;
+ free_page((unsigned long)p);
+ return count;
}
/*****************************************************************************/
@@ -4764,54 +4276,65 @@ out:
* Code to handle a "staliomem" write operation. This device is the
* contents of the board shared memory. It is used for downloading
* the slave image (and debugging :-)
+ *
+ * FIXME: copy under lock
*/
static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp)
{
- unsigned long flags;
- void *memptr;
- stlibrd_t *brdp;
- char __user *chbuf;
- int brdnr, size, n;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_memwrite(fp=%x,buf=%x,count=%x,offp=%x)\n",
- (int) fp, (int) buf, count, (int) offp);
-#endif
+ unsigned long flags;
+ void *memptr;
+ stlibrd_t *brdp;
+ char __user *chbuf;
+ int brdnr, size, n;
+ void *p;
+ loff_t off = *offp;
brdnr = iminor(fp->f_dentry->d_inode);
+
if (brdnr >= stli_nrbrds)
- return(-ENODEV);
+ return -ENODEV;
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (brdp == NULL)
+ return -ENODEV;
if (brdp->state == 0)
- return(-ENODEV);
- if (fp->f_pos >= brdp->memsize)
- return(0);
+ return -ENODEV;
+ if (off >= brdp->memsize || off + count < off)
+ return 0;
chbuf = (char __user *) buf;
- size = MIN(count, (brdp->memsize - fp->f_pos));
+ size = MIN(count, (brdp->memsize - off));
+
+ /*
+ * Copy the data a page at a time
+ */
+
+ p = (void *)__get_free_page(GFP_KERNEL);
+ if(p == NULL)
+ return -ENOMEM;
- save_flags(flags);
- cli();
- EBRDENABLE(brdp);
while (size > 0) {
- memptr = (void *) EBRDGETMEMPTR(brdp, fp->f_pos);
- n = MIN(size, (brdp->pagesize - (((unsigned long) fp->f_pos) % brdp->pagesize)));
- if (copy_from_user(memptr, chbuf, n)) {
- count = -EFAULT;
+ n = MIN(size, (brdp->pagesize - (((unsigned long) off) % brdp->pagesize)));
+ n = MIN(n, PAGE_SIZE);
+ if (copy_from_user(p, chbuf, n)) {
+ if (count == 0)
+ count = -EFAULT;
goto out;
}
- fp->f_pos += n;
+ spin_lock_irqsave(&brd_lock, flags);
+ EBRDENABLE(brdp);
+ memptr = (void *) EBRDGETMEMPTR(brdp, off);
+ memcpy_toio(memptr, p, n);
+ EBRDDISABLE(brdp);
+ spin_unlock_irqrestore(&brd_lock, flags);
+ off += n;
chbuf += n;
size -= n;
}
out:
- EBRDDISABLE(brdp);
- restore_flags(flags);
-
- return(count);
+ free_page((unsigned long) p);
+ *offp = off;
+ return count;
}
/*****************************************************************************/
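The staliomem read and write paths above bounce data through a kernel page so that copy_to_user()/copy_from_user(), which may fault and sleep, never run under brd_lock; only the access to the windowed shared memory happens with the lock held and the board enabled. A minimal sketch of that pattern for one chunk, where shm_lock and map_window() are illustrative stand-ins rather than symbols from this driver:

/*
 * Copy one chunk of windowed I/O memory out to user space through a
 * bounce page.  The spinlock covers only the iomem access; the user
 * copy, which may sleep, runs after the lock is dropped.
 */
static ssize_t shm_read_chunk(void __user *buf, loff_t off, size_t len)
{
	unsigned long flags;
	ssize_t ret;
	void *page = (void *)__get_free_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	ret = min_t(size_t, len, PAGE_SIZE);
	spin_lock_irqsave(&shm_lock, flags);
	memcpy_fromio(page, map_window(off), ret);
	spin_unlock_irqrestore(&shm_lock, flags);
	if (copy_to_user(buf, page, ret))
		ret = -EFAULT;
	free_page((unsigned long)page);
	return ret;
}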
@@ -4822,16 +4345,16 @@ out:
static int stli_getbrdstats(combrd_t __user *bp)
{
- stlibrd_t *brdp;
- int i;
+ stlibrd_t *brdp;
+ int i;
if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
return -EFAULT;
if (stli_brdstats.brd >= STL_MAXBRDS)
- return(-ENODEV);
+ return -ENODEV;
brdp = stli_brds[stli_brdstats.brd];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (brdp == NULL)
+ return -ENODEV;
memset(&stli_brdstats, 0, sizeof(combrd_t));
stli_brdstats.brd = brdp->brdnr;
@@ -4850,7 +4373,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
if (copy_to_user(bp, &stli_brdstats, sizeof(combrd_t)))
return -EFAULT;
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4861,19 +4384,19 @@ static int stli_getbrdstats(combrd_t __user *bp)
static stliport_t *stli_getport(int brdnr, int panelnr, int portnr)
{
- stlibrd_t *brdp;
- int i;
+ stlibrd_t *brdp;
+ int i;
- if ((brdnr < 0) || (brdnr >= STL_MAXBRDS))
- return((stliport_t *) NULL);
+ if (brdnr < 0 || brdnr >= STL_MAXBRDS)
+ return NULL;
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return((stliport_t *) NULL);
+ if (brdp == NULL)
+ return NULL;
for (i = 0; (i < panelnr); i++)
portnr += brdp->panels[i];
if ((portnr < 0) || (portnr >= brdp->nrports))
- return((stliport_t *) NULL);
- return(brdp->ports[portnr]);
+ return NULL;
+ return brdp->ports[portnr];
}
/*****************************************************************************/
@@ -4892,16 +4415,16 @@ static int stli_portcmdstats(stliport_t *portp)
memset(&stli_comstats, 0, sizeof(comstats_t));
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
+ if (portp == NULL)
+ return -ENODEV;
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (brdp == NULL)
+ return -ENODEV;
if (brdp->state & BST_STARTED) {
if ((rc = stli_cmdwait(brdp, portp, A_GETSTATS,
&stli_cdkstats, sizeof(asystats_t), 1)) < 0)
- return(rc);
+ return rc;
} else {
memset(&stli_cdkstats, 0, sizeof(asystats_t));
}
@@ -4912,13 +4435,12 @@ static int stli_portcmdstats(stliport_t *portp)
stli_comstats.state = portp->state;
stli_comstats.flags = portp->flags;
- save_flags(flags);
- cli();
- if (portp->tty != (struct tty_struct *) NULL) {
+ spin_lock_irqsave(&brd_lock, flags);
+ if (portp->tty != NULL) {
if (portp->tty->driver_data == portp) {
stli_comstats.ttystate = portp->tty->flags;
- stli_comstats.rxbuffered = -1 /*portp->tty->flip.count*/;
- if (portp->tty->termios != (struct termios *) NULL) {
+ stli_comstats.rxbuffered = -1;
+ if (portp->tty->termios != NULL) {
stli_comstats.cflags = portp->tty->termios->c_cflag;
stli_comstats.iflags = portp->tty->termios->c_iflag;
stli_comstats.oflags = portp->tty->termios->c_oflag;
@@ -4926,7 +4448,7 @@ static int stli_portcmdstats(stliport_t *portp)
}
}
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
stli_comstats.txtotal = stli_cdkstats.txchars;
stli_comstats.rxtotal = stli_cdkstats.rxchars + stli_cdkstats.ringover;
@@ -4948,7 +4470,7 @@ static int stli_portcmdstats(stliport_t *portp)
stli_comstats.hwid = stli_cdkstats.hwid;
stli_comstats.signals = stli_mktiocm(stli_cdkstats.signals);
- return(0);
+ return 0;
}
/*****************************************************************************/
@@ -4961,8 +4483,8 @@ static int stli_portcmdstats(stliport_t *portp)
static int stli_getportstats(stliport_t *portp, comstats_t __user *cp)
{
- stlibrd_t *brdp;
- int rc;
+ stlibrd_t *brdp;
+ int rc;
if (!portp) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
@@ -4992,8 +4514,8 @@ static int stli_getportstats(stliport_t *portp, comstats_t __user *cp)
static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp)
{
- stlibrd_t *brdp;
- int rc;
+ stlibrd_t *brdp;
+ int rc;
if (!portp) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
@@ -5031,7 +4553,7 @@ static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp)
static int stli_getportstruct(stliport_t __user *arg)
{
- stliport_t *portp;
+ stliport_t *portp;
if (copy_from_user(&stli_dummyport, arg, sizeof(stliport_t)))
return -EFAULT;
@@ -5052,7 +4574,7 @@ static int stli_getportstruct(stliport_t __user *arg)
static int stli_getbrdstruct(stlibrd_t __user *arg)
{
- stlibrd_t *brdp;
+ stlibrd_t *brdp;
if (copy_from_user(&stli_dummybrd, arg, sizeof(stlibrd_t)))
return -EFAULT;
@@ -5076,15 +4598,10 @@ static int stli_getbrdstruct(stlibrd_t __user *arg)
static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg)
{
- stlibrd_t *brdp;
- int brdnr, rc, done;
+ stlibrd_t *brdp;
+ int brdnr, rc, done;
void __user *argp = (void __user *)arg;
-#ifdef DEBUG
- printk(KERN_DEBUG "stli_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n",
- (int) ip, (int) fp, cmd, (int) arg);
-#endif
-
/*
* First up handle the board independent ioctls.
*/
@@ -5115,7 +4632,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
}
if (done)
- return(rc);
+ return rc;
/*
* Now handle the board specific ioctls. These all depend on the
@@ -5123,12 +4640,12 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
*/
brdnr = iminor(ip);
if (brdnr >= STL_MAXBRDS)
- return(-ENODEV);
+ return -ENODEV;
brdp = stli_brds[brdnr];
if (!brdp)
- return(-ENODEV);
+ return -ENODEV;
if (brdp->state == 0)
- return(-ENODEV);
+ return -ENODEV;
switch (cmd) {
case STL_BINTR:
@@ -5152,8 +4669,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
rc = -ENOIOCTLCMD;
break;
}
-
- return(rc);
+ return rc;
}
static struct tty_operations stli_ops = {
@@ -5187,6 +4703,9 @@ int __init stli_init(void)
int i;
printk(KERN_INFO "%s: version %s\n", stli_drvtitle, stli_drvversion);
+ spin_lock_init(&stli_lock);
+ spin_lock_init(&brd_lock);
+
stli_initbrds();
stli_serial = alloc_tty_driver(STL_MAXBRDS * STL_MAXPORTS);
@@ -5196,10 +4715,6 @@ int __init stli_init(void)
/*
* Allocate a temporary write buffer.
*/
- stli_tmpwritebuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
- if (!stli_tmpwritebuf)
- printk(KERN_ERR "STALLION: failed to allocate memory "
- "(size=%d)\n", STLI_TXBUFSIZE);
stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
if (!stli_txcookbuf)
printk(KERN_ERR "STALLION: failed to allocate memory "
@@ -5243,7 +4758,7 @@ int __init stli_init(void)
printk(KERN_ERR "STALLION: failed to register serial driver\n");
return -EBUSY;
}
- return(0);
+ return 0;
}
/*****************************************************************************/
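The istallion hunks above consistently retype pointers into board shared memory as __iomem and replace direct dereferences of volatile pointers with readb()/readw()/readl(), writeb()/writel() and memcpy_fromio()/memcpy_toio(), with brd_lock guarding the enable/access/disable window. A minimal sketch of the accessor conversion, using a hypothetical status register rather than one of the cdk structures:

#include <linux/io.h>
#include <linux/types.h>

struct demo_regs {
	u32 status;
};

/* old style: plain dereference through a volatile pointer */
static u32 demo_status_old(volatile struct demo_regs *regs)
{
	return regs->status;
}

/* new style: __iomem pointer plus explicit MMIO accessors */
static u32 demo_status_new(struct demo_regs __iomem *regs)
{
	return readl(&regs->status);
}

static void demo_status_clear(struct demo_regs __iomem *regs)
{
	writel(0, &regs->status);
}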
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index edd996f6fb87..4bb3d2272604 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -151,6 +151,7 @@ unsigned char kbd_sysrq_xlate[KEY_MAX + 1] =
"230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
"\r\000/"; /* 0x60 - 0x6f */
static int sysrq_down;
+static int sysrq_alt_use;
#endif
static int sysrq_alt;
@@ -673,7 +674,7 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag, struc
*/
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
{
- static unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
+ static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
value = ret_diacr[value];
k_deadunicode(vc, value, up_flag, regs);
}
@@ -710,8 +711,8 @@ static void k_cur(struct vc_data *vc, unsigned char value, char up_flag, struct
static void k_pad(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
{
- static const char *pad_chars = "0123456789+-*/\015,.?()#";
- static const char *app_map = "pqrstuvwxylSRQMnnmPQS";
+ static const char pad_chars[] = "0123456789+-*/\015,.?()#";
+ static const char app_map[] = "pqrstuvwxylSRQMnnmPQS";
if (up_flag)
return; /* no action, if this is a key release */
@@ -1036,7 +1037,7 @@ static void kbd_refresh_leds(struct input_handle *handle)
#define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\
((dev)->id.bustype == BUS_I8042) && ((dev)->id.vendor == 0x0001) && ((dev)->id.product == 0x0001))
-static unsigned short x86_keycodes[256] =
+static const unsigned short x86_keycodes[256] =
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
@@ -1074,11 +1075,13 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
put_queue(vc, 0x1d | up_flag);
put_queue(vc, 0x45 | up_flag);
return 0;
- case KEY_HANGUEL:
- if (!up_flag) put_queue(vc, 0xf1);
+ case KEY_HANGEUL:
+ if (!up_flag)
+ put_queue(vc, 0xf2);
return 0;
case KEY_HANJA:
- if (!up_flag) put_queue(vc, 0xf2);
+ if (!up_flag)
+ put_queue(vc, 0xf1);
return 0;
}
@@ -1143,7 +1146,7 @@ static void kbd_keycode(unsigned int keycode, int down,
kbd = kbd_table + fg_console;
if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT)
- sysrq_alt = down;
+ sysrq_alt = down ? keycode : 0;
#ifdef CONFIG_SPARC
if (keycode == KEY_STOP)
sparc_l1_a_state = down;
@@ -1163,9 +1166,14 @@ static void kbd_keycode(unsigned int keycode, int down,
#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
- sysrq_down = down;
+ if (!sysrq_down) {
+ sysrq_down = down;
+ sysrq_alt_use = sysrq_alt;
+ }
return;
}
+ if (sysrq_down && !down && keycode == sysrq_alt_use)
+ sysrq_down = 0;
if (sysrq_down && down && !rep) {
handle_sysrq(kbd_sysrq_xlate[keycode], regs, tty);
return;
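The SysRq handling above now records which Alt key armed the combination: sysrq_alt holds the keycode of the Alt key currently held (or 0), sysrq_alt_use remembers the one that was down when SysRq latched, and the latch is only released when that same key goes up. A condensed, self-contained model of the state handling, assuming the same variable names as the driver:

#include <linux/input.h>

static int sysrq_alt;		/* keycode of the Alt key currently held, or 0 */
static int sysrq_alt_use;	/* Alt keycode that armed SysRq */
static int sysrq_down;

static void track_sysrq(unsigned int keycode, int down)
{
	if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT)
		sysrq_alt = down ? keycode : 0;

	if (keycode == KEY_SYSRQ && (sysrq_down || (down && sysrq_alt))) {
		if (!sysrq_down) {
			sysrq_down = down;
			sysrq_alt_use = sysrq_alt;	/* remember which Alt armed it */
		}
		return;
	}
	/* only the Alt key that armed SysRq can release the latch */
	if (sysrq_down && !down && keycode == sysrq_alt_use)
		sysrq_down = 0;
}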
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index f43c2e04eadd..01247cccb89f 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -301,7 +301,7 @@ static struct tty_operations moxa_ops = {
.tiocmset = moxa_tiocmset,
};
-static spinlock_t moxa_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(moxa_lock);
#ifdef CONFIG_PCI
static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
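The moxa hunk replaces the deprecated static SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK(), which declares and initializes the lock in one statement and stays correct when the spinlock implementation carries per-lock debug state. A minimal sketch of the two forms, with a placeholder lock name:

#include <linux/spinlock.h>

/* old form: generic initializer assigned to a separately declared lock */
static spinlock_t demo_lock_old = SPIN_LOCK_UNLOCKED;

/* preferred form: declaration and static initialization in one step */
static DEFINE_SPINLOCK(demo_lock_new);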
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 645d9d713aec..72cfd09091e0 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -996,7 +996,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
info->session = current->signal->session;
info->pgrp = process_group(current);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
/*
status = mxser_get_msr(info->base, 0, info->port);
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index b9371d5bf790..603b9ade5eb0 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1132,7 +1132,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
* buffer, and once to drain the space from the (physical) beginning of
* the buffer to head pointer.
*
- * Called under the tty->atomic_read_lock sem and with TTY_DONT_FLIP set
+ * Called under the tty->atomic_read_lock sem
*
*/
@@ -1271,7 +1271,6 @@ do_it_again:
}
add_wait_queue(&tty->read_wait, &wait);
- set_bit(TTY_DONT_FLIP, &tty->flags);
while (nr) {
/* First test for status change. */
if (tty->packet && tty->link->ctrl_status) {
@@ -1315,9 +1314,7 @@ do_it_again:
break;
}
n_tty_set_room(tty);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
timeout = schedule_timeout(timeout);
- set_bit(TTY_DONT_FLIP, &tty->flags);
continue;
}
__set_current_state(TASK_RUNNING);
@@ -1394,7 +1391,6 @@ do_it_again:
if (time)
timeout = time;
}
- clear_bit(TTY_DONT_FLIP, &tty->flags);
mutex_unlock(&tty->atomic_read_lock);
remove_wait_queue(&tty->read_wait, &wait);
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
new file mode 100644
index 000000000000..5b91e4e25641
--- /dev/null
+++ b/drivers/char/nsc_gpio.c
@@ -0,0 +1,142 @@
+/* linux/drivers/char/nsc_gpio.c
+
+ National Semiconductor common GPIO device-file/VFS methods.
+ Allows a user space process to control the GPIO pins.
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+*/
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#define NAME "nsc_gpio"
+
+void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
+{
+ /* retrieve current config w/o changing it */
+ u32 config = amp->gpio_config(index, ~0, 0);
+
+ /* user requested via 'v' command, so its INFO */
+ dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
+ index, config,
+ (config & 1) ? "OE" : "TS", /* output-enabled/tristate */
+ (config & 2) ? "PP" : "OD", /* push pull / open drain */
+ (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */
+ (config & 8) ? "LOCKED" : "", /* locked / unlocked */
+ (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
+ (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */
+ (config & 64) ? "DEBOUNCE" : "", /* debounce */
+
+ amp->gpio_get(index), amp->gpio_current(index));
+}
+
+ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ unsigned m = iminor(file->f_dentry->d_inode);
+ struct nsc_gpio_ops *amp = file->private_data;
+ struct device *dev = amp->dev;
+ size_t i;
+ int err = 0;
+
+ for (i = 0; i < len; ++i) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ switch (c) {
+ case '0':
+ amp->gpio_set(m, 0);
+ break;
+ case '1':
+ amp->gpio_set(m, 1);
+ break;
+ case 'O':
+ dev_dbg(dev, "GPIO%d output enabled\n", m);
+ amp->gpio_config(m, ~1, 1);
+ break;
+ case 'o':
+ dev_dbg(dev, "GPIO%d output disabled\n", m);
+ amp->gpio_config(m, ~1, 0);
+ break;
+ case 'T':
+ dev_dbg(dev, "GPIO%d output is push pull\n",
+ m);
+ amp->gpio_config(m, ~2, 2);
+ break;
+ case 't':
+ dev_dbg(dev, "GPIO%d output is open drain\n",
+ m);
+ amp->gpio_config(m, ~2, 0);
+ break;
+ case 'P':
+ dev_dbg(dev, "GPIO%d pull up enabled\n", m);
+ amp->gpio_config(m, ~4, 4);
+ break;
+ case 'p':
+ dev_dbg(dev, "GPIO%d pull up disabled\n", m);
+ amp->gpio_config(m, ~4, 0);
+ break;
+ case 'v':
+ /* View Current pin settings */
+ amp->gpio_dump(amp, m);
+ break;
+ case '\n':
+ /* end of settings string, do nothing */
+ break;
+ default:
+ dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
+ m, (int)c);
+ err++;
+ }
+ }
+ if (err)
+ return -EINVAL; /* full string handled, report error */
+
+ return len;
+}
+
+ssize_t nsc_gpio_read(struct file *file, char __user * buf,
+ size_t len, loff_t * ppos)
+{
+ unsigned m = iminor(file->f_dentry->d_inode);
+ int value;
+ struct nsc_gpio_ops *amp = file->private_data;
+
+ value = amp->gpio_get(m);
+ if (put_user(value ? '1' : '0', buf))
+ return -EFAULT;
+
+ return 1;
+}
+
+/* common file-ops routines for both scx200_gpio and pc87360_gpio */
+EXPORT_SYMBOL(nsc_gpio_write);
+EXPORT_SYMBOL(nsc_gpio_read);
+EXPORT_SYMBOL(nsc_gpio_dump);
+
+static int __init nsc_gpio_init(void)
+{
+ printk(KERN_DEBUG NAME " initializing\n");
+ return 0;
+}
+
+static void __exit nsc_gpio_cleanup(void)
+{
+ printk(KERN_DEBUG NAME " cleanup\n");
+}
+
+module_init(nsc_gpio_init);
+module_exit(nsc_gpio_cleanup);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
+MODULE_LICENSE("GPL");
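nsc_gpio_write() above implements a one-character-per-command interface on the GPIO device node: '0'/'1' drive the pin low or high, 'O'/'o' enable or disable the output driver, 'T'/'t' select push-pull or open drain, 'P'/'p' switch the pull-up, and 'v' dumps the pin configuration to the kernel log, while nsc_gpio_read() returns '0' or '1' for the pin's current level. A user-space sketch of driving one pin through that interface; the device path and per-pin node naming below are assumptions, not something this file defines:

/* Configure one GPIO pin as a push-pull output, drive it high, read it back. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical per-pin device node; the pin is selected by minor number */
	int fd = open("/dev/nsc_gpio7", O_RDWR);
	char level;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "OT1v", 4);	/* output enable, push-pull, drive high, dump config */
	read(fd, &level, 1);	/* nsc_gpio_read() hands back '0' or '1' */
	printf("pin reads back %c\n", level);
	close(fd);
	return 0;
}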
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
new file mode 100644
index 000000000000..1c706ccfdbb3
--- /dev/null
+++ b/drivers/char/pc8736x_gpio.c
@@ -0,0 +1,340 @@
+/* linux/drivers/char/pc8736x_gpio.c
+
+ National Semiconductor PC8736x GPIO driver. Allows a user space
+ process to play with the GPIO pins.
+
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+
+ adapted from linux/drivers/char/scx200_gpio.c
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
+*/
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+
+#define DEVNAME "pc8736x_gpio"
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi PC-8736x GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major; /* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static DEFINE_MUTEX(pc8736x_gpio_config_lock);
+static unsigned pc8736x_gpio_base;
+static u8 pc8736x_gpio_shadow[4];
+
+#define SIO_BASE1 0x2E /* 1st command-reg to check */
+#define SIO_BASE2 0x4E /* alt command-reg to check */
+#define SIO_BASE_OFFSET 0x20
+
+#define SIO_SID 0x20 /* SuperI/O ID Register */
+#define SIO_SID_VALUE 0xe9 /* Expected value in SuperI/O ID Register */
+
+#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
+
+#define PC8736X_GPIO_SIZE 16
+
+#define SIO_UNIT_SEL 0x7 /* unit select reg */
+#define SIO_UNIT_ACT 0x30 /* unit enable */
+#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
+#define SIO_VLM_UNIT 0x0D
+#define SIO_TMS_UNIT 0x0E
+
+/* config-space addrs to read/write each unit's runtime addr */
+#define SIO_BASE_HADDR 0x60
+#define SIO_BASE_LADDR 0x61
+
+/* GPIO config-space pin-control addresses */
+#define SIO_GPIO_PIN_SELECT 0xF0
+#define SIO_GPIO_PIN_CONFIG 0xF1
+#define SIO_GPIO_PIN_EVENT 0xF2
+
+static unsigned char superio_cmd = 0;
+static unsigned char selected_device = 0xFF; /* bogus start val */
+
+/* GPIO port runtime access, functionality */
+static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
+/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
+
+#define PORT_OUT 0
+#define PORT_IN 1
+#define PORT_EVT_EN 2
+#define PORT_EVT_STST 3
+
+static struct platform_device *pdev; /* use in dev_*() */
+
+static inline void superio_outb(int addr, int val)
+{
+ outb_p(addr, superio_cmd);
+ outb_p(val, superio_cmd + 1);
+}
+
+static inline int superio_inb(int addr)
+{
+ outb_p(addr, superio_cmd);
+ return inb_p(superio_cmd + 1);
+}
+
+static int pc8736x_superio_present(void)
+{
+ /* try the 2 possible values, read a hardware reg to verify */
+ superio_cmd = SIO_BASE1;
+ if (superio_inb(SIO_SID) == SIO_SID_VALUE)
+ return superio_cmd;
+
+ superio_cmd = SIO_BASE2;
+ if (superio_inb(SIO_SID) == SIO_SID_VALUE)
+ return superio_cmd;
+
+ return 0;
+}
+
+static void device_select(unsigned devldn)
+{
+ superio_outb(SIO_UNIT_SEL, devldn);
+ selected_device = devldn;
+}
+
+static void select_pin(unsigned iminor)
+{
+ /* select GPIO port/pin from device minor number */
+ device_select(SIO_GPIO_UNIT);
+ superio_outb(SIO_GPIO_PIN_SELECT,
+ ((iminor << 1) & 0xF0) | (iminor & 0x7));
+}
+
+static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
+ u32 func_slct)
+{
+ u32 config, new_config;
+
+ mutex_lock(&pc8736x_gpio_config_lock);
+
+ device_select(SIO_GPIO_UNIT);
+ select_pin(index);
+
+ /* read current config value */
+ config = superio_inb(func_slct);
+
+ /* set new config */
+ new_config = (config & mask) | bits;
+ superio_outb(func_slct, new_config);
+
+ mutex_unlock(&pc8736x_gpio_config_lock);
+
+ return config;
+}
+
+static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
+{
+ return pc8736x_gpio_configure_fn(index, mask, bits,
+ SIO_GPIO_PIN_CONFIG);
+}
+
+static int pc8736x_gpio_get(unsigned minor)
+{
+ int port, bit, val;
+
+ port = minor >> 3;
+ bit = minor & 7;
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+ val >>= bit;
+ val &= 1;
+
+ dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
+ minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
+ val);
+
+ return val;
+}
+
+static void pc8736x_gpio_set(unsigned minor, int val)
+{
+ int port, bit, curval;
+
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
+ pc8736x_gpio_base + port_offset[port] + PORT_OUT,
+ curval, bit, (curval & ~(1 << bit)), val, (val << bit));
+
+ val = (curval & ~(1 << bit)) | (val << bit);
+
+ dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
+ " %2x -> %2x\n", minor, port, bit, curval, val);
+
+ outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+
+ dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
+ pc8736x_gpio_shadow[port] = val;
+}
+
+static void pc8736x_gpio_set_high(unsigned index)
+{
+ pc8736x_gpio_set(index, 1);
+}
+
+static void pc8736x_gpio_set_low(unsigned index)
+{
+ pc8736x_gpio_set(index, 0);
+}
+
+static int pc8736x_gpio_current(unsigned minor)
+{
+ int port, bit;
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
+}
+
+static void pc8736x_gpio_change(unsigned index)
+{
+ pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
+}
+
+static struct nsc_gpio_ops pc8736x_access = {
+ .owner = THIS_MODULE,
+ .gpio_config = pc8736x_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = pc8736x_gpio_get,
+ .gpio_set = pc8736x_gpio_set,
+ .gpio_set_high = pc8736x_gpio_set_high,
+ .gpio_set_low = pc8736x_gpio_set_low,
+ .gpio_change = pc8736x_gpio_change,
+ .gpio_current = pc8736x_gpio_current
+};
+
+static int pc8736x_gpio_open(struct inode *inode, struct file *file)
+{
+ unsigned m = iminor(inode);
+ file->private_data = &pc8736x_access;
+
+ dev_dbg(&pdev->dev, "open %d\n", m);
+
+ if (m > 63)
+ return -EINVAL;
+ return nonseekable_open(inode, file);
+}
+
+static struct file_operations pc8736x_gpio_fops = {
+ .owner = THIS_MODULE,
+ .open = pc8736x_gpio_open,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
+};
+
+static void __init pc8736x_init_shadow(void)
+{
+ int port;
+
+ /* read the current values driven on the GPIO signals */
+ for (port = 0; port < 4; ++port)
+ pc8736x_gpio_shadow[port]
+ = inb_p(pc8736x_gpio_base + port_offset[port]
+ + PORT_OUT);
+
+}
+
+static int __init pc8736x_gpio_init(void)
+{
+ int rc = 0;
+
+ pdev = platform_device_alloc(DEVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ rc = -ENODEV;
+ goto undo_platform_dev_alloc;
+ }
+ dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
+
+ if (!pc8736x_superio_present()) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "no device found\n");
+ goto undo_platform_dev_add;
+ }
+ pc8736x_access.dev = &pdev->dev;
+
+ /* Verify that the chip and its GPIO unit are both enabled.
+    My BIOS does this, so only minimal action is taken here.
+ */
+ rc = superio_inb(SIO_CF1);
+ if (!(rc & 0x01)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "device not enabled\n");
+ goto undo_platform_dev_add;
+ }
+ device_select(SIO_GPIO_UNIT);
+ if (!superio_inb(SIO_UNIT_ACT)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO unit not enabled\n");
+ goto undo_platform_dev_add;
+ }
+
+ /* read the GPIO unit base address that the chip responds to */
+ pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
+ | superio_inb(SIO_BASE_LADDR));
+
+ if (!request_region(pc8736x_gpio_base, 16, DEVNAME)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO ioport %x busy\n",
+ pc8736x_gpio_base);
+ goto undo_platform_dev_add;
+ }
+ dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
+
+ rc = register_chrdev(major, DEVNAME, &pc8736x_gpio_fops);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
+ goto undo_platform_dev_add;
+ }
+ if (!major) {
+ major = rc;
+ dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
+ }
+
+ pc8736x_init_shadow();
+ return 0;
+
+undo_platform_dev_add:
+ platform_device_put(pdev);
+undo_platform_dev_alloc:
+ kfree(pdev);
+ return rc;
+}
+
+static void __exit pc8736x_gpio_cleanup(void)
+{
+ dev_dbg(&pdev->dev, " cleanup\n");
+
+ release_region(pc8736x_gpio_base, 16);
+
+ unregister_chrdev(major, DEVNAME);
+}
+
+EXPORT_SYMBOL(pc8736x_access);
+
+module_init(pc8736x_gpio_init);
+module_exit(pc8736x_gpio_cleanup);
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 9b5a2c0e7008..0c17f61549b4 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -101,7 +101,7 @@ static void pty_unthrottle(struct tty_struct * tty)
*
* FIXME: Our pty_write method is called with our ldisc lock held but
* not our partners. We can't just take the other one blindly without
- * risking deadlocks. There is also the small matter of TTY_DONT_FLIP
+ * risking deadlocks.
*/
static int pty_write(struct tty_struct * tty, const unsigned char *buf, int count)
{
diff --git a/drivers/char/rio/riointr.c b/drivers/char/rio/riointr.c
index eec1fea0cb92..0bd09040a5c0 100644
--- a/drivers/char/rio/riointr.c
+++ b/drivers/char/rio/riointr.c
@@ -546,7 +546,7 @@ static void RIOReceive(struct rio_info *p, struct Port *PortP)
** run out of space it will be set to the offset of the
** next byte to copy from the packet data area. The packet
** length field is decremented by the number of bytes that
- ** we succesfully removed from the packet. When this reaches
+ ** we successfully removed from the packet. When this reaches
** zero, we reset the offset pointer to be zero, and free
** the packet from the front of the queue.
*/
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 664a6e97eb1a..5a280a330401 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -1,4 +1,4 @@
-/* linux/drivers/char/scx200_gpio.c
+/* linux/drivers/char/scx200_gpio.c
National Semiconductor SCx200 GPIO driver. Allows a user space
process to play with the GPIO pins.
@@ -6,17 +6,26 @@
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
#include <linux/config.h>
+#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
#include <linux/scx200_gpio.h>
+#include <linux/nsc_gpio.h>
#define NAME "scx200_gpio"
+#define DEVNAME NAME
+
+static struct platform_device *pdev;
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver");
@@ -26,70 +35,23 @@ static int major = 0; /* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");
-static ssize_t scx200_gpio_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- unsigned m = iminor(file->f_dentry->d_inode);
- size_t i;
-
- for (i = 0; i < len; ++i) {
- char c;
- if (get_user(c, data+i))
- return -EFAULT;
- switch (c)
- {
- case '0':
- scx200_gpio_set(m, 0);
- break;
- case '1':
- scx200_gpio_set(m, 1);
- break;
- case 'O':
- printk(KERN_INFO NAME ": GPIO%d output enabled\n", m);
- scx200_gpio_configure(m, ~1, 1);
- break;
- case 'o':
- printk(KERN_INFO NAME ": GPIO%d output disabled\n", m);
- scx200_gpio_configure(m, ~1, 0);
- break;
- case 'T':
- printk(KERN_INFO NAME ": GPIO%d output is push pull\n", m);
- scx200_gpio_configure(m, ~2, 2);
- break;
- case 't':
- printk(KERN_INFO NAME ": GPIO%d output is open drain\n", m);
- scx200_gpio_configure(m, ~2, 0);
- break;
- case 'P':
- printk(KERN_INFO NAME ": GPIO%d pull up enabled\n", m);
- scx200_gpio_configure(m, ~4, 4);
- break;
- case 'p':
- printk(KERN_INFO NAME ": GPIO%d pull up disabled\n", m);
- scx200_gpio_configure(m, ~4, 0);
- break;
- }
- }
-
- return len;
-}
-
-static ssize_t scx200_gpio_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- unsigned m = iminor(file->f_dentry->d_inode);
- int value;
-
- value = scx200_gpio_get(m);
- if (put_user(value ? '1' : '0', buf))
- return -EFAULT;
-
- return 1;
-}
+struct nsc_gpio_ops scx200_access = {
+ .owner = THIS_MODULE,
+ .gpio_config = scx200_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = scx200_gpio_get,
+ .gpio_set = scx200_gpio_set,
+ .gpio_set_high = scx200_gpio_set_high,
+ .gpio_set_low = scx200_gpio_set_low,
+ .gpio_change = scx200_gpio_change,
+ .gpio_current = scx200_gpio_current
+};
static int scx200_gpio_open(struct inode *inode, struct file *file)
{
unsigned m = iminor(inode);
+ file->private_data = &scx200_access;
+
if (m > 63)
return -EINVAL;
return nonseekable_open(inode, file);
@@ -103,47 +65,81 @@ static int scx200_gpio_release(struct inode *inode, struct file *file)
static struct file_operations scx200_gpio_fops = {
.owner = THIS_MODULE,
- .write = scx200_gpio_write,
- .read = scx200_gpio_read,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
.open = scx200_gpio_open,
.release = scx200_gpio_release,
};
+struct cdev *scx200_devices;
+static int num_pins = 32;
+
static int __init scx200_gpio_init(void)
{
- int r;
-
- printk(KERN_DEBUG NAME ": NatSemi SCx200 GPIO Driver\n");
+ int rc, i;
+ dev_t dev = MKDEV(major, 0);
if (!scx200_gpio_present()) {
- printk(KERN_ERR NAME ": no SCx200 gpio pins available\n");
+ printk(KERN_ERR NAME ": no SCx200 gpio present\n");
return -ENODEV;
}
- r = register_chrdev(major, NAME, &scx200_gpio_fops);
- if (r < 0) {
- printk(KERN_ERR NAME ": unable to register character device\n");
- return r;
+ /* support dev_dbg() with pdev->dev */
+ pdev = platform_device_alloc(DEVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc)
+ goto undo_malloc;
+
+ /* nsc_gpio uses dev_dbg(), so needs this */
+ scx200_access.dev = &pdev->dev;
+
+ if (major)
+ rc = register_chrdev_region(dev, num_pins, "scx200_gpio");
+ else {
+ rc = alloc_chrdev_region(&dev, 0, num_pins, "scx200_gpio");
+ major = MAJOR(dev);
}
- if (!major) {
- major = r;
- printk(KERN_DEBUG NAME ": got dynamic major %d\n", major);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
+ goto undo_platform_device_add;
+ }
+ scx200_devices = kzalloc(num_pins * sizeof(struct cdev), GFP_KERNEL);
+ if (!scx200_devices) {
+ rc = -ENOMEM;
+ goto undo_chrdev_region;
+ }
+ for (i = 0; i < num_pins; i++) {
+ struct cdev *cdev = &scx200_devices[i];
+ cdev_init(cdev, &scx200_gpio_fops);
+ cdev->owner = THIS_MODULE;
+ rc = cdev_add(cdev, MKDEV(major, i), 1);
+ /* tolerate 'minor' errors */
+ if (rc)
+ dev_err(&pdev->dev, "Error %d on minor %d", rc, i);
}
- return 0;
+ return 0; /* succeed */
+
+undo_chrdev_region:
+ unregister_chrdev_region(dev, num_pins);
+undo_platform_device_add:
+ platform_device_put(pdev);
+undo_malloc:
+ kfree(pdev);
+ return rc;
}
static void __exit scx200_gpio_cleanup(void)
{
- unregister_chrdev(major, NAME);
+ kfree(scx200_devices);
+ unregister_chrdev_region(MKDEV(major, 0), num_pins);
+ platform_device_put(pdev);
+ platform_device_unregister(pdev);
+ /* kfree(pdev); */
}
module_init(scx200_gpio_init);
module_exit(scx200_gpio_cleanup);
-
-/*
- Local variables:
- compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 1b5330299e30..d2d6b01dcd05 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2477,7 +2477,7 @@ static int __init specialix_init(void)
#endif
for (i = 0; i < SX_NBOARD; i++)
- sx_board[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&sx_board[i].lock);
if (sx_init_drivers()) {
func_exit();
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index a9c5a7230f89..00b4a2187164 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -141,15 +141,6 @@ static char *stl_drvversion = "5.6.0";
static struct tty_driver *stl_serial;
/*
- * We will need to allocate a temporary write buffer for chars that
- * come direct from user space. The problem is that a copy from user
- * space might cause a page fault (typically on a system that is
- * swapping!). All ports will share one buffer - since if the system
- * is already swapping a shared buffer won't make things any worse.
- */
-static char *stl_tmpwritebuf;
-
-/*
* Define a local default termios struct. All ports will be created
* with this termios initially. Basically all it defines is a raw port
* at 9600, 8 data bits, 1 stop bit.
@@ -363,6 +354,14 @@ static unsigned char stl_vecmap[] = {
};
/*
+ * Lock ordering: stallion_lock may not be taken while holding
+ * brd_lock.
+ */
+
+static spinlock_t brd_lock; /* Guard the board mapping */
+static spinlock_t stallion_lock; /* Guard the tty driver */
+
+/*
* Set up enable and disable macros for the ECH boards. They require
* the secondary io address space to be activated and deactivated.
* This way all ECH boards can share their secondary io region.
@@ -725,17 +724,7 @@ static struct class *stallion_class;
static int __init stallion_module_init(void)
{
- unsigned long flags;
-
-#ifdef DEBUG
- printk("init_module()\n");
-#endif
-
- save_flags(flags);
- cli();
stl_init();
- restore_flags(flags);
-
return 0;
}
@@ -746,7 +735,6 @@ static void __exit stallion_module_exit(void)
stlbrd_t *brdp;
stlpanel_t *panelp;
stlport_t *portp;
- unsigned long flags;
int i, j, k;
#ifdef DEBUG
@@ -756,9 +744,6 @@ static void __exit stallion_module_exit(void)
printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle,
stl_drvversion);
- save_flags(flags);
- cli();
-
/*
* Free up all allocated resources used by the ports. This includes
* memory and interrupts. As part of this process we will also do
@@ -770,7 +755,6 @@ static void __exit stallion_module_exit(void)
if (i) {
printk("STALLION: failed to un-register tty driver, "
"errno=%d\n", -i);
- restore_flags(flags);
return;
}
for (i = 0; i < 4; i++) {
@@ -783,8 +767,6 @@ static void __exit stallion_module_exit(void)
"errno=%d\n", -i);
class_destroy(stallion_class);
- kfree(stl_tmpwritebuf);
-
for (i = 0; (i < stl_nrbrds); i++) {
if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL)
continue;
@@ -814,8 +796,6 @@ static void __exit stallion_module_exit(void)
kfree(brdp);
stl_brds[i] = (stlbrd_t *) NULL;
}
-
- restore_flags(flags);
}
module_init(stallion_module_init);
@@ -948,7 +928,7 @@ static stlbrd_t *stl_allocbrd(void)
brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL);
if (!brdp) {
- printk("STALLION: failed to allocate memory (size=%d)\n",
+ printk("STALLION: failed to allocate memory (size=%Zd)\n",
sizeof(stlbrd_t));
return NULL;
}
@@ -1066,16 +1046,17 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp)
rc = 0;
doclocal = 0;
+ spin_lock_irqsave(&stallion_lock, flags);
+
if (portp->tty->termios->c_cflag & CLOCAL)
doclocal++;
- save_flags(flags);
- cli();
portp->openwaitcnt++;
if (! tty_hung_up_p(filp))
portp->refcount--;
for (;;) {
+ /* Takes brd_lock internally */
stl_setsignals(portp, 1, 1);
if (tty_hung_up_p(filp) ||
((portp->flags & ASYNC_INITIALIZED) == 0)) {
@@ -1093,13 +1074,14 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp)
rc = -ERESTARTSYS;
break;
}
+ /* FIXME */
interruptible_sleep_on(&portp->open_wait);
}
if (! tty_hung_up_p(filp))
portp->refcount++;
portp->openwaitcnt--;
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
return rc;
}
@@ -1119,16 +1101,15 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
if (portp == (stlport_t *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&stallion_lock, flags);
if (tty_hung_up_p(filp)) {
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
return;
}
if ((tty->count == 1) && (portp->refcount != 1))
portp->refcount = 1;
if (portp->refcount-- > 1) {
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
return;
}
@@ -1142,11 +1123,18 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
* (The sc26198 has no "end-of-data" interrupt only empty FIFO)
*/
tty->closing = 1;
+
+ spin_unlock_irqrestore(&stallion_lock, flags);
+
if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, portp->closing_wait);
stl_waituntilsent(tty, (HZ / 2));
+
+ spin_lock_irqsave(&stallion_lock, flags);
portp->flags &= ~ASYNC_INITIALIZED;
+ spin_unlock_irqrestore(&stallion_lock, flags);
+
stl_disableintrs(portp);
if (tty->termios->c_cflag & HUPCL)
stl_setsignals(portp, 0, 0);
@@ -1173,7 +1161,6 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&portp->close_wait);
- restore_flags(flags);
}
/*****************************************************************************/
@@ -1195,9 +1182,6 @@ static int stl_write(struct tty_struct *tty, const unsigned char *buf, int count
(int) tty, (int) buf, count);
#endif
- if ((tty == (struct tty_struct *) NULL) ||
- (stl_tmpwritebuf == (char *) NULL))
- return 0;
portp = tty->driver_data;
if (portp == (stlport_t *) NULL)
return 0;
@@ -1302,11 +1286,6 @@ static void stl_flushchars(struct tty_struct *tty)
if (portp->tx.buf == (char *) NULL)
return;
-#if 0
- if (tty->stopped || tty->hw_stopped ||
- (portp->tx.head == portp->tx.tail))
- return;
-#endif
stl_startrxtx(portp, -1, 1);
}
@@ -1977,12 +1956,14 @@ static int stl_eiointr(stlbrd_t *brdp)
unsigned int iobase;
int handled = 0;
+ spin_lock(&brd_lock);
panelp = brdp->panels[0];
iobase = panelp->iobase;
while (inb(brdp->iostatus) & EIO_INTRPEND) {
handled = 1;
(* panelp->isr)(panelp, iobase);
}
+ spin_unlock(&brd_lock);
return handled;
}
@@ -2168,7 +2149,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
portp = kzalloc(sizeof(stlport_t), GFP_KERNEL);
if (!portp) {
printk("STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlport_t));
+ "(size=%Zd)\n", sizeof(stlport_t));
break;
}
@@ -2304,7 +2285,7 @@ static inline int stl_initeio(stlbrd_t *brdp)
panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
if (!panelp) {
printk(KERN_WARNING "STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlpanel_t));
+ "(size=%Zd)\n", sizeof(stlpanel_t));
return -ENOMEM;
}
@@ -2478,7 +2459,7 @@ static inline int stl_initech(stlbrd_t *brdp)
panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
if (!panelp) {
printk("STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlpanel_t));
+ "(size=%Zd)\n", sizeof(stlpanel_t));
break;
}
panelp->magic = STL_PANELMAGIC;
@@ -2879,8 +2860,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
portp->stats.lflags = 0;
portp->stats.rxbuffered = 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&stallion_lock, flags);
if (portp->tty != (struct tty_struct *) NULL) {
if (portp->tty->driver_data == portp) {
portp->stats.ttystate = portp->tty->flags;
@@ -2894,7 +2874,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
}
}
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
head = portp->tx.head;
tail = portp->tx.tail;
@@ -3049,6 +3029,9 @@ static int __init stl_init(void)
int i;
printk(KERN_INFO "%s: version %s\n", stl_drvtitle, stl_drvversion);
+ spin_lock_init(&stallion_lock);
+ spin_lock_init(&brd_lock);
+
stl_initbrds();
stl_serial = alloc_tty_driver(STL_MAXBRDS * STL_MAXPORTS);
@@ -3056,14 +3039,6 @@ static int __init stl_init(void)
return -1;
/*
- * Allocate a temporary write buffer.
- */
- stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
- if (!stl_tmpwritebuf)
- printk("STALLION: failed to allocate memory (size=%d)\n",
- STL_TXBUFSIZE);
-
-/*
* Set up a character driver for per board stuff. This is mainly used
* to do stats ioctls on the ports.
*/
@@ -3147,11 +3122,13 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
unsigned int gfrcr;
int chipmask, i, j;
int nrchips, uartaddr, ioaddr;
+ unsigned long flags;
#ifdef DEBUG
printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(panelp->brdnr, panelp->pagenr);
/*
@@ -3189,6 +3166,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
}
BRDDISABLE(panelp->brdnr);
+ spin_unlock_irqrestore(&brd_lock, flags);
return chipmask;
}
@@ -3200,6 +3178,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
+ unsigned long flags;
#ifdef DEBUG
printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
@@ -3209,6 +3188,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po
(portp == (stlport_t *) NULL))
return;
+ spin_lock_irqsave(&brd_lock, flags);
portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) ||
(portp->portnr < 8)) ? 0 : EREG_BANKSIZE);
portp->uartaddr = (portp->portnr & 0x04) << 5;
@@ -3219,6 +3199,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po
stl_cd1400setreg(portp, LIVR, (portp->portnr << 3));
portp->hwid = stl_cd1400getreg(portp, GFRCR);
BRDDISABLE(portp->brdnr);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3428,8 +3409,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp)
tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3));
srer = stl_cd1400getreg(portp, SRER);
@@ -3466,7 +3446,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp)
portp->sigs &= ~TIOCM_CD;
stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron));
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3492,8 +3472,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts)
if (rts > 0)
msvr2 = MSVR2_RTS;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
if (rts >= 0)
@@ -3501,7 +3480,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts)
if (dtr >= 0)
stl_cd1400setreg(portp, MSVR1, msvr1);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3520,14 +3499,13 @@ static int stl_cd1400getsignals(stlport_t *portp)
printk("stl_cd1400getsignals(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
msvr1 = stl_cd1400getreg(portp, MSVR1);
msvr2 = stl_cd1400getreg(portp, MSVR2);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
sigs = 0;
sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0;
@@ -3569,15 +3547,14 @@ static void stl_cd1400enablerxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
ccr |= CCR_RXENABLE;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400ccrwait(portp);
stl_cd1400setreg(portp, CCR, ccr);
stl_cd1400ccrwait(portp);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3609,8 +3586,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
sreron |= SRER_RXDATA;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400setreg(portp, SRER,
@@ -3618,7 +3594,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx)
BRDDISABLE(portp->brdnr);
if (tx > 0)
set_bit(ASYI_TXBUSY, &portp->istate);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3634,13 +3610,12 @@ static void stl_cd1400disableintrs(stlport_t *portp)
#ifdef DEBUG
printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400setreg(portp, SRER, 0);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3653,8 +3628,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len)
printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400setreg(portp, SRER,
@@ -3664,7 +3638,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len)
portp->brklen = len;
if (len == 1)
portp->stats.txbreaks++;
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3688,8 +3662,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
@@ -3729,7 +3702,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state)
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3753,8 +3726,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
if (state) {
@@ -3769,7 +3741,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state)
stl_cd1400ccrwait(portp);
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3785,8 +3757,7 @@ static void stl_cd1400flush(stlport_t *portp)
if (portp == (stlport_t *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400ccrwait(portp);
@@ -3794,7 +3765,7 @@ static void stl_cd1400flush(stlport_t *portp)
stl_cd1400ccrwait(portp);
portp->tx.tail = portp->tx.head;
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3833,6 +3804,7 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase)
(int) panelp, iobase);
#endif
+ spin_lock(&brd_lock);
outb(SVRR, iobase);
svrtype = inb(iobase + EREG_DATA);
if (panelp->nrports > 4) {
@@ -3846,6 +3818,8 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase)
stl_cd1400txisr(panelp, iobase);
else if (svrtype & SVRR_MDM)
stl_cd1400mdmisr(panelp, iobase);
+
+ spin_unlock(&brd_lock);
}
/*****************************************************************************/
@@ -4433,8 +4407,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp)
tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, IMR, 0);
stl_sc26198updatereg(portp, MR0, mr0);
@@ -4461,7 +4434,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp)
portp->imr = (portp->imr & ~imroff) | imron;
stl_sc26198setreg(portp, IMR, portp->imr);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4491,13 +4464,12 @@ static void stl_sc26198setsignals(stlport_t *portp, int dtr, int rts)
else if (rts > 0)
iopioron |= IPR_RTS;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, IOPIOR,
((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron));
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4516,12 +4488,11 @@ static int stl_sc26198getsignals(stlport_t *portp)
printk("stl_sc26198getsignals(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
ipr = stl_sc26198getreg(portp, IPR);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
sigs = 0;
sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD;
@@ -4558,13 +4529,12 @@ static void stl_sc26198enablerxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
ccr |= CR_RXENABLE;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, SCCR, ccr);
BRDDISABLE(portp->brdnr);
portp->crenable = ccr;
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4593,15 +4563,14 @@ static void stl_sc26198startrxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, IMR, imr);
BRDDISABLE(portp->brdnr);
portp->imr = imr;
if (tx > 0)
set_bit(ASYI_TXBUSY, &portp->istate);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4618,13 +4587,12 @@ static void stl_sc26198disableintrs(stlport_t *portp)
printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
portp->imr = 0;
stl_sc26198setreg(portp, IMR, 0);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4637,8 +4605,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len)
printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
if (len == 1) {
stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK);
@@ -4647,7 +4614,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len)
stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK);
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4672,8 +4639,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
if (state) {
@@ -4719,7 +4685,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state)
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4744,8 +4710,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
if (state) {
mr0 = stl_sc26198getreg(portp, MR0);
@@ -4765,7 +4730,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state)
stl_sc26198setreg(portp, MR0, mr0);
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4781,14 +4746,13 @@ static void stl_sc26198flush(stlport_t *portp)
if (portp == (stlport_t *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, SCCR, CR_TXRESET);
stl_sc26198setreg(portp, SCCR, portp->crenable);
BRDDISABLE(portp->brdnr);
portp->tx.tail = portp->tx.head;
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4815,12 +4779,11 @@ static int stl_sc26198datastate(stlport_t *portp)
if (test_bit(ASYI_TXBUSY, &portp->istate))
return 1;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
sr = stl_sc26198getreg(portp, SR);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
return (sr & SR_TXEMPTY) ? 0 : 1;
}
@@ -4878,6 +4841,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase)
stlport_t *portp;
unsigned int iack;
+ spin_lock(&brd_lock);
+
/*
* Work around bug in sc26198 chip... Cannot have A6 address
* line of UART high, else iack will be returned as 0.
@@ -4893,6 +4858,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase)
stl_sc26198txisr(portp);
else
stl_sc26198otherisr(portp, iack);
+
+ spin_unlock(&brd_lock);
}
/*****************************************************************************/
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 3b4747230270..76b9107f7f81 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2320,7 +2320,7 @@ static int sx_init_portstructs (int nboards, int nports)
#ifdef NEW_WRITE_LOCKING
port->gs.port_write_mutex = MUTEX;
#endif
- port->gs.driver_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&port->gs.driver_lock);
/*
* Initializing wait queue
*/
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index f58ad7f68267..ef68d152d3e4 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -343,7 +343,7 @@ static ssize_t store_received_ref_clk3b(struct device *d,
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
- SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
+ SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 8b2a59969868..8d19f7281f0b 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -267,7 +267,6 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
p->used = 0;
p->size = size;
p->next = NULL;
- p->active = 0;
p->commit = 0;
p->read = 0;
p->char_buf_ptr = (char *)(p->data);
@@ -327,10 +326,9 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
remove this conditional if its worth it. This would be invisible
to the callers */
- if ((b = tty->buf.tail) != NULL) {
+ if ((b = tty->buf.tail) != NULL)
left = b->size - b->used;
- b->active = 1;
- } else
+ else
left = 0;
if (left < size) {
@@ -338,12 +336,10 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
if ((n = tty_buffer_find(tty, size)) != NULL) {
if (b != NULL) {
b->next = n;
- b->active = 0;
b->commit = b->used;
} else
tty->buf.head = n;
tty->buf.tail = n;
- n->active = 1;
} else
size = left;
}
@@ -404,10 +400,8 @@ void tty_schedule_flip(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
- if (tty->buf.tail != NULL) {
- tty->buf.tail->active = 0;
+ if (tty->buf.tail != NULL)
tty->buf.tail->commit = tty->buf.tail->used;
- }
spin_unlock_irqrestore(&tty->buf.lock, flags);
schedule_delayed_work(&tty->buf.work, 1);
}
@@ -784,11 +778,8 @@ restart:
}
clear_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
- if (o_tty) {
+ if (o_tty)
clear_bit(TTY_LDISC, &o_tty->flags);
- clear_bit(TTY_DONT_FLIP, &o_tty->flags);
- }
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
/*
@@ -1955,7 +1946,6 @@ static void release_dev(struct file * filp)
* race with the set_ldisc code path.
*/
clear_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
cancel_delayed_work(&tty->buf.work);
/*
@@ -2621,10 +2611,9 @@ int tty_ioctl(struct inode * inode, struct file * file,
tty->driver->break_ctl(tty, 0);
return 0;
case TCSBRK: /* SVID version: non-zero arg --> no break */
- /*
- * XXX is the above comment correct, or the
- * code below correct? Is this ioctl used at
- * all by anyone?
+ /* non-zero arg means wait for all output data
+ * to be sent (performed above) but don't send break.
+ * This is used by the tcdrain() termios function.
*/
if (!arg)
return send_break(tty, 250);
@@ -2776,8 +2765,7 @@ static void flush_to_ldisc(void *private_)
struct tty_struct *tty = (struct tty_struct *) private_;
unsigned long flags;
struct tty_ldisc *disc;
- struct tty_buffer *tbuf;
- int count;
+ struct tty_buffer *tbuf, *head;
char *char_buf;
unsigned char *flag_buf;
@@ -2785,32 +2773,37 @@ static void flush_to_ldisc(void *private_)
if (disc == NULL) /* !TTY_LDISC */
return;
- if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
- /*
- * Do it after the next timer tick:
- */
- schedule_delayed_work(&tty->buf.work, 1);
- goto out;
- }
spin_lock_irqsave(&tty->buf.lock, flags);
- while((tbuf = tty->buf.head) != NULL) {
- while ((count = tbuf->commit - tbuf->read) != 0) {
- char_buf = tbuf->char_buf_ptr + tbuf->read;
- flag_buf = tbuf->flag_buf_ptr + tbuf->read;
- tbuf->read += count;
+ head = tty->buf.head;
+ if (head != NULL) {
+ tty->buf.head = NULL;
+ for (;;) {
+ int count = head->commit - head->read;
+ if (!count) {
+ if (head->next == NULL)
+ break;
+ tbuf = head;
+ head = head->next;
+ tty_buffer_free(tty, tbuf);
+ continue;
+ }
+ if (!tty->receive_room) {
+ schedule_delayed_work(&tty->buf.work, 1);
+ break;
+ }
+ if (count > tty->receive_room)
+ count = tty->receive_room;
+ char_buf = head->char_buf_ptr + head->read;
+ flag_buf = head->flag_buf_ptr + head->read;
+ head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
disc->receive_buf(tty, char_buf, flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
}
- if (tbuf->active)
- break;
- tty->buf.head = tbuf->next;
- if (tty->buf.head == NULL)
- tty->buf.tail = NULL;
- tty_buffer_free(tty, tbuf);
+ tty->buf.head = head;
}
spin_unlock_irqrestore(&tty->buf.lock, flags);
-out:
+
tty_ldisc_deref(disc);
}
@@ -2903,10 +2896,8 @@ void tty_flip_buffer_push(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
- if (tty->buf.tail != NULL) {
- tty->buf.tail->active = 0;
+ if (tty->buf.tail != NULL)
tty->buf.tail->commit = tty->buf.tail->used;
- }
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index 05e6e814d86f..073da48c092e 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -689,9 +689,9 @@ static int __devinit giu_probe(struct platform_device *dev)
for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
if (i < GIU_IRQ(GIUINT_HIGH_OFFSET))
- irq_desc[i].handler = &giuint_low_irq_type;
+ irq_desc[i].chip = &giuint_low_irq_type;
else
- irq_desc[i].handler = &giuint_high_irq_type;
+ irq_desc[i].chip = &giuint_high_irq_type;
}
return cascade_irq(GIUINT_IRQ, giu_get_irq);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6c94879e0b99..714d95ff2f1e 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -98,7 +98,22 @@
#include <asm/system.h>
#include <asm/uaccess.h>
+#define MAX_NR_CON_DRIVER 16
+#define CON_DRIVER_FLAG_MODULE 1
+#define CON_DRIVER_FLAG_INIT 2
+
+struct con_driver {
+ const struct consw *con;
+ const char *desc;
+ struct class_device *class_dev;
+ int node;
+ int first;
+ int last;
+ int flag;
+};
+
+static struct con_driver registered_con_driver[MAX_NR_CON_DRIVER];
const struct consw *conswitchp;
/* A bitmap for codes <32. A bit of 1 indicates that the code
@@ -2557,7 +2572,7 @@ static int __init con_init(void)
{
const char *display_desc = NULL;
struct vc_data *vc;
- unsigned int currcons = 0;
+ unsigned int currcons = 0, i;
acquire_console_sem();
@@ -2569,6 +2584,22 @@ static int __init con_init(void)
return 0;
}
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == NULL) {
+ con_driver->con = conswitchp;
+ con_driver->desc = display_desc;
+ con_driver->flag = CON_DRIVER_FLAG_INIT;
+ con_driver->first = 0;
+ con_driver->last = MAX_NR_CONSOLES - 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ con_driver_map[i] = conswitchp;
+
init_timer(&console_timer);
console_timer.function = blank_screen_t;
if (blankinterval) {
@@ -2656,38 +2687,53 @@ int __init vty_init(void)
}
#ifndef VT_SINGLE_DRIVER
+#include <linux/device.h>
-/*
- * If we support more console drivers, this function is used
- * when a driver wants to take over some existing consoles
- * and become default driver for newly opened ones.
- */
+static struct class *vtconsole_class;
-int take_over_console(const struct consw *csw, int first, int last, int deflt)
+static int bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
{
- int i, j = -1;
- const char *desc;
- struct module *owner;
+ struct module *owner = csw->owner;
+ const char *desc = NULL;
+ struct con_driver *con_driver;
+ int i, j = -1, k = -1, retval = -ENODEV;
- owner = csw->owner;
if (!try_module_get(owner))
return -ENODEV;
acquire_console_sem();
- desc = csw->con_startup();
- if (!desc) {
- release_console_sem();
- module_put(owner);
- return -ENODEV;
+ /* check if driver is registered */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == csw) {
+ desc = con_driver->desc;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval)
+ goto err;
+
+ if (!(con_driver->flag & CON_DRIVER_FLAG_INIT)) {
+ csw->con_startup();
+ con_driver->flag |= CON_DRIVER_FLAG_INIT;
}
+
if (deflt) {
if (conswitchp)
module_put(conswitchp->owner);
+
__module_get(owner);
conswitchp = csw;
}
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+
for (i = first; i <= last; i++) {
int old_was_color;
struct vc_data *vc = vc_cons[i].d;
@@ -2701,15 +2747,17 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
continue;
j = i;
- if (CON_IS_VISIBLE(vc))
+
+ if (CON_IS_VISIBLE(vc)) {
+ k = i;
save_screen(vc);
+ }
+
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
- vc->vc_visible_origin = vc->vc_origin;
- vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size;
- vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x;
visual_init(vc, i, 0);
+ set_origin(vc);
update_attr(vc);
/* If the console changed between mono <-> color, then
@@ -2718,36 +2766,506 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
*/
if (old_was_color != vc->vc_can_do_color)
clear_buffer_attributes(vc);
-
- if (CON_IS_VISIBLE(vc))
- update_screen(vc);
}
+
printk("Console: switching ");
if (!deflt)
printk("consoles %d-%d ", first+1, last+1);
- if (j >= 0)
+ if (j >= 0) {
+ struct vc_data *vc = vc_cons[j].d;
+
printk("to %s %s %dx%d\n",
- vc_cons[j].d->vc_can_do_color ? "colour" : "mono",
- desc, vc_cons[j].d->vc_cols, vc_cons[j].d->vc_rows);
- else
+ vc->vc_can_do_color ? "colour" : "mono",
+ desc, vc->vc_cols, vc->vc_rows);
+
+ if (k >= 0) {
+ vc = vc_cons[k].d;
+ update_screen(vc);
+ }
+ } else
printk("to %s\n", desc);
+ retval = 0;
+err:
release_console_sem();
+ module_put(owner);
+ return retval;
+};
+
+#ifdef CONFIG_VT_HW_CONSOLE_BINDING
+static int con_is_graphics(const struct consw *csw, int first, int last)
+{
+ int i, retval = 0;
+
+ for (i = first; i <= last; i++) {
+ struct vc_data *vc = vc_cons[i].d;
+
+ if (vc && vc->vc_mode == KD_GRAPHICS) {
+ retval = 1;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+static int unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+{
+ struct module *owner = csw->owner;
+ const struct consw *defcsw = NULL;
+ struct con_driver *con_driver = NULL, *con_back = NULL;
+ int i, retval = -ENODEV;
+
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+ acquire_console_sem();
+
+ /* check if driver is registered and if it is unbindable */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == csw &&
+ con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval) {
+ release_console_sem();
+ goto err;
+ }
+
+ retval = -ENODEV;
+
+ /* check if backup driver exists */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_back = &registered_con_driver[i];
+
+ if (con_back->con &&
+ !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
+ defcsw = con_back->con;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval) {
+ release_console_sem();
+ goto err;
+ }
+
+ if (!con_is_bound(csw)) {
+ release_console_sem();
+ goto err;
+ }
+
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+
+ for (i = first; i <= last; i++) {
+ if (con_driver_map[i] == csw) {
+ module_put(csw->owner);
+ con_driver_map[i] = NULL;
+ }
+ }
+
+ if (!con_is_bound(defcsw)) {
+ const struct consw *defconsw = conswitchp;
+
+ defcsw->con_startup();
+ con_back->flag |= CON_DRIVER_FLAG_INIT;
+ /*
+ * vgacon may change the default driver to point
+ * to dummycon; we restore it here.
+ */
+ conswitchp = defconsw;
+ }
+
+ if (!con_is_bound(csw))
+ con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+ release_console_sem();
+ /* ignore return value, binding should not fail */
+ bind_con_driver(defcsw, first, last, deflt);
+err:
module_put(owner);
+ return retval;
+
+}
+
+static int vt_bind(struct con_driver *con)
+{
+ const struct consw *defcsw = NULL, *csw = NULL;
+ int i, more = 1, first = -1, last = -1, deflt = 0;
+
+ if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
+ con_is_graphics(con->con, con->first, con->last))
+ goto err;
+
+ csw = con->con;
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con = &registered_con_driver[i];
+
+ if (con->con && !(con->flag & CON_DRIVER_FLAG_MODULE)) {
+ defcsw = con->con;
+ break;
+ }
+ }
+
+ if (!defcsw)
+ goto err;
+
+ while (more) {
+ more = 0;
+
+ for (i = con->first; i <= con->last; i++) {
+ if (con_driver_map[i] == defcsw) {
+ if (first == -1)
+ first = i;
+ last = i;
+ more = 1;
+ } else if (first != -1)
+ break;
+ }
+
+ if (first == 0 && last == MAX_NR_CONSOLES -1)
+ deflt = 1;
+
+ if (first != -1)
+ bind_con_driver(csw, first, last, deflt);
+
+ first = -1;
+ last = -1;
+ deflt = 0;
+ }
+
+err:
return 0;
}
-void give_up_console(const struct consw *csw)
+static int vt_unbind(struct con_driver *con)
+{
+ const struct consw *csw = NULL;
+ int i, more = 1, first = -1, last = -1, deflt = 0;
+
+ if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
+ con_is_graphics(con->con, con->first, con->last))
+ goto err;
+
+ csw = con->con;
+
+ while (more) {
+ more = 0;
+
+ for (i = con->first; i <= con->last; i++) {
+ if (con_driver_map[i] == csw) {
+ if (first == -1)
+ first = i;
+ last = i;
+ more = 1;
+ } else if (first != -1)
+ break;
+ }
+
+ if (first == 0 && last == MAX_NR_CONSOLES -1)
+ deflt = 1;
+
+ if (first != -1)
+ unbind_con_driver(csw, first, last, deflt);
+
+ first = -1;
+ last = -1;
+ deflt = 0;
+ }
+
+err:
+ return 0;
+}
+#else
+static inline int vt_bind(struct con_driver *con)
+{
+ return 0;
+}
+static inline int vt_unbind(struct con_driver *con)
+{
+ return 0;
+}
+#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
+static ssize_t store_bind(struct class_device *class_device,
+ const char *buf, size_t count)
+{
+ struct con_driver *con = class_get_devdata(class_device);
+ int bind = simple_strtoul(buf, NULL, 0);
+
+ if (bind)
+ vt_bind(con);
+ else
+ vt_unbind(con);
+
+ return count;
+}
+
+static ssize_t show_bind(struct class_device *class_device, char *buf)
+{
+ struct con_driver *con = class_get_devdata(class_device);
+ int bind = con_is_bound(con->con);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", bind);
+}
+
+static ssize_t show_name(struct class_device *class_device, char *buf)
+{
+ struct con_driver *con = class_get_devdata(class_device);
+
+ return snprintf(buf, PAGE_SIZE, "%s %s\n",
+ (con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
+ con->desc);
+
+}
+
+static struct class_device_attribute class_device_attrs[] = {
+ __ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
+ __ATTR(name, S_IRUGO, show_name, NULL),
+};
+
+static int vtconsole_init_class_device(struct con_driver *con)
+{
+ int i;
+
+ class_set_devdata(con->class_dev, con);
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+ class_device_create_file(con->class_dev,
+ &class_device_attrs[i]);
+
+ return 0;
+}
+
+static void vtconsole_deinit_class_device(struct con_driver *con)
{
int i;
- for(i = 0; i < MAX_NR_CONSOLES; i++)
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+ class_device_remove_file(con->class_dev,
+ &class_device_attrs[i]);
+}
+
+/**
+ * con_is_bound - checks if driver is bound to the console
+ * @csw: console driver
+ *
+ * RETURNS: zero if unbound, nonzero if bound
+ *
+ * Drivers can call this; if it returns zero, they should release
+ * all resources allocated in con_startup().
+ */
+int con_is_bound(const struct consw *csw)
+{
+ int i, bound = 0;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (con_driver_map[i] == csw) {
- module_put(csw->owner);
- con_driver_map[i] = NULL;
+ bound = 1;
+ break;
+ }
+ }
+
+ return bound;
+}
+EXPORT_SYMBOL(con_is_bound);
+
+/**
+ * register_con_driver - register console driver to console layer
+ * @csw: console driver
+ * @first: the first console to take over, minimum value is 0
+ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES - 1
+ *
+ * DESCRIPTION: This function registers a console driver which can later
+ * bind to a range of consoles specified by @first and @last. It will
+ * also initialize the console driver by calling con_startup().
+ */
+int register_con_driver(const struct consw *csw, int first, int last)
+{
+ struct module *owner = csw->owner;
+ struct con_driver *con_driver;
+ const char *desc;
+ int i, retval = 0;
+
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+ acquire_console_sem();
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ /* already registered */
+ if (con_driver->con == csw)
+ retval = -EINVAL;
+ }
+
+ if (retval)
+ goto err;
+
+ desc = csw->con_startup();
+
+ if (!desc)
+ goto err;
+
+ retval = -EINVAL;
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == NULL) {
+ con_driver->con = csw;
+ con_driver->desc = desc;
+ con_driver->node = i;
+ con_driver->flag = CON_DRIVER_FLAG_MODULE |
+ CON_DRIVER_FLAG_INIT;
+ con_driver->first = first;
+ con_driver->last = last;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval)
+ goto err;
+
+ con_driver->class_dev = class_device_create(vtconsole_class, NULL,
+ MKDEV(0, con_driver->node),
+ NULL, "vtcon%i",
+ con_driver->node);
+
+ if (IS_ERR(con_driver->class_dev)) {
+ printk(KERN_WARNING "Unable to create class_device for %s; "
+ "errno = %ld\n", con_driver->desc,
+ PTR_ERR(con_driver->class_dev));
+ con_driver->class_dev = NULL;
+ } else {
+ vtconsole_init_class_device(con_driver);
+ }
+err:
+ release_console_sem();
+ module_put(owner);
+ return retval;
+}
+EXPORT_SYMBOL(register_con_driver);
+
+/**
+ * unregister_con_driver - unregister console driver from console layer
+ * @csw: console driver
+ *
+ * DESCRIPTION: All drivers that register with the console layer must
+ * call this function on exit, or whenever the console driver can no
+ * longer handle console services, such as the framebuffer console
+ * without any loaded framebuffer drivers.
+ *
+ * The driver must be unbound before it is unregistered.
+ */
+int unregister_con_driver(const struct consw *csw)
+{
+ int i, retval = -ENODEV;
+
+ acquire_console_sem();
+
+ /* cannot unregister a bound driver */
+ if (con_is_bound(csw))
+ goto err;
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == csw &&
+ con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+ vtconsole_deinit_class_device(con_driver);
+ class_device_destroy(vtconsole_class,
+ MKDEV(0, con_driver->node));
+ con_driver->con = NULL;
+ con_driver->desc = NULL;
+ con_driver->class_dev = NULL;
+ con_driver->node = 0;
+ con_driver->flag = 0;
+ con_driver->first = 0;
+ con_driver->last = 0;
+ retval = 0;
+ break;
+ }
+ }
+err:
+ release_console_sem();
+ return retval;
+}
+EXPORT_SYMBOL(unregister_con_driver);
+
+/*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become the default driver for newly opened ones.
+ *
+ * take_over_console is basically a register followed by a bind.
+ */
+int take_over_console(const struct consw *csw, int first, int last, int deflt)
+{
+ int err;
+
+ err = register_con_driver(csw, first, last);
+
+ if (!err)
+ bind_con_driver(csw, first, last, deflt);
+
+ return err;
+}
+
+/*
+ * give_up_console is a wrapper around unregister_con_driver. It will
+ * only work if the driver is fully unbound.
+ */
+void give_up_console(const struct consw *csw)
+{
+ unregister_con_driver(csw);
+}
+
+static int __init vtconsole_class_init(void)
+{
+ int i;
+
+ vtconsole_class = class_create(THIS_MODULE, "vtconsole");
+ if (IS_ERR(vtconsole_class)) {
+ printk(KERN_WARNING "Unable to create vt console class; "
+ "errno = %ld\n", PTR_ERR(vtconsole_class));
+ vtconsole_class = NULL;
+ }
+
+ /* Add system drivers to sysfs */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con = &registered_con_driver[i];
+
+ if (con->con && !con->class_dev) {
+ con->class_dev =
+ class_device_create(vtconsole_class, NULL,
+ MKDEV(0, con->node), NULL,
+ "vtcon%i", con->node);
+
+ if (IS_ERR(con->class_dev)) {
+ printk(KERN_WARNING "Unable to create "
+ "class_device for %s; errno = %ld\n",
+ con->desc, PTR_ERR(con->class_dev));
+ con->class_dev = NULL;
+ } else {
+ vtconsole_init_class_device(con);
+ }
}
+ }
+
+ return 0;
}
+postcore_initcall(vtconsole_class_init);
#endif
diff --git a/drivers/char/watchdog/at91_wdt.c b/drivers/char/watchdog/at91_wdt.c
index ac83bc4b019a..00080655533d 100644
--- a/drivers/char/watchdog/at91_wdt.c
+++ b/drivers/char/watchdog/at91_wdt.c
@@ -17,14 +17,15 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
-#define WDT_DEFAULT_TIME 5 /* 5 seconds */
-#define WDT_MAX_TIME 256 /* 256 seconds */
+#define WDT_DEFAULT_TIME 5 /* seconds */
+#define WDT_MAX_TIME 256 /* seconds */
static int wdt_time = WDT_DEFAULT_TIME;
static int nowayout = WATCHDOG_NOWAYOUT;
@@ -32,8 +33,10 @@ static int nowayout = WATCHDOG_NOWAYOUT;
module_param(wdt_time, int, 0);
MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="__MODULE_STRING(WDT_DEFAULT_TIME) ")");
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+#endif
static unsigned long at91wdt_busy;
@@ -138,7 +141,7 @@ static int at91_wdt_ioctl(struct inode *inode, struct file *file,
case WDIOC_SETTIMEOUT:
if (get_user(new_value, p))
return -EFAULT;
-
+
if (at91_wdt_settimeout(new_value))
return -EINVAL;
@@ -196,27 +199,84 @@ static struct miscdevice at91wdt_miscdev = {
.fops = &at91wdt_fops,
};
-static int __init at91_wdt_init(void)
+static int __init at91wdt_probe(struct platform_device *pdev)
{
int res;
- /* Check that the heartbeat value is within range; if not reset to the default */
- if (at91_wdt_settimeout(wdt_time)) {
- at91_wdt_settimeout(WDT_DEFAULT_TIME);
- printk(KERN_INFO "at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time);
- }
+ if (at91wdt_miscdev.dev)
+ return -EBUSY;
+ at91wdt_miscdev.dev = &pdev->dev;
res = misc_register(&at91wdt_miscdev);
if (res)
return res;
- printk("AT91 Watchdog Timer enabled (%d seconds, nowayout=%d)\n", wdt_time, nowayout);
+ printk("AT91 Watchdog Timer enabled (%d seconds%s)\n", wdt_time, nowayout ? ", nowayout" : "");
return 0;
}
+static int __exit at91wdt_remove(struct platform_device *pdev)
+{
+ int res;
+
+ res = misc_deregister(&at91wdt_miscdev);
+ if (!res)
+ at91wdt_miscdev.dev = NULL;
+
+ return res;
+}
+
+static void at91wdt_shutdown(struct platform_device *pdev)
+{
+ at91_wdt_stop();
+}
+
+#ifdef CONFIG_PM
+
+static int at91wdt_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ at91_wdt_stop();
+ return 0;
+}
+
+static int at91wdt_resume(struct platform_device *pdev)
+{
+ if (at91wdt_busy)
+ at91_wdt_start();
+ return 0;
+}
+
+#else
+#define at91wdt_suspend NULL
+#define at91wdt_resume NULL
+#endif
+
+static struct platform_driver at91wdt_driver = {
+ .probe = at91wdt_probe,
+ .remove = __exit_p(at91wdt_remove),
+ .shutdown = at91wdt_shutdown,
+ .suspend = at91wdt_suspend,
+ .resume = at91wdt_resume,
+ .driver = {
+ .name = "at91_wdt",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at91_wdt_init(void)
+{
+ /* Check that the heartbeat value is within range; if not reset to the default */
+ if (at91_wdt_settimeout(wdt_time)) {
+ at91_wdt_settimeout(WDT_DEFAULT_TIME);
+ pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time);
+ }
+
+ return platform_driver_register(&at91wdt_driver);
+}
+
static void __exit at91_wdt_exit(void)
{
- misc_deregister(&at91wdt_miscdev);
+ platform_driver_unregister(&at91wdt_driver);
}
module_init(at91_wdt_init);
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index fa2ba9ebe42a..bfbdbbf3c2f2 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -205,6 +205,23 @@ static int tco_timer_set_heartbeat (int t)
return 0;
}
+static int tco_timer_get_timeleft (int *time_left)
+{
+ unsigned char val;
+
+ spin_lock(&tco_lock);
+
+ /* read the TCO Timer */
+ val = inb (TCO1_RLD);
+ val &= 0x3f;
+
+ spin_unlock(&tco_lock);
+
+ *time_left = (int)((val * 6) / 10);
+
+ return 0;
+}
+
/*
* /dev/watchdog handling
*/
@@ -272,6 +289,7 @@ static int i8xx_tco_ioctl (struct inode *inode, struct file *file,
{
int new_options, retval = -EINVAL;
int new_heartbeat;
+ int time_left;
void __user *argp = (void __user *)arg;
int __user *p = argp;
static struct watchdog_info ident = {
@@ -320,7 +338,7 @@ static int i8xx_tco_ioctl (struct inode *inode, struct file *file,
return -EFAULT;
if (tco_timer_set_heartbeat(new_heartbeat))
- return -EINVAL;
+ return -EINVAL;
tco_timer_keepalive ();
/* Fall */
@@ -329,6 +347,14 @@ static int i8xx_tco_ioctl (struct inode *inode, struct file *file,
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
+ case WDIOC_GETTIMELEFT:
+ {
+ if (tco_timer_get_timeleft(&time_left))
+ return -EINVAL;
+
+ return put_user(time_left, p);
+ }
+
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index 2451edbefece..1f40ecefbf72 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -21,7 +21,7 @@
*/
/*
- * A bells and whistles driver is available from:
+ * A bells and whistles driver is available from:
* http://www.kernel.org/pub/linux/kernel/people/wim/pcwd/pcwd_pci/
*
* More info available at http://www.berkprod.com/ or http://www.pcwatchdog.com/
@@ -390,6 +390,24 @@ static int pcipcwd_get_temperature(int *temperature)
return 0;
}
+static int pcipcwd_get_timeleft(int *time_left)
+{
+ int msb;
+ int lsb;
+
+ /* Read the time that's left before rebooting */
+ /* Note: if the board is not yet armed then we will read 0xFFFF */
+ send_command(CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb);
+
+ *time_left = (msb << 8) + lsb;
+
+ if (debug >= VERBOSE)
+ printk(KERN_DEBUG PFX "Time left before next reboot: %d\n",
+ *time_left);
+
+ return 0;
+}
+
/*
* /dev/watchdog handling
*/
@@ -512,6 +530,16 @@ static int pcipcwd_ioctl(struct inode *inode, struct file *file,
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
+ case WDIOC_GETTIMELEFT:
+ {
+ int time_left;
+
+ if (pcipcwd_get_timeleft(&time_left))
+ return -EFAULT;
+
+ return put_user(time_left, p);
+ }
+
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 3fdfda9324fa..0d072bed501d 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -317,6 +317,19 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, int *temp
return 0;
}
+static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd, int *time_left)
+{
+ unsigned char msb, lsb;
+
+ /* Read the time that's left before rebooting */
+ /* Note: if the board is not yet armed then we will read 0xFFFF */
+ usb_pcwd_send_command(usb_pcwd, CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb);
+
+ *time_left = (msb << 8) + lsb;
+
+ return 0;
+}
+
/*
* /dev/watchdog handling
*/
@@ -422,6 +435,16 @@ static int usb_pcwd_ioctl(struct inode *inode, struct file *file,
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
+ case WDIOC_GETTIMELEFT:
+ {
+ int time_left;
+
+ if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left))
+ return -EFAULT;
+
+ return put_user(time_left, p);
+ }
+
default:
return -ENOIOCTLCMD;
}
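The three hunks above (i8xx_tco, pcwd_pci, pcwd_usb) all wire up WDIOC_GETTIMELEFT, so a watchdog-aware application can ask how many seconds remain before the card fires. A minimal user-space sketch of querying it, assuming the standard <linux/watchdog.h> ioctl interface and a /dev/watchdog node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd, timeleft;

	fd = open("/dev/watchdog", O_RDWR);
	if (fd < 0) {
		perror("/dev/watchdog");
		return 1;
	}

	/* opening the node arms the timer, so pet it right away */
	ioctl(fd, WDIOC_KEEPALIVE, 0);

	if (ioctl(fd, WDIOC_GETTIMELEFT, &timeleft) == 0)
		printf("time left before reset: %d seconds\n", timeleft);
	else
		perror("WDIOC_GETTIMELEFT");

	/* a real client must keep petting, or write 'V' before close
	   if the driver honours the magic-close feature */
	close(fd);
	return 0;
}

Drivers that predate the new command simply fall through to the default case shown above, so the ioctl fails and the caller can fall back to WDIOC_GETTIMEOUT.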
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
new file mode 100644
index 000000000000..a52225470225
--- /dev/null
+++ b/drivers/clocksource/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
+obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
+obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
new file mode 100644
index 000000000000..7ad3be8c0f49
--- /dev/null
+++ b/drivers/clocksource/acpi_pm.c
@@ -0,0 +1,177 @@
+/*
+ * linux/drivers/clocksource/acpi_pm.c
+ *
+ * This file contains the ACPI PM based clocksource.
+ *
+ * This code was largely moved from the i386 timer_pm.c file
+ * which was (C) Dominik Brodowski <linux@brodo.de> 2003
+ * and contained the following comments:
+ *
+ * Driver to use the Power Management Timer (PMTMR) available in some
+ * southbridges as primary timing source for the Linux kernel.
+ *
+ * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
+ * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
+ *
+ * This file is licensed under the GPL v2.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* Number of PMTMR ticks expected during calibration run */
+#define PMTMR_TICKS_PER_SEC 3579545
+
+/*
+ * The I/O port the PMTMR resides at.
+ * The location is detected during setup_arch(),
+ * in arch/i386/acpi/boot.c
+ */
+u32 pmtmr_ioport __read_mostly;
+
+#define ACPI_PM_MASK CLOCKSOURCE_MASK(24) /* limit it to 24 bits */
+
+static inline u32 read_pmtmr(void)
+{
+ /* mask the output to 24 bits */
+ return inl(pmtmr_ioport) & ACPI_PM_MASK;
+}
+
+static cycle_t acpi_pm_read_verified(void)
+{
+ u32 v1 = 0, v2 = 0, v3 = 0;
+
+ /*
+ * It has been reported that because of various broken
+ * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM clock
+ * source is not latched, you must read it multiple
+ * times to ensure a safe value is read:
+ */
+ do {
+ v1 = read_pmtmr();
+ v2 = read_pmtmr();
+ v3 = read_pmtmr();
+ } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
+ || (v3 > v1 && v3 < v2));
+
+ return (cycle_t)v2;
+}
+
+static cycle_t acpi_pm_read(void)
+{
+ return (cycle_t)read_pmtmr();
+}
+
+static struct clocksource clocksource_acpi_pm = {
+ .name = "acpi_pm",
+ .rating = 200,
+ .read = acpi_pm_read,
+ .mask = (cycle_t)ACPI_PM_MASK,
+ .mult = 0, /* to be calculated */
+ .shift = 22,
+ .is_continuous = 1,
+};
+
+
+#ifdef CONFIG_PCI
+static int acpi_pm_good;
+static int __init acpi_pm_good_setup(char *__str)
+{
+ acpi_pm_good = 1;
+ return 1;
+}
+__setup("acpi_pm_good", acpi_pm_good_setup);
+
+static inline void acpi_pm_need_workaround(void)
+{
+ clocksource_acpi_pm.read = acpi_pm_read_verified;
+ clocksource_acpi_pm.rating = 110;
+}
+
+/*
+ * PIIX4 Errata:
+ *
+ * The power management timer may return improper results when read.
+ * Although the timer value settles properly after incrementing,
+ * while incrementing there is a 3 ns window every 69.8 ns where the
+ * timer value is indeterminate (a 4.2% chance that the data will be
+ * incorrect when read). As a result, the ACPI free running count up
+ * timer specification is violated due to erroneous reads.
+ */
+static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
+{
+ u8 rev;
+
+ if (acpi_pm_good)
+ return;
+
+ pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
+ /* the bug has been fixed in PIIX4M */
+ if (rev < 3) {
+ printk(KERN_WARNING "* Found PM-Timer Bug on the chipset."
+ " Due to workarounds for a bug,\n"
+ "* this clock source is slow. Consider trying"
+ " other clock sources\n");
+
+ acpi_pm_need_workaround();
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
+ acpi_pm_check_blacklist);
+
+static void __devinit acpi_pm_check_graylist(struct pci_dev *dev)
+{
+ if (acpi_pm_good)
+ return;
+
+ printk(KERN_WARNING "* The chipset may have PM-Timer Bug. Due to"
+ " workarounds for a bug,\n"
+ "* this clock source is slow. If you are sure your timer"
+ " does not have\n"
+ "* this bug, please use \"acpi_pm_good\" to disable the"
+ " workaround\n");
+
+ acpi_pm_need_workaround();
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ acpi_pm_check_graylist);
+#endif
+
+
+static int __init init_acpi_pm_clocksource(void)
+{
+ u32 value1, value2;
+ unsigned int i;
+
+ if (!pmtmr_ioport)
+ return -ENODEV;
+
+ clocksource_acpi_pm.mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC,
+ clocksource_acpi_pm.shift);
+
+ /* "verify" this timing source: */
+ value1 = read_pmtmr();
+ for (i = 0; i < 10000; i++) {
+ value2 = read_pmtmr();
+ if (value2 == value1)
+ continue;
+ if (value2 > value1)
+ goto pm_good;
+ if ((value2 < value1) && ((value2) < 0xFFF))
+ goto pm_good;
+ printk(KERN_INFO "PM-Timer had inconsistent results:"
+ " %#x, %#x - aborting.\n", value1, value2);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "PM-Timer had no reasonable result:"
+ " %#x - aborting.\n", value1);
+ return -ENODEV;
+
+pm_good:
+ return clocksource_register(&clocksource_acpi_pm);
+}
+
+module_init(init_acpi_pm_clocksource);
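Once registered, the generic timekeeping code converts raw PM-timer deltas to nanoseconds as (cycles * mult) >> shift, which is why init_acpi_pm_clocksource() fills in mult with clocksource_hz2mult() before registering. A freestanding sketch of that arithmetic, using a local hz2mult() helper that only approximates the kernel's (names and rounding here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* rough stand-in for the kernel's clocksource_hz2mult() */
static uint32_t hz2mult(uint32_t hz, unsigned int shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t mult = hz2mult(3579545, 22);	/* PMTMR_TICKS_PER_SEC, shift 22 */
	uint32_t cycles = 3579545 / 2;		/* half a second of PM-timer ticks */
	uint64_t ns = ((uint64_t)cycles * mult) >> 22;

	printf("mult = %u, %u cycles -> %llu ns\n",
	       mult, cycles, (unsigned long long)ns);
	return 0;
}

For half a second's worth of ticks this should print roughly 500000000 ns; the 24-bit ACPI_PM_MASK above merely bounds how large a delta can accumulate between reads before the counter wraps.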
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
new file mode 100644
index 000000000000..bf4d3d50d1c4
--- /dev/null
+++ b/drivers/clocksource/cyclone.c
@@ -0,0 +1,119 @@
+#include <linux/clocksource.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+#include "mach_timer.h"
+
+#define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
+#define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
+#define CYCLONE_MPCS_OFFSET 0x51A8 /* offset to select register */
+#define CYCLONE_MPMC_OFFSET 0x51D0 /* offset to count register */
+#define CYCLONE_TIMER_FREQ 99780000 /* 100 MHz, but not really */
+#define CYCLONE_TIMER_MASK CLOCKSOURCE_MASK(32) /* 32 bit mask */
+
+int use_cyclone = 0;
+static void __iomem *cyclone_ptr;
+
+static cycle_t read_cyclone(void)
+{
+ return (cycle_t)readl(cyclone_ptr);
+}
+
+static struct clocksource clocksource_cyclone = {
+ .name = "cyclone",
+ .rating = 250,
+ .read = read_cyclone,
+ .mask = CYCLONE_TIMER_MASK,
+ .mult = 10,
+ .shift = 0,
+ .is_continuous = 1,
+};
+
+static int __init init_cyclone_clocksource(void)
+{
+ unsigned long base; /* saved value from CBAR */
+ unsigned long offset;
+ u32 __iomem* volatile cyclone_timer; /* Cyclone MPMC0 register */
+ u32 __iomem* reg;
+ int i;
+
+ /* make sure we're on a summit box: */
+ if (!use_cyclone)
+ return -ENODEV;
+
+ printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
+
+ /* find base address: */
+ offset = CYCLONE_CBAR_ADDR;
+ reg = ioremap_nocache(offset, sizeof(reg));
+ if (!reg) {
+ printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
+ return -ENODEV;
+ }
+ /* even on 64bit systems, this is only 32bits: */
+ base = readl(reg);
+ if (!base) {
+ printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
+ return -ENODEV;
+ }
+ iounmap(reg);
+
+ /* setup PMCC: */
+ offset = base + CYCLONE_PMCC_OFFSET;
+ reg = ioremap_nocache(offset, sizeof(reg));
+ if (!reg) {
+ printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
+ return -ENODEV;
+ }
+ writel(0x00000001,reg);
+ iounmap(reg);
+
+ /* setup MPCS: */
+ offset = base + CYCLONE_MPCS_OFFSET;
+ reg = ioremap_nocache(offset, sizeof(reg));
+ if (!reg) {
+ printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
+ return -ENODEV;
+ }
+ writel(0x00000001,reg);
+ iounmap(reg);
+
+ /* map in cyclone_timer: */
+ offset = base + CYCLONE_MPMC_OFFSET;
+ cyclone_timer = ioremap_nocache(offset, sizeof(u64));
+ if (!cyclone_timer) {
+ printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
+ return -ENODEV;
+ }
+
+ /* quick test to make sure its ticking: */
+ for (i = 0; i < 3; i++){
+ u32 old = readl(cyclone_timer);
+ int stall = 100;
+
+ while (stall--)
+ barrier();
+
+ if (readl(cyclone_timer) == old) {
+ printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
+ iounmap(cyclone_timer);
+ cyclone_timer = NULL;
+ return -ENODEV;
+ }
+ }
+ cyclone_ptr = cyclone_timer;
+
+ /* sort out mult/shift values: */
+ clocksource_cyclone.shift = 22;
+ clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
+ clocksource_cyclone.shift);
+
+ return clocksource_register(&clocksource_cyclone);
+}
+
+module_init(init_cyclone_clocksource);
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
new file mode 100644
index 000000000000..d418b8297211
--- /dev/null
+++ b/drivers/clocksource/scx200_hrt.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2006 Jim Cromie
+ *
+ * This is a clocksource driver for the Geode SCx200's 1 or 27 MHz
+ * high-resolution timer. The Geode SC-1100 (at least) has a buggy
+ * time stamp counter (TSC), which loses time unless 'idle=poll' is
+ * given as a boot-arg. In its absence, the Generic Timekeeping code
+ * will detect and de-rate the bad TSC, allowing this timer to take
+ * over timekeeping duties.
+ *
+ * Based on work by John Stultz, and Ted Phelps (in a 2.6.12-rc6 patch)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/scx200.h>
+
+#define NAME "scx200_hrt"
+
+static int mhz27;
+module_param(mhz27, int, 0); /* load time only */
+MODULE_PARM_DESC(mhz27, "count at 27.0 MHz (default is 1.0 MHz)");
+
+static int ppm;
+module_param(ppm, int, 0); /* load time only */
+MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
+
+/* HiRes Timer configuration register address */
+#define SCx200_TMCNFG_OFFSET (SCx200_TIMER_OFFSET + 5)
+
+/* and config settings */
+#define HR_TMEN (1 << 0) /* timer interrupt enable */
+#define HR_TMCLKSEL (1 << 1) /* 1|0 counts at 27|1 MHz */
+#define HR_TM27MPD (1 << 2) /* 1 turns off input clock (power-down) */
+
+/* The base timer frequency, * 27 if selected */
+#define HRT_FREQ 1000000
+
+static cycle_t read_hrt(void)
+{
+ /* Read the timer value */
+ return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
+}
+
+#define HRT_SHIFT_1 22
+#define HRT_SHIFT_27 26
+
+static struct clocksource cs_hrt = {
+ .name = "scx200_hrt",
+ .rating = 250,
+ .read = read_hrt,
+ .mask = CLOCKSOURCE_MASK(32),
+ .is_continuous = 1,
+ /* mult, shift are set based on mhz27 flag */
+};
+
+static int __init init_hrt_clocksource(void)
+{
+ /* Make sure scx200 has initialized the configuration block */
+ if (!scx200_cb_present())
+ return -ENODEV;
+
+ /* Reserve the timer's ISA io-region for ourselves */
+ if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
+ SCx200_TIMER_SIZE,
+ "NatSemi SCx200 High-Resolution Timer")) {
+ printk(KERN_WARNING NAME ": unable to lock timer region\n");
+ return -ENODEV;
+ }
+
+ /* write timer config */
+ outb(HR_TMEN | (mhz27 ? HR_TMCLKSEL : 0),
+ scx200_cb_base + SCx200_TMCNFG_OFFSET);
+
+ if (mhz27) {
+ cs_hrt.shift = HRT_SHIFT_27;
+ cs_hrt.mult = clocksource_hz2mult((HRT_FREQ + ppm) * 27,
+ cs_hrt.shift);
+ } else {
+ cs_hrt.shift = HRT_SHIFT_1;
+ cs_hrt.mult = clocksource_hz2mult(HRT_FREQ + ppm,
+ cs_hrt.shift);
+ }
+ printk(KERN_INFO "enabling scx200 high-res timer (%s MHz +%d ppm)\n",
+ mhz27 ? "27":"1", ppm);
+
+ return clocksource_register(&cs_hrt);
+}
+
+module_init(init_hrt_clocksource);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("clocksource on SCx200 HiRes Timer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 44d1eca83a72..35e0b9ceecf7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1497,6 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_update_policy);
+#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -1532,10 +1533,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cpufreq_cpu_notifier =
+static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
{
.notifier_call = cpufreq_cpu_callback,
};
+#endif /* CONFIG_HOTPLUG_CPU */
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
@@ -1596,7 +1598,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
}
if (!ret) {
- register_cpu_notifier(&cpufreq_cpu_notifier);
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
dprintk("driver %s up and running\n", driver_data->name);
cpufreq_debug_enable_ratelimit();
}
@@ -1628,7 +1630,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
dprintk("unregistering driver %s\n", driver->name);
sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
- unregister_cpu_notifier(&cpufreq_cpu_notifier);
+ unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
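Both this change and the cpufreq_stats one below move from register_cpu_notifier() to register_hotcpu_notifier(), which degrades to a no-op when CPU hotplug is not configured, and mark the notifier __cpuinitdata so it can be discarded when unused. A stripped-down sketch of the same pattern with a placeholder callback (kept unconditional here for brevity, whereas the patch also compiles the cpufreq callback out under !CONFIG_HOTPLUG_CPU):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE)
		printk(KERN_DEBUG "example: cpu %u came up\n", cpu);
	else if (action == CPU_DEAD)
		printk(KERN_DEBUG "example: cpu %u went down\n", cpu);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	/* no-op (and no notifier list traffic) without CPU hotplug */
	register_hotcpu_notifier(&example_cpu_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_hotcpu_notifier(&example_cpu_notifier);
}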
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c576c0b3f452..145061b8472a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,7 +350,7 @@ __init cpufreq_stats_init(void)
return ret;
}
- register_cpu_notifier(&cpufreq_stat_cpu_notifier);
+ register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
@@ -368,7 +368,7 @@ __exit cpufreq_stats_exit(void)
CPUFREQ_POLICY_NOTIFIER);
cpufreq_unregister_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
- unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
+ unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 5158a9db4bc5..17ee684144f9 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -60,15 +60,14 @@
#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
struct aes_ctx {
- uint32_t e_data[AES_EXTENDED_KEY_SIZE];
- uint32_t d_data[AES_EXTENDED_KEY_SIZE];
struct {
struct cword encrypt;
struct cword decrypt;
} cword;
- uint32_t *E;
- uint32_t *D;
+ u32 *D;
int key_length;
+ u32 E[AES_EXTENDED_KEY_SIZE];
+ u32 d_data[AES_EXTENDED_KEY_SIZE];
};
/* ====== Key management routines ====== */
@@ -282,19 +281,20 @@ aes_hw_extkey_available(uint8_t key_len)
return 0;
}
-static inline struct aes_ctx *aes_ctx(void *ctx)
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
+ unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
unsigned long align = PADLOCK_ALIGNMENT;
if (align <= crypto_tfm_ctx_alignment())
align = 1;
- return (struct aes_ctx *)ALIGN((unsigned long)ctx, align);
+ return (struct aes_ctx *)ALIGN(addr, align);
}
-static int
-aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len, u32 *flags)
{
- struct aes_ctx *ctx = aes_ctx(ctx_arg);
+ struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE];
@@ -312,8 +312,7 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
* itself we must supply the plain key for both encryption
* and decryption.
*/
- ctx->E = ctx->e_data;
- ctx->D = ctx->e_data;
+ ctx->D = ctx->E;
E_KEY[0] = le32_to_cpu(key[0]);
E_KEY[1] = le32_to_cpu(key[1]);
@@ -414,24 +413,22 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
return iv;
}
-static void
-aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct aes_ctx *ctx = aes_ctx(ctx_arg);
+ struct aes_ctx *ctx = aes_ctx(tfm);
padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
}
-static void
-aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct aes_ctx *ctx = aes_ctx(ctx_arg);
+ struct aes_ctx *ctx = aes_ctx(tfm);
padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
}
static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+ struct aes_ctx *ctx = aes_ctx(desc->tfm);
padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -440,7 +437,7 @@ static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+ struct aes_ctx *ctx = aes_ctx(desc->tfm);
padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -449,7 +446,7 @@ static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+ struct aes_ctx *ctx = aes_ctx(desc->tfm);
u8 *iv;
iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
@@ -462,7 +459,7 @@ static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+ struct aes_ctx *ctx = aes_ctx(desc->tfm);
padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 0fdf7fbd6495..2801d14a5e42 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -824,10 +824,9 @@ static int __init ioat_init_module(void)
{
/* it's currently unsafe to unload this module */
/* if forced, worst case is that rmmod hangs */
- if (THIS_MODULE != NULL)
- THIS_MODULE->unsafe = 1;
+ __unsafe(THIS_MODULE);
- return pci_module_init(&ioat_pci_drv);
+ return pci_module_init(&ioat_pci_drv);
}
module_init(ioat_init_module);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 65b2709f750c..facc1ccb8338 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -341,7 +341,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 94be3d797e61..a6ce7abf8602 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -358,7 +358,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index f72120d08c4c..b4ccdfc01203 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -253,7 +253,7 @@ set_fan(min2, fan_min[1], LM80_REG_FAN_MIN(2), fan_div[1]);
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index e229daf666de..e6c1b638c971 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -421,7 +421,7 @@ static void set_fan_min(struct device *dev, const char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan clock divider. This follows the principle
- of least suprise; the user doesn't expect the fan minimum to change just
+ of least surprise; the user doesn't expect the fan minimum to change just
because the divider changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6f3fda73f70c..063f71c5f07e 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -380,7 +380,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 7732aec54594..825e8f72698f 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -207,7 +207,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan clock divider. This follows the principle
- of least suprise; the user doesn't expect the fan minimum to change just
+ of least surprise; the user doesn't expect the fan minimum to change just
because the divider changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 71fb7f1af8f5..79368d53c363 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -781,7 +781,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t
store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index e4c700356c44..7be469ed0f8f 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -630,7 +630,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t
store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 4ef884c216e2..e407c74bda35 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -463,7 +463,7 @@ show_fan_div(struct device *dev, struct device_attribute *attr,
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t
store_fan_div(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3e0d04d5a800..8b46ef7d9ff8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -488,7 +488,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
dev_err(&dev->dev, "SMBus base address uninitialized, "
"upgrade BIOS\n");
err = -ENODEV;
- goto exit_disable;
+ goto exit;
}
err = pci_request_region(dev, SMBBAR, i801_driver.name);
@@ -496,7 +496,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
dev_err(&dev->dev, "Failed to request SMBus region "
"0x%lx-0x%lx\n", i801_smba,
pci_resource_end(dev, SMBBAR));
- goto exit_disable;
+ goto exit;
}
pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
@@ -520,11 +520,12 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
err = i2c_add_adapter(&i801_adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
- goto exit_disable;
+ goto exit_release;
}
+ return 0;
-exit_disable:
- pci_disable_device(dev);
+exit_release:
+ pci_release_region(dev, SMBBAR);
exit:
return err;
}
@@ -533,7 +534,10 @@ static void __devexit i801_remove(struct pci_dev *dev)
{
i2c_del_adapter(&i801_adapter);
pci_release_region(dev, SMBBAR);
- pci_disable_device(dev);
+ /*
+ * do not call pci_disable_device(dev) since it can cause hard hangs on
+ * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
+ */
}
static struct pci_driver i801_driver = {
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a5017de72da5..f033d732f387 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -37,7 +37,7 @@
* Version 1.15 convert all calls to ide_raw_taskfile
* since args will return register content.
* Version 1.16 added suspend-resume-checkpower
- * Version 1.17 do flush on standy, do flush on ATA < ATA6
+ * Version 1.17 do flush on standby, do flush on ATA < ATA6
* fix wcache setup.
*/
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 622a55c72f03..26ceab1e90bb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -505,7 +505,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
}
}
- if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ)
+ if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
try_to_flush_leftover_data(drive);
if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
@@ -959,7 +959,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
SELECT_DRIVE(drive);
HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
- rc = ide_wait_not_busy(HWIF(drive), 10000);
+ rc = ide_wait_not_busy(HWIF(drive), 100000);
if (rc)
printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
}
@@ -1665,7 +1665,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
* Initialize a request before we fill it in and send it down to
* ide_do_drive_cmd. Commands must be set up by this function. Right
* now it doesn't do a lot, but if that changes abusers will have a
- * nasty suprise.
+ * nasty surprise.
*/
void ide_init_drive_cmd (struct request *rq)
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 97a49e77a8f1..32117f0ec5c0 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -597,6 +597,10 @@ u8 eighty_ninty_three (ide_drive_t *drive)
{
if(HWIF(drive)->udma_four == 0)
return 0;
+
+ /* Check for SATA but only if we are ATA5 or higher */
+ if (drive->id->hw_config == 0 && (drive->id->major_rev_num & 0x7FE0))
+ return 1;
if (!(drive->id->hw_config & 0x6000))
return 0;
#ifndef CONFIG_IDEDMA_IVB
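eighty_ninty_three() decides whether the drive side of the link may run the faster UDMA modes. The new test covers SATA devices, which report IDENTIFY word 93 (id->hw_config) as zero and therefore used to fail the cable check even though no 40-wire cable is involved; they are now accepted when word 80 (id->major_rev_num) claims ATA-5 or newer. A hedged sketch of that decision over raw IDENTIFY words, outside the ide_drive_t plumbing (the real function also checks the host's udma_four flag first; the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/*
 * id[80]: major version number, bit N set => ATA/ATAPI-N supported
 * id[93]: hardware reset result / cable detection, reads 0 on SATA
 */
static bool drive_side_allows_udma66(const uint16_t *id)
{
	uint16_t major = id[80];
	uint16_t hwcfg = id[93];

	/* SATA: no hw_config word, but claims ATA-5..ATA-14 (bits 5..14) */
	if (hwcfg == 0 && (major & 0x7FE0))
		return true;

	/* PATA: fall back to the 80-wire cable-detect bits in word 93 */
	return (hwcfg & 0x6000) != 0;
}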
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 16a143133f93..7ddb11828731 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -485,7 +485,7 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
unsigned long flags;
u8 err = 0;
- local_irq_set(flags);
+ local_irq_save(flags);
printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
if (stat & BUSY_STAT)
printk("Busy ");
@@ -567,7 +567,7 @@ static u8 ide_dump_atapi_status(ide_drive_t *drive, const char *msg, u8 stat)
status.all = stat;
error.all = 0;
- local_irq_set(flags);
+ local_irq_save(flags);
printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
if (status.b.bsy)
printk("Busy ");
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h
index 2fcfac6e967a..c0864b1e9228 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timing.h
@@ -220,6 +220,12 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
return -EINVAL;
/*
+ * Copy the timing from the table.
+ */
+
+ *t = *s;
+
+/*
* If the drive is an EIDE drive, it can tell us it needs extended
* PIO/MWDMA cycle timing.
*/
@@ -247,7 +253,7 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
* Convert the timing to bus clock counts.
*/
- ide_timing_quantize(s, t, T, UT);
+ ide_timing_quantize(t, t, T, UT);
/*
* Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index c743e68c33aa..3edd7060510f 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -22,7 +22,7 @@ struct chipset_bus_clock_list_entry {
u8 ultra_settings;
};
-static struct chipset_bus_clock_list_entry aec6xxx_33_base [] = {
+static const struct chipset_bus_clock_list_entry aec6xxx_33_base [] = {
{ XFER_UDMA_6, 0x31, 0x07 },
{ XFER_UDMA_5, 0x31, 0x06 },
{ XFER_UDMA_4, 0x31, 0x05 },
@@ -42,7 +42,7 @@ static struct chipset_bus_clock_list_entry aec6xxx_33_base [] = {
{ 0, 0x00, 0x00 }
};
-static struct chipset_bus_clock_list_entry aec6xxx_34_base [] = {
+static const struct chipset_bus_clock_list_entry aec6xxx_34_base [] = {
{ XFER_UDMA_6, 0x41, 0x06 },
{ XFER_UDMA_5, 0x41, 0x05 },
{ XFER_UDMA_4, 0x41, 0x04 },
@@ -254,7 +254,8 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch
if (dev->resource[PCI_ROM_RESOURCE].start) {
pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start);
+ printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name,
+ (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
}
if (bus_speed <= 33)
@@ -425,12 +426,12 @@ static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_devi
return d->init_setup(dev, d);
}
-static struct pci_device_id aec62xx_pci_tbl[] = {
- { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+static const struct pci_device_id aec62xx_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R), 2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865), 3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R), 4 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl);
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 6e9dbf4d8077..85007cb12c52 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -75,6 +75,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 },
+ { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 },
{ 0 }
};
@@ -490,7 +491,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
/* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
- /* 18 */ DECLARE_AMD_DEV("AMD5536"),
+ /* 18 */ DECLARE_NV_DEV("NFORCE-MCP65"),
+ /* 19 */ DECLARE_AMD_DEV("AMD5536"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -528,7 +530,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 19 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 3d9c7afc8695..92b7b1549b16 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -190,14 +190,6 @@ static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count)
#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) */
/*
- * Registers and masks for easy access by drive index:
- */
-#if 0
-static u8 prefetch_regs[4] = {CNTRL, CNTRL, ARTTIM23, ARTTIM23};
-static u8 prefetch_masks[4] = {CNTRL_DIS_RA0, CNTRL_DIS_RA1, ARTTIM23_DIS_RA2, ARTTIM23_DIS_RA3};
-#endif
-
-/*
* This routine writes the prepared setup/active/recovery counts
* for a drive into the cmd646 chipset registers to active them.
*/
@@ -606,13 +598,6 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
-#ifdef __i386__
- if (dev->resource[PCI_ROM_RESOURCE].start) {
- pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start);
- }
-#endif
-
switch(dev->device) {
case PCI_DEVICE_ID_CMD_643:
break;
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index be334da7a754..7da550281cf2 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -176,7 +176,7 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
pci_write_config_dword(dev, PCI_ROM_ADDRESS,
dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
printk(KERN_INFO "HPT345: ROM enabled at 0x%08lx\n",
- dev->resource[PCI_ROM_RESOURCE].start);
+ (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
}
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
} else {
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index acd63173199b..5a8334d134fb 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -313,8 +313,8 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
if (dev->resource[PCI_ROM_RESOURCE].start) {
pci_write_config_dword(dev, PCI_ROM_ADDRESS,
dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n",
- name, dev->resource[PCI_ROM_RESOURCE].start);
+ printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name,
+ (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
}
#ifdef CONFIG_PPC_PMAC
@@ -338,6 +338,8 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
hwif->ultra_mask = 0x7f;
hwif->mwdma_mask = 0x07;
+ hwif->err_stops_fifo = 1;
+
hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate;
hwif->ide_dma_lostirq = &pdcnew_ide_dma_lostirq;
hwif->ide_dma_timeout = &pdcnew_ide_dma_timeout;
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 7ce5bf783688..1e209d8f9437 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -101,31 +101,6 @@ static const char *pdc_quirk_drives[] = {
#define MC1 0x02 /* DMA"C" timing */
#define MC0 0x01 /* DMA"C" timing */
-#if 0
- unsigned long bibma = pci_resource_start(dev, 4);
- u8 hi = 0, lo = 0;
-
- u8 sc1c = inb_p((u16)bibma + 0x1c);
- u8 sc1e = inb_p((u16)bibma + 0x1e);
- u8 sc1f = inb_p((u16)bibma + 0x1f);
-
- p += sprintf(p, "Host Mode : %s\n",
- (sc1f & 0x08) ? "Tri-Stated" : "Normal");
- p += sprintf(p, "Bus Clocking : %s\n",
- ((sc1f & 0xC0) == 0xC0) ? "100 External" :
- ((sc1f & 0x80) == 0x80) ? "66 External" :
- ((sc1f & 0x40) == 0x40) ? "33 External" : "33 PCI Internal");
- p += sprintf(p, "IO pad select : %s mA\n",
- ((sc1c & 0x03) == 0x03) ? "10" :
- ((sc1c & 0x02) == 0x02) ? "8" :
- ((sc1c & 0x01) == 0x01) ? "6" :
- ((sc1c & 0x00) == 0x00) ? "4" : "??");
- hi = sc1e >> 4;
- lo = sc1e & 0xf;
- p += sprintf(p, "Status Polling Period : %d\n", hi);
- p += sprintf(p, "Interrupt Check Status Polling Delay : %d\n", lo);
-#endif
-
static u8 pdc202xx_ratemask (ide_drive_t *drive)
{
u8 mode;
@@ -370,7 +345,6 @@ chipset_is_set:
if (!(speed)) {
/* restore original pci-config space */
pci_write_config_dword(dev, drive_pci, drive_conf);
- hwif->tuneproc(drive, 5);
return 0;
}
@@ -415,8 +389,6 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
if (drive->addressing == 1) {
struct request *rq = HWGROUP(drive)->rq;
ide_hwif_t *hwif = HWIF(drive);
-// struct pci_dev *dev = hwif->pci_dev;
-// unsgned long high_16 = pci_resource_start(dev, 4);
unsigned long high_16 = hwif->dma_master;
unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
u32 word_count = 0;
@@ -436,7 +408,6 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
{
if (drive->addressing == 1) {
ide_hwif_t *hwif = HWIF(drive);
-// unsigned long high_16 = pci_resource_start(hwif->pci_dev, 4);
unsigned long high_16 = hwif->dma_master;
unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
u8 clock = 0;
@@ -453,8 +424,6 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
-// struct pci_dev *dev = hwif->pci_dev;
-// unsigned long high_16 = pci_resource_start(dev, 4);
unsigned long high_16 = hwif->dma_master;
u8 dma_stat = hwif->INB(hwif->dma_status);
u8 sc1d = hwif->INB((high_16 + 0x001d));
@@ -492,12 +461,7 @@ static int pdc202xx_ide_dma_timeout(ide_drive_t *drive)
static void pdc202xx_reset_host (ide_hwif_t *hwif)
{
-#ifdef CONFIG_BLK_DEV_IDEDMA
-// unsigned long high_16 = hwif->dma_base - (8*(hwif->channel));
unsigned long high_16 = hwif->dma_master;
-#else /* !CONFIG_BLK_DEV_IDEDMA */
- unsigned long high_16 = pci_resource_start(hwif->pci_dev, 4);
-#endif /* CONFIG_BLK_DEV_IDEDMA */
u8 udma_speed_flag = hwif->INB(high_16|0x001f);
hwif->OUTB((udma_speed_flag | 0x10), (high_16|0x001f));
@@ -516,98 +480,20 @@ static void pdc202xx_reset (ide_drive_t *drive)
pdc202xx_reset_host(hwif);
pdc202xx_reset_host(mate);
-#if 0
- /*
- * FIXME: Have to kick all the drives again :-/
- * What a pain in the ACE!
- */
- if (hwif->present) {
- u16 hunit = 0;
- for (hunit = 0; hunit < MAX_DRIVES; ++hunit) {
- ide_drive_t *hdrive = &hwif->drives[hunit];
- if (hdrive->present) {
- if (hwif->ide_dma_check)
- hwif->ide_dma_check(hdrive);
- else
- hwif->tuneproc(hdrive, 5);
- }
- }
- }
- if (mate->present) {
- u16 munit = 0;
- for (munit = 0; munit < MAX_DRIVES; ++munit) {
- ide_drive_t *mdrive = &mate->drives[munit];
- if (mdrive->present) {
- if (mate->ide_dma_check)
- mate->ide_dma_check(mdrive);
- else
- mate->tuneproc(mdrive, 5);
- }
- }
- }
-#else
hwif->tuneproc(drive, 5);
-#endif
-}
-
-/*
- * Since SUN Cobalt is attempting to do this operation, I should disclose
- * this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
- * HOTSWAP ATA Infrastructure.
- */
-static int pdc202xx_tristate (ide_drive_t * drive, int state)
-{
- ide_hwif_t *hwif = HWIF(drive);
-// unsigned long high_16 = hwif->dma_base - (8*(hwif->channel));
- unsigned long high_16 = hwif->dma_master;
- u8 sc1f = hwif->INB(high_16|0x001f);
-
- if (!hwif)
- return -EINVAL;
-
-// hwif->bus_state = state;
-
- if (state) {
- hwif->OUTB(sc1f | 0x08, (high_16|0x001f));
- } else {
- hwif->OUTB(sc1f & ~0x08, (high_16|0x001f));
- }
- return 0;
}
-static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
+ const char *name)
{
+ /* This doesn't appear needed */
if (dev->resource[PCI_ROM_RESOURCE].start) {
pci_write_config_dword(dev, PCI_ROM_ADDRESS,
dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n",
- name, dev->resource[PCI_ROM_RESOURCE].start);
+ printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name,
+ (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
}
- /*
- * software reset - this is required because the bios
- * will set UDMA timing on if the hdd supports it. The
- * user may want to turn udma off. A bug in the pdc20262
- * is that it cannot handle a downgrade in timing from
- * UDMA to DMA. Disk accesses after issuing a set
- * feature command will result in errors. A software
- * reset leaves the timing registers intact,
- * but resets the drives.
- */
-#if 0
- if ((dev->device == PCI_DEVICE_ID_PROMISE_20267) ||
- (dev->device == PCI_DEVICE_ID_PROMISE_20265) ||
- (dev->device == PCI_DEVICE_ID_PROMISE_20263) ||
- (dev->device == PCI_DEVICE_ID_PROMISE_20262)) {
- unsigned long high_16 = pci_resource_start(dev, 4);
- byte udma_speed_flag = inb(high_16 + 0x001f);
- outb(udma_speed_flag | 0x10, high_16 + 0x001f);
- mdelay(100);
- outb(udma_speed_flag & ~0x10, high_16 + 0x001f);
- mdelay(2000); /* 2 seconds ?! */
- }
-
-#endif
return dev->irq;
}
@@ -624,10 +510,8 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
hwif->tuneproc = &config_chipset_for_pio;
hwif->quirkproc = &pdc202xx_quirkproc;
- if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) {
- hwif->busproc = &pdc202xx_tristate;
+ if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246)
hwif->resetproc = &pdc202xx_reset;
- }
hwif->speedproc = &pdc202xx_tune_chipset;
@@ -637,6 +521,8 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
hwif->mwdma_mask = 0x07;
hwif->swdma_mask = 0x07;
+ hwif->err_stops_fifo = 1;
+
hwif->ide_dma_check = &pdc202xx_config_drive_xfer_rate;
hwif->ide_dma_lostirq = &pdc202xx_ide_dma_lostirq;
hwif->ide_dma_timeout = &pdc202xx_ide_dma_timeout;
@@ -725,19 +611,6 @@ static int __devinit init_setup_pdc202ata4(struct pci_dev *dev,
"mirror fixed.\n", d->name);
}
}
-
-#if 0
- if (dev->device == PCI_DEVICE_ID_PROMISE_20262)
- if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
- (tmp & e->mask) != e->val))
-
- if (d->enablebits[0].reg != d->enablebits[1].reg) {
- d->enablebits[0].reg = d->enablebits[1].reg;
- d->enablebits[0].mask = d->enablebits[1].mask;
- d->enablebits[0].val = d->enablebits[1].val;
- }
-#endif
-
return ide_setup_pci_device(dev, d);
}
@@ -752,22 +625,6 @@ static int __devinit init_setup_pdc20265(struct pci_dev *dev,
"attached to I2O RAID controller.\n");
return -ENODEV;
}
-
-#if 0
- {
- u8 pri = 0, sec = 0;
-
- if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
- (tmp & e->mask) != e->val))
-
- if (d->enablebits[0].reg != d->enablebits[1].reg) {
- d->enablebits[0].reg = d->enablebits[1].reg;
- d->enablebits[0].mask = d->enablebits[1].mask;
- d->enablebits[0].val = d->enablebits[1].val;
- }
- }
-#endif
-
return ide_setup_pci_device(dev, d);
}
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index e9b83e1a3028..7fac6f57b5d6 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -222,6 +222,8 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
unsigned long flags;
u16 master_data;
u8 slave_data;
+ static DEFINE_SPINLOCK(tune_lock);
+
/* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
@@ -230,7 +232,13 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
{ 2, 3 }, };
pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
- spin_lock_irqsave(&ide_lock, flags);
+
+ /*
+ * Master vs slave is synchronized above us but the slave register is
+ * shared by the two hwifs so the corner case of two slave timeouts in
+ * parallel must be locked.
+ */
+ spin_lock_irqsave(&tune_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
master_data = master_data | 0x4000;
@@ -250,7 +258,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&ide_lock, flags);
+ spin_unlock_irqrestore(&tune_lock, flags);
}
/**
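Swapping the global ide_lock for a file-local tune_lock shrinks the critical section to the one thing that actually needs serialising: the slave timing register shared by both PIIX channels. A minimal sketch of that pattern with a placeholder register offset (not the PIIX one):

#include <linux/spinlock.h>
#include <linux/pci.h>

/* one lock for the single config register both channels share */
static DEFINE_SPINLOCK(tune_lock);

static void set_shared_timing_bits(struct pci_dev *dev, int reg, u16 bits)
{
	unsigned long flags;
	u16 val;

	spin_lock_irqsave(&tune_lock, flags);
	pci_read_config_word(dev, reg, &val);
	pci_write_config_word(dev, reg, val | bits);
	spin_unlock_irqrestore(&tune_lock, flags);
}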
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 24e21b2838c1..778b82ae964d 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -395,7 +395,6 @@ static int sc1200_resume (struct pci_dev *dev)
{
ide_hwif_t *hwif = NULL;
-printk("SC1200: resume\n");
pci_set_power_state(dev, PCI_D0); // bring chip back from sleep state
dev->current_state = PM_EVENT_ON;
pci_enable_device(dev);
@@ -405,7 +404,6 @@ printk("SC1200: resume\n");
while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) {
unsigned int basereg, r, d, format;
sc1200_saved_state_t *ss = (sc1200_saved_state_t *)hwif->config_data;
-printk("%s: SC1200: resume\n", hwif->name);
//
// Restore timing registers: this may be unnecessary if BIOS also does it
@@ -493,7 +491,7 @@ static int __devinit sc1200_init_one(struct pci_dev *dev, const struct pci_devic
}
static struct pci_device_id sc1200_pci_tbl[] = {
- { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE), 0},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, sc1200_pci_tbl);
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 0d3073f4eab4..5100b827a935 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -123,11 +123,11 @@ static u8 svwks_csb_check (struct pci_dev *dev)
}
static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
- u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
- u8 dma_modes[] = { 0x77, 0x21, 0x20 };
- u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
- u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
- u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
+ static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
+ static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
+ static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+ static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
+ static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
@@ -392,16 +392,6 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
}
outb_p(0x06, 0x0c00);
dev->irq = inb_p(0x0c01);
-#if 0
- printk("%s: device class (0x%04x)\n",
- name, dev->class);
- if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
- dev->class &= ~0x000F0F00;
- // dev->class |= ~0x00000400;
- dev->class |= ~0x00010100;
- /**/
- }
-#endif
} else {
struct pci_dev * findev = NULL;
u8 reg41 = 0;
@@ -452,7 +442,7 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
pci_write_config_byte(dev, 0x5A, btr);
}
- return (dev->irq) ? dev->irq : 0;
+ return dev->irq;
}
static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif)
@@ -500,11 +490,6 @@ static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
- /* Per Specified Design by OEM, and ASIC Architect */
- if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
- return 1;
-
/* Server Works */
if (dev->subsystem_vendor == PCI_VENDOR_ID_SERVERWORKS)
return ata66_svwks_svwks (hwif);
@@ -517,10 +502,14 @@ static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN)
return ata66_svwks_cobalt (hwif);
+ /* Per Specified Design by OEM, and ASIC Architect */
+ if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
+ (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
+ return 1;
+
return 0;
}
-#undef CAN_SW_DMA
static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
{
u8 dma_stat = 0;
@@ -537,9 +526,6 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
hwif->ultra_mask = 0x3f;
hwif->mwdma_mask = 0x07;
-#ifdef CAN_SW_DMA
- hwif->swdma_mask = 0x07;
-#endif /* CAN_SW_DMA */
hwif->autodma = 0;
@@ -562,8 +548,6 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
hwif->drives[1].autodma = (dma_stat & 0x40);
hwif->drives[0].autotune = (!(dma_stat & 0x20));
hwif->drives[1].autotune = (!(dma_stat & 0x40));
-// hwif->drives[0].autodma = hwif->autodma;
-// hwif->drives[1].autodma = hwif->autodma;
}
/*
@@ -593,11 +577,6 @@ static int __devinit init_setup_csb6 (struct pci_dev *dev, ide_pci_device_t *d)
if (dev->resource[0].start == 0x01f1)
d->bootable = ON_BOARD;
}
-#if 0
- if ((IDE_PCI_DEVID_EQ(d->devid, DEVID_CSB6) &&
- (!(PCI_FUNC(dev->devfn) & 1)))
- d->autodma = AUTODMA;
-#endif
d->channels = ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE ||
dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2) &&
@@ -671,11 +650,11 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
}
static struct pci_device_id svwks_pci_tbl[] = {
- { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
- { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
- { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
- { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 1},
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 3},
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 4},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index f1ca154dd52c..72dade14c725 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -38,9 +38,6 @@
#include <asm/io.h>
-#undef SIIMAGE_VIRTUAL_DMAPIO
-#undef SIIMAGE_LARGE_DMA
-
/**
* pdev_is_sata - check if device is SATA
* @pdev: PCI device to check
@@ -461,36 +458,6 @@ static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
return 0;
}
-#if 0
-/**
- * siimage_mmio_ide_dma_count - DMA bytes done
- * @drive
- *
- * If we are doing VDMA the CMD680 requires a little bit
- * of more careful handling and we have to read the counts
- * off ourselves. For non VDMA life is normal.
- */
-
-static int siimage_mmio_ide_dma_count (ide_drive_t *drive)
-{
-#ifdef SIIMAGE_VIRTUAL_DMAPIO
- struct request *rq = HWGROUP(drive)->rq;
- ide_hwif_t *hwif = HWIF(drive);
- u32 count = (rq->nr_sectors * SECTOR_SIZE);
- u32 rcount = 0;
- unsigned long addr = siimage_selreg(hwif, 0x1C);
-
- hwif->OUTL(count, addr);
- rcount = hwif->INL(addr);
-
- printk("\n%s: count = %d, rcount = %d, nr_sectors = %lu\n",
- drive->name, count, rcount, rq->nr_sectors);
-
-#endif /* SIIMAGE_VIRTUAL_DMAPIO */
- return __ide_dma_count(drive);
-}
-#endif
-
/**
* siimage_mmio_ide_dma_test_irq - check we caused an IRQ
* @drive: drive we are testing
@@ -512,12 +479,10 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
u32 sata_error = hwif->INL(SATA_ERROR_REG);
hwif->OUTL(sata_error, SATA_ERROR_REG);
watchdog = (sata_error & 0x00680000) ? 1 : 0;
-#if 1
printk(KERN_WARNING "%s: sata_error = 0x%08x, "
"watchdog = %d, %s\n",
drive->name, sata_error, watchdog,
__FUNCTION__);
-#endif
} else {
watchdog = (ext_stat & 0x8000) ? 1 : 0;
@@ -863,7 +828,7 @@ static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, const ch
* time.
*
* The hardware supports buffered taskfiles and also some rather nice
- * extended PRD tables. Unfortunately right now we don't.
+ * extended PRD tables. For better SI3112 support, use the libata driver.
*/
static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
@@ -900,9 +865,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
* so we can't currently use it sanely since we want to
* use LBA48 mode.
*/
-// base += 0x10;
-// hwif->no_lba48 = 1;
-
hw.io_ports[IDE_DATA_OFFSET] = base;
hw.io_ports[IDE_ERROR_OFFSET] = base + 1;
hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2;
@@ -936,15 +898,8 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
base = (unsigned long) addr;
-#ifdef SIIMAGE_LARGE_DMA
-/* Watch the brackets - even Ken and Dennis get some language design wrong */
- hwif->dma_base = base + (ch ? 0x18 : 0x10);
- hwif->dma_base2 = base + (ch ? 0x08 : 0x00);
- hwif->dma_prdtable = hwif->dma_base2 + 4;
-#else /* ! SIIMAGE_LARGE_DMA */
hwif->dma_base = base + (ch ? 0x08 : 0x00);
hwif->dma_base2 = base + (ch ? 0x18 : 0x10);
-#endif /* SIIMAGE_LARGE_DMA */
hwif->mmio = 2;
}
@@ -1052,9 +1007,16 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
hwif->reset_poll = &siimage_reset_poll;
hwif->pre_reset = &siimage_pre_reset;
- if(is_sata(hwif))
+ if(is_sata(hwif)) {
+ static int first = 1;
+
hwif->busproc = &siimage_busproc;
+ if (first) {
+ printk(KERN_INFO "siimage: For full SATA support you should use the libata sata_sil module.\n");
+ first = 0;
+ }
+ }
if (!hwif->dma_base) {
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
@@ -1121,10 +1083,10 @@ static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_devi
}
static struct pci_device_id siimage_pci_tbl[] = {
- { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), 0},
#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
- { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_1210SA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_3112), 1},
+ { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_1210SA), 2},
#endif
{ 0, },
};
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 8a5c7b286b2b..900301e43818 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -447,7 +447,6 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
printk(" %s: Winbond 553 bridge revision %d, BM-DMA disabled\n",
hwif->name, rev);
} else {
-#ifdef CONFIG_BLK_DEV_IDEDMA
dma_state |= 0x60;
hwif->atapi_dma = 1;
@@ -468,7 +467,6 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
if (hwif->mate)
hwif->serialized = hwif->mate->serialized = 1;
-#endif /* CONFIG_BLK_DEV_IDEDMA */
}
hwif->OUTB(dma_state, hwif->dma_base + 2);
}
@@ -489,7 +487,7 @@ static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_dev
}
static struct pci_device_id sl82c105_pci_tbl[] = {
- { PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105), 0},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, sl82c105_pci_tbl);
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 5112c726633b..0968f6bc669a 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -72,7 +72,8 @@ static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
u16 master_data;
u8 slave_data;
/* ISP RTC */
- u8 timings[][2] = { { 0, 0 },
+ static const u8 timings[][2]= {
+ { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
@@ -119,7 +120,6 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
pci_read_config_word(dev, 0x4a, &reg4a);
switch(speed) {
-#ifdef CONFIG_BLK_DEV_IDEDMA
case XFER_UDMA_4: u_speed = 4 << (drive->dn * 4); break;
case XFER_UDMA_3: u_speed = 3 << (drive->dn * 4); break;
case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break;
@@ -128,7 +128,6 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
case XFER_MW_DMA_2:
case XFER_MW_DMA_1:
case XFER_SW_DMA_2: break;
-#endif /* CONFIG_BLK_DEV_IDEDMA */
case XFER_PIO_4:
case XFER_PIO_3:
case XFER_PIO_2:
@@ -156,7 +155,6 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
return (ide_config_drive_speed(drive, speed));
}
-#ifdef CONFIG_BLK_DEV_IDEDMA
static int slc90e66_config_drive_for_dma (ide_drive_t *drive)
{
u8 speed = ide_dma_speed(drive, slc90e66_ratemask(drive));
@@ -194,7 +192,6 @@ fast_ata_pio:
/* IORDY not supported */
return 0;
}
-#endif /* CONFIG_BLK_DEV_IDEDMA */
static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
{
@@ -222,7 +219,6 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
hwif->mwdma_mask = 0x07;
hwif->swdma_mask = 0x07;
-#ifdef CONFIG_BLK_DEV_IDEDMA
if (!(hwif->udma_four))
/* bit[0(1)]: 0:80, 1:40 */
hwif->udma_four = (reg47 & mask) ? 0 : 1;
@@ -232,7 +228,6 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
hwif->autodma = 1;
hwif->drives[0].autodma = hwif->autodma;
hwif->drives[1].autodma = hwif->autodma;
-#endif /* !CONFIG_BLK_DEV_IDEDMA */
}
static ide_pci_device_t slc90e66_chipset __devinitdata = {
@@ -250,7 +245,7 @@ static int __devinit slc90e66_init_one(struct pci_dev *dev, const struct pci_dev
}
static struct pci_device_id slc90e66_pci_tbl[] = {
- { PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_1), 0},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, slc90e66_pci_tbl);
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 5bda15904a08..2d5b57be98c3 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1074,8 +1074,7 @@ static inline int update_partial_datagram(struct list_head *pdgl, struct list_he
/* Move list entry to beginning of list so that oldest partial
* datagrams percolate to the end of the list */
- list_del(lh);
- list_add(lh, pdgl);
+ list_move(lh, pdgl);
return 0;
}
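The list hunks in this and the following files (raw1394, mad.c, mad_rmpp.c, ipoib_multicast.c) all collapse an open-coded del/add pair into a single call. A minimal sketch of the equivalence, assuming the standard <linux/list.h> helpers; the wrapper names are made up for illustration:

	#include <linux/list.h>

	/* list_move() re-links an entry at the head of another (or the same)
	 * list; list_move_tail() does the same at the tail.  Each is exactly
	 * the pair of calls removed in these hunks, run under whatever
	 * locking the caller already holds. */
	static inline void requeue_at_head(struct list_head *entry,
					   struct list_head *head)
	{
		/* previously: list_del(entry); list_add(entry, head); */
		list_move(entry, head);
	}

	static inline void requeue_at_tail(struct list_head *entry,
					   struct list_head *head)
	{
		/* previously: list_del(entry); list_add_tail(entry, head); */
		list_move_tail(entry, head);
	}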
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 2d47b11777a5..ad49c040b674 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -103,7 +103,7 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
* driver specific parts, enable the controller and make it available
* to the general subsystem using hpsb_add_host().
*
- * Return Value: a pointer to the &hpsb_host if succesful, %NULL if
+ * Return Value: a pointer to the &hpsb_host if successful, %NULL if
* no memory was available.
*/
static DEFINE_MUTEX(host_num_alloc);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index e7b55e895f50..0ecbf335c64f 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -139,7 +139,7 @@ int hpsb_bus_reset(struct hpsb_host *host);
/*
* Hand over received selfid packet to the core. Complement check (second
- * quadlet is complement of first) is expected to be done and succesful.
+ * quadlet is complement of first) is expected to be done and successful.
*/
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 3d278412e1ca..800c8d518430 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -590,11 +590,11 @@ static void ohci_initialize(struct ti_ohci *ohci)
buf = reg_read(ohci, OHCI1394_Version);
sprintf (irq_buf, "%d", ohci->dev->irq);
PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
- "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
+ "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
- pci_resource_start(ohci->dev, 0),
- pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
+ (unsigned long long)pci_resource_start(ohci->dev, 0),
+ (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
ohci->max_packet_size,
ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
@@ -3217,7 +3217,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
{
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
- unsigned long ohci_base;
+ resource_size_t ohci_base;
if (pci_enable_device(dev))
FAIL(-ENXIO, "Failed to enable OHCI hardware");
@@ -3270,15 +3270,16 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
* clearly says it's 2kb, so this shouldn't be a problem. */
ohci_base = pci_resource_start(dev, 0);
if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
- PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
- pci_resource_len(dev, 0));
+ PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
+ (unsigned long long)pci_resource_len(dev, 0));
/* Seems PCMCIA handles this internally. Not sure why. Seems
* pretty bogus to force a driver to special case this. */
#ifndef PCMCIA
if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
- FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
- ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
+ FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
+ (unsigned long long)ohci_base,
+ (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
#endif
ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
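The format-string changes above (and the matching ones in ipath_driver.c and mthca_main.c further down) follow the standard recipe for resource_size_t, whose width depends on the kernel configuration: cast to unsigned long long and print with %llx. A minimal sketch, with a hypothetical helper name:

	#include <linux/kernel.h>
	#include <linux/pci.h>

	/* resource_size_t may be 32 or 64 bits wide, so the value is cast
	 * explicitly to match the %llx specifier instead of relying on %lx. */
	static void print_bar0(struct pci_dev *pdev)
	{
		printk(KERN_INFO "BAR0: start=0x%llx len=0x%llx\n",
		       (unsigned long long)pci_resource_start(pdev, 0),
		       (unsigned long long)pci_resource_len(pdev, 0));
	}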
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 20ce539580f1..571ea68c0cf2 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -132,8 +132,7 @@ static void free_pending_request(struct pending_request *req)
static void __queue_complete_req(struct pending_request *req)
{
struct file_info *fi = req->file_info;
- list_del(&req->list);
- list_add_tail(&req->list, &fi->req_complete);
+ list_move_tail(&req->list, &fi->req_complete);
up(&fi->complete_sem);
wake_up_interruptible(&fi->poll_wait_complete);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index ba2d6505e9a4..69a53d476b5b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -41,4 +41,6 @@ source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/iser/Kconfig"
+
endmenu
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index eea27322a22d..c7ff58c1d0e5 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_IPATH_CORE) += hw/ipath/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
+obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a76834edf608..863f64befc7c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -476,7 +476,7 @@ static inline int cma_zero_addr(struct sockaddr *addr)
else {
ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
- ip6->s6_addr32[3] | ip6->s6_addr32[4]) == 0;
+ ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
}
}
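The cma.c hunk above fixes the word indices used in the IPv6 wildcard-address test: struct in6_addr has four 32-bit words, s6_addr32[0..3], while the old code skipped word 2 and read the out-of-range element 4. A minimal sketch of the corrected check; the helper name is for illustration only:

	#include <linux/in6.h>

	/* The address is the unspecified (all-zero) address iff the OR of
	 * all four 32-bit words is zero; valid indices run 0 through 3. */
	static inline int ipv6_addr_is_any(const struct in6_addr *ip6)
	{
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}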
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b38e02a5db35..5ed4dab52a6f 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1775,11 +1775,9 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
mad_send_wr->timeout = 0;
- if (mad_send_wr->refcount == 1) {
- list_del(&mad_send_wr->agent_list);
- list_add_tail(&mad_send_wr->agent_list,
+ if (mad_send_wr->refcount == 1)
+ list_move_tail(&mad_send_wr->agent_list,
&mad_send_wr->mad_agent_priv->done_list);
- }
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
@@ -2098,8 +2096,7 @@ retry:
queued_send_wr = container_of(mad_list,
struct ib_mad_send_wr_private,
mad_list);
- list_del(&mad_list->list);
- list_add_tail(&mad_list->list, &send_queue->list);
+ list_move_tail(&mad_list->list, &send_queue->list);
}
spin_unlock_irqrestore(&send_queue->lock, flags);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index d4704e054e30..ebcd5b181770 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -665,8 +665,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
goto out;
mad_send_wr->refcount++;
- list_del(&mad_send_wr->agent_list);
- list_add_tail(&mad_send_wr->agent_list,
+ list_move_tail(&mad_send_wr->agent_list,
&mad_send_wr->mad_agent_priv->send_list);
}
out:
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 76bf61e9b552..a908a7bdcd7f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1530,7 +1530,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
out_put:
put_qp_read(qp);
-out:
while (wr) {
if (is_ud && wr->wr.ud.ah)
put_ah_read(wr->wr.ud.ah);
@@ -1539,6 +1538,7 @@ out:
wr = next;
}
+out:
kfree(user_wr);
return ret ? ret : in_len;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e57d3c50f75f..e725cccc7cde 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -188,7 +188,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
ib_destroy_ah(ah);
- list_del(&uobj->list);
kfree(uobj);
}
@@ -200,7 +199,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
ib_uverbs_detach_umcast(qp, uqp);
ib_destroy_qp(qp);
- list_del(&uobj->list);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
@@ -213,7 +211,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
ib_destroy_cq(cq);
- list_del(&uobj->list);
ib_uverbs_release_ucq(file, ev_file, ucq);
kfree(ucq);
}
@@ -225,7 +222,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
ib_destroy_srq(srq);
- list_del(&uobj->list);
ib_uverbs_release_uevent(file, uevent);
kfree(uevent);
}
@@ -243,7 +239,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
memobj = container_of(uobj, struct ib_umem_object, uobject);
ib_umem_release_on_close(mrdev, &memobj->umem);
- list_del(&uobj->list);
kfree(memobj);
}
@@ -252,7 +247,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
ib_dealloc_pd(pd);
- list_del(&uobj->list);
kfree(uobj);
}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index dddcdae736ac..e4b897fa569a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -460,10 +460,10 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
for (j = 0; j < 6; j++) {
if (!pdev->resource[j].start)
continue;
- ipath_cdbg(VERBOSE, "BAR %d start %lx, end %lx, len %lx\n",
- j, pdev->resource[j].start,
- pdev->resource[j].end,
- pci_resource_len(pdev, j));
+ ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
+ j, (unsigned long long)pdev->resource[j].start,
+ (unsigned long long)pdev->resource[j].end,
+ (unsigned long long)pci_resource_len(pdev, j));
}
if (!addr) {
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9b9ff7bff357..465fd220569c 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -172,8 +172,9 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
- "PCI resource 2 size of 0x%lx, aborting.\n",
- dev_lim->uar_size, pci_resource_len(mdev->pdev, 2));
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev_lim->uar_size,
+ (unsigned long long)pci_resource_len(mdev->pdev, 2));
return -ENODEV;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 216471fa01cc..ab40488182b3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -864,8 +864,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
if (mcast) {
/* Destroy the send only entry */
- list_del(&mcast->list);
- list_add_tail(&mcast->list, &remove_list);
+ list_move_tail(&mcast->list, &remove_list);
rb_replace_node(&mcast->rb_node,
&nmcast->rb_node,
@@ -890,8 +889,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
rb_erase(&mcast->rb_node, &priv->multicast_tree);
/* Move to the remove list */
- list_del(&mcast->list);
- list_add_tail(&mcast->list, &remove_list);
+ list_move_tail(&mcast->list, &remove_list);
}
}
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
new file mode 100644
index 000000000000..fead87d1eff9
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -0,0 +1,11 @@
+config INFINIBAND_ISER
+ tristate "ISCSI RDMA Protocol"
+ depends on INFINIBAND && SCSI
+ select SCSI_ISCSI_ATTRS
+ ---help---
+ Support for the iSCSI RDMA Protocol over InfiniBand. This
+ allows you to access storage devices that speak iSER/iSCSI
+ over InfiniBand.
+
+ The iSER protocol is defined by the IETF.
+ See <http://www.ietf.org/>.
diff --git a/drivers/infiniband/ulp/iser/Makefile b/drivers/infiniband/ulp/iser/Makefile
new file mode 100644
index 000000000000..fe6cd15f2317
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o
+
+ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \
+ iscsi_iser.o
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
new file mode 100644
index 000000000000..4c3f2de2a06e
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -0,0 +1,790 @@
+/*
+ * iSCSI Initiator over iSER Data-Path
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 Mike Christie
+ * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * maintained by openib-general@openib.org
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Credits:
+ * Christoph Hellwig
+ * FUJITA Tomonori
+ * Arne Redlich
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
+ *
+ *
+ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/hardirq.h>
+#include <linux/kfifo.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/cdev.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+
+#include <net/sock.h>
+
+#include <asm/uaccess.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "iscsi_iser.h"
+
+static unsigned int iscsi_max_lun = 512;
+module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+int iser_debug_level = 0;
+
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
+ "v" DRV_VER " (" DRV_DATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+
+module_param_named(debug_level, iser_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+
+struct iser_global ig;
+
+void
+iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+{
+ int rc = 0;
+ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+ /* verify PDU length */
+ datalen = ntoh24(hdr->dlength);
+ if (datalen != rx_data_len) {
+ printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n",
+ datalen, rx_data_len);
+ rc = ISCSI_ERR_DATALEN;
+ goto error;
+ }
+
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+ /* verify itt (itt encoding: age+cid+itt) */
+ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
+
+ if (!rc)
+ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
+
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+ return;
+error:
+ iscsi_conn_failure(conn, rc);
+}
+
+
+/**
+ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+ **/
+static void
+iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+{
+ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct scsi_cmnd *sc = ctask->sc;
+
+ iser_ctask->command_sent = 0;
+ iser_ctask->iser_conn = iser_conn;
+
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ BUG_ON(ctask->total_length == 0);
+ /* bytes to be sent via RDMA operations */
+ iser_ctask->rdma_data_count = ctask->total_length -
+ ctask->imm_count -
+ ctask->unsol_count;
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d "
+ "rdma_data %d]\n",
+ ctask->itt, ctask->total_length, ctask->imm_count,
+ ctask->unsol_count, iser_ctask->rdma_data_count);
+ } else
+ /* bytes to be sent via RDMA operations */
+ iser_ctask->rdma_data_count = ctask->total_length;
+
+ iser_ctask_rdma_init(iser_ctask);
+}
+
+/**
+ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+ * call it again later, or recover. '0' return code means successful
+ * xmit.
+ *
+ **/
+static int
+iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask)
+{
+ int error = 0;
+
+ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+ error = iser_send_control(conn, mtask);
+
+ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+ * - if yes, the mtask is recycled at iscsi_complete_pdu
+ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -EAGAIN)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+
+ return error;
+}
+
+static int
+iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_cmd_task *ctask)
+{
+ struct iscsi_data hdr;
+ int error = 0;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+ while (ctask->unsol_count > 0) {
+ iscsi_prep_unsolicit_data_pdu(ctask, &hdr,
+ iser_ctask->rdma_data_count);
+
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+ ctask->unsol_datasn--;
+ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+ ctask->unsol_count);
+ }
+
+iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+}
+
+static int
+iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ struct iscsi_cmd_task *ctask)
+{
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
+ conn->id, ctask->itt);
+
+ /*
+ * serialize with TMF AbortTask
+ */
+ if (ctask->mtask)
+ return error;
+
+ /* Send the cmd PDU */
+ if (!iser_ctask->command_sent) {
+ error = iser_send_command(conn, ctask);
+ if (error)
+ goto iscsi_iser_ctask_xmit_exit;
+ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+ if (ctask->unsol_count)
+ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -EAGAIN)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+}
+
+static void
+iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+{
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
+ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
+ iser_ctask_rdma_finalize(iser_ctask);
+ }
+}
+
+static struct iser_conn *
+iscsi_iser_ib_conn_lookup(__u64 ep_handle)
+{
+ struct iser_conn *ib_conn;
+ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+ mutex_lock(&ig.connlist_mutex);
+ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
+ if (ib_conn == uib_conn) {
+ mutex_unlock(&ig.connlist_mutex);
+ return ib_conn;
+ }
+ }
+ mutex_unlock(&ig.connlist_mutex);
+ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
+ return NULL;
+}
+
+static struct iscsi_cls_conn *
+iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+
+ /*
+ * due to issues with the login code re iser semantics
+ * this is not set in iscsi_conn_setup - FIXME
+ */
+ conn->max_recv_dlength = 128;
+
+ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
+ if (!iser_conn)
+ goto conn_alloc_fail;
+
+ /* currently this is the only field which needs to be initialized */
+ rwlock_init(&iser_conn->lock);
+
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
+
+conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+static void
+iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+
+ iscsi_conn_teardown(cls_conn);
+ kfree(iser_conn);
+}
+
+static int
+iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (error)
+ return error;
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
+ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+ iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn);
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+
+ conn->recv_lock = &iser_conn->lock;
+
+ return 0;
+}
+
+static int
+iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ int err;
+
+ err = iscsi_conn_start(cls_conn);
+ if (err)
+ return err;
+
+ return iser_conn_set_full_featured_mode(conn);
+}
+
+static void
+iscsi_iser_conn_terminate(struct iscsi_conn *conn)
+{
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ BUG_ON(!ib_conn);
+ /* starts conn teardown process, waits until all previously *
+ * posted buffers get flushed, deallocates all conn resources */
+ iser_conn_terminate(ib_conn);
+ iser_conn->ib_conn = NULL;
+ conn->recv_lock = NULL;
+}
+
+
+static struct iscsi_transport iscsi_iser_transport;
+
+static struct iscsi_cls_session *
+iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct scsi_transport_template *scsit,
+ uint32_t initial_cmdsn, uint32_t *hostno)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ int i;
+ uint32_t hn;
+ struct iscsi_cmd_task *ctask;
+ struct iscsi_mgmt_task *mtask;
+ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iser_desc *desc;
+
+ cls_session = iscsi_session_setup(iscsit, scsit,
+ sizeof(struct iscsi_iser_cmd_task),
+ sizeof(struct iser_desc),
+ initial_cmdsn, &hn);
+ if (!cls_session)
+ return NULL;
+
+ *hostno = hn;
+ session = class_to_transport_session(cls_session);
+
+ /* libiscsi sets up itts, data and pool, so just set the desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+ ctask = session->cmds[i];
+ iser_ctask = ctask->dd_data;
+ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
+ }
+
+ for (i = 0; i < session->mgmtpool_max; i++) {
+ mtask = session->mgmt_cmds[i];
+ desc = mtask->dd_data;
+ mtask->hdr = &desc->iscsi_header;
+ desc->data = mtask->data;
+ }
+
+ return cls_session;
+}
+
+static int
+iscsi_iser_conn_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, uint32_t value)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+ spin_lock_bh(&session->lock);
+ if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
+ conn->stop_stage != STOP_CONN_RECOVER) {
+ printk(KERN_ERR "iscsi_iser: can not change parameter [%d]\n",
+ param);
+ spin_unlock_bh(&session->lock);
+ return 0;
+ }
+ spin_unlock_bh(&session->lock);
+
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ /* TBD */
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ conn->max_xmit_dlength = value;
+ break;
+ case ISCSI_PARAM_HDRDGST_EN:
+ if (value) {
+ printk(KERN_ERR "DataDigest wasn't negotiated to None");
+ return -EPROTO;
+ }
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ if (value) {
+ printk(KERN_ERR "DataDigest wasn't negotiated to None");
+ return -EPROTO;
+ }
+ break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ session->initial_r2t_en = value;
+ break;
+ case ISCSI_PARAM_IMM_DATA_EN:
+ session->imm_data_en = value;
+ break;
+ case ISCSI_PARAM_FIRST_BURST:
+ session->first_burst = value;
+ break;
+ case ISCSI_PARAM_MAX_BURST:
+ session->max_burst = value;
+ break;
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ session->pdu_inorder_en = value;
+ break;
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ session->dataseq_inorder_en = value;
+ break;
+ case ISCSI_PARAM_ERL:
+ session->erl = value;
+ break;
+ case ISCSI_PARAM_IFMARKER_EN:
+ if (value) {
+ printk(KERN_ERR "IFMarker wasn't negotiated to No");
+ return -EPROTO;
+ }
+ break;
+ case ISCSI_PARAM_OFMARKER_EN:
+ if (value) {
+ printk(KERN_ERR "OFMarker wasn't negotiated to No");
+ return -EPROTO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+iscsi_iser_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, uint32_t *value)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ *value = session->initial_r2t_en;
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ *value = session->max_r2t;
+ break;
+ case ISCSI_PARAM_IMM_DATA_EN:
+ *value = session->imm_data_en;
+ break;
+ case ISCSI_PARAM_FIRST_BURST:
+ *value = session->first_burst;
+ break;
+ case ISCSI_PARAM_MAX_BURST:
+ *value = session->max_burst;
+ break;
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ *value = session->pdu_inorder_en;
+ break;
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ *value = session->dataseq_inorder_en;
+ break;
+ case ISCSI_PARAM_ERL:
+ *value = session->erl;
+ break;
+ case ISCSI_PARAM_IFMARKER_EN:
+ *value = 0;
+ break;
+ case ISCSI_PARAM_OFMARKER_EN:
+ *value = 0;
+ break;
+ default:
+ return ISCSI_ERR_PARAM_NOT_FOUND;
+ }
+
+ return 0;
+}
+
+static int
+iscsi_iser_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, uint32_t *value)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ switch(param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ *value = conn->max_recv_dlength;
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ *value = conn->max_xmit_dlength;
+ break;
+ case ISCSI_PARAM_HDRDGST_EN:
+ *value = 0;
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ *value = 0;
+ break;
+ /*case ISCSI_PARAM_TARGET_RECV_DLENGTH:
+ *value = conn->target_recv_dlength;
+ break;
+ case ISCSI_PARAM_INITIATOR_RECV_DLENGTH:
+ *value = conn->initiator_recv_dlength;
+ break;*/
+ default:
+ return ISCSI_ERR_PARAM_NOT_FOUND;
+ }
+
+ return 0;
+}
+
+
+static void
+iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt; /* always 0 */
+ stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->custom_length = 3;
+ strcpy(stats->custom[0].desc, "qp_tx_queue_full");
+ stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */
+ strcpy(stats->custom[1].desc, "fmr_map_not_avail");
+ stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */;
+ strcpy(stats->custom[2].desc, "eh_abort_cnt");
+ stats->custom[2].value = conn->eh_abort_cnt;
+}
+
+static int
+iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
+ __u64 *ep_handle)
+{
+ int err;
+ struct iser_conn *ib_conn;
+
+ err = iser_conn_init(&ib_conn);
+ if (err)
+ goto out;
+
+ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
+ if (!err)
+ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+out:
+ return err;
+}
+
+static int
+iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+{
+ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+ if (!ib_conn)
+ return -EINVAL;
+
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+
+ /* if conn establishment failed, return error code to iscsi */
+ if (!rc &&
+ (ib_conn->state == ISER_CONN_TERMINATING ||
+ ib_conn->state == ISER_CONN_DOWN))
+ rc = -1;
+
+ iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+
+ if (rc > 0)
+ return 1; /* success, this is the equivalent of POLLOUT */
+ else if (!rc)
+ return 0; /* timeout */
+ else
+ return rc; /* signal */
+}
+
+static void
+iscsi_iser_ep_disconnect(__u64 ep_handle)
+{
+ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+
+ if (!ib_conn)
+ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+
+ iser_conn_terminate(ib_conn);
+}
+
+static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "iscsi_iser",
+ .this_id = -1,
+};
+
+static struct iscsi_transport iscsi_iser_transport = {
+ .owner = THIS_MODULE,
+ .name = "iser",
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T,
+ .param_mask = ISCSI_MAX_RECV_DLENGTH |
+ ISCSI_MAX_XMIT_DLENGTH |
+ ISCSI_HDRDGST_EN |
+ ISCSI_DATADGST_EN |
+ ISCSI_INITIAL_R2T_EN |
+ ISCSI_MAX_R2T |
+ ISCSI_IMM_DATA_EN |
+ ISCSI_FIRST_BURST |
+ ISCSI_MAX_BURST |
+ ISCSI_PDU_INORDER_EN |
+ ISCSI_DATASEQ_INORDER_EN,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
+ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+ .destroy_conn = iscsi_iser_conn_destroy,
+ .set_param = iscsi_iser_conn_set_param,
+ .get_conn_param = iscsi_iser_conn_get_param,
+ .get_session_param = iscsi_iser_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ /* these are called as part of conn recovery */
+ .suspend_conn_recv = NULL, /* FIXME is/how is this relevant to iser? */
+ .terminate_conn = iscsi_iser_conn_terminate,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+ .init_cmd_task = iscsi_iser_cmd_init,
+ .xmit_cmd_task = iscsi_iser_ctask_xmit,
+ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
+ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+ .ep_connect = iscsi_iser_ep_connect,
+ .ep_poll = iscsi_iser_ep_poll,
+ .ep_disconnect = iscsi_iser_ep_disconnect
+};
+
+static int __init iser_init(void)
+{
+ int err;
+
+ iser_dbg("Starting iSER datamover...\n");
+
+ if (iscsi_max_lun < 1) {
+ printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+ return -EINVAL;
+ }
+
+ iscsi_iser_transport.max_lun = iscsi_max_lun;
+
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+ sizeof (struct iser_desc),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (ig.desc_cache == NULL)
+ return -ENOMEM;
+
+ /* device init is called only after the first addr resolution */
+ mutex_init(&ig.device_list_mutex);
+ INIT_LIST_HEAD(&ig.device_list);
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+ }
+
+ return 0;
+
+register_transport_failure:
+ kmem_cache_destroy(ig.desc_cache);
+
+ return err;
+}
+
+static void __exit iser_exit(void)
+{
+ iser_dbg("Removing iSER datamover...\n");
+ iscsi_unregister_transport(&iscsi_iser_transport);
+ kmem_cache_destroy(ig.desc_cache);
+}
+
+module_init(iser_init);
+module_exit(iser_exit);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
new file mode 100644
index 000000000000..3350ba690cfe
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -0,0 +1,354 @@
+/*
+ * iSER transport for the Open iSCSI Initiator & iSER transport internals
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 Mike Christie
+ * based on code maintained by open-iscsi@googlegroups.com
+ *
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+#ifndef __ISCSI_ISER_H__
+#define __ISCSI_ISER_H__
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/mempool.h>
+#include <linux/uio.h>
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_fmr_pool.h>
+#include <rdma/rdma_cm.h>
+
+#define DRV_NAME "iser"
+#define PFX DRV_NAME ": "
+#define DRV_VER "0.1"
+#define DRV_DATE "May 7th, 2006"
+
+#define iser_dbg(fmt, arg...) \
+ do { \
+ if (iser_debug_level > 0) \
+ printk(KERN_DEBUG PFX "%s:" fmt,\
+ __func__ , ## arg); \
+ } while (0)
+
+#define iser_err(fmt, arg...) \
+ do { \
+ printk(KERN_ERR PFX "%s:" fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+ /* support up to 512KB in one RDMA */
+#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> PAGE_SHIFT)
+#define ISCSI_ISER_MAX_LUN 256
+#define ISCSI_ISER_MAX_CMD_LEN 16
+
+/* QP settings */
+/* Maximal bounds on received asynchronous PDUs */
+#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */
+
+#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
+ * SCSI_TMFUNC(2), LOGOUT(1) */
+
+#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+ ISER_MAX_RX_MISC_PDUS + \
+ ISER_MAX_TX_MISC_PDUS)
+
+/* the max TX (send) WR supported by the iSER QP is defined by *
+ * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
+ * to have at most per SCSI command, C counts the misc TX/RX PDUs. The tx *
+ * posting & completion handling code supports an -EAGAIN scheme where tx is *
+ * suspended till the QP has room for more send WRs. D=8 comes from 64K/8K */
+
+#define ISER_INFLIGHT_DATAOUTS 8
+
+#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+ (1 + ISER_INFLIGHT_DATAOUTS) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+
+#define ISER_VER 0x10
+#define ISER_WSV 0x08
+#define ISER_RSV 0x04
+
+struct iser_hdr {
+ u8 flags;
+ u8 rsvd[3];
+ __be32 write_stag; /* write rkey */
+ __be64 write_va;
+ __be32 read_stag; /* read rkey */
+ __be64 read_va;
+} __attribute__((packed));
+
+
+/* Length of an object name string */
+#define ISER_OBJECT_NAME_SIZE 64
+
+enum iser_ib_conn_state {
+ ISER_CONN_INIT, /* descriptor allocd, no conn */
+ ISER_CONN_PENDING, /* in the process of being established */
+ ISER_CONN_UP, /* up and running */
+ ISER_CONN_TERMINATING, /* in the process of being terminated */
+ ISER_CONN_DOWN, /* shut down */
+ ISER_CONN_STATES_NUM
+};
+
+enum iser_task_status {
+ ISER_TASK_STATUS_INIT = 0,
+ ISER_TASK_STATUS_STARTED,
+ ISER_TASK_STATUS_COMPLETED
+};
+
+enum iser_data_dir {
+ ISER_DIR_IN = 0, /* to initiator */
+ ISER_DIR_OUT, /* from initiator */
+ ISER_DIRS_NUM
+};
+
+struct iser_data_buf {
+ void *buf; /* pointer to the sg list */
+ unsigned int size; /* num entries of this sg */
+ unsigned long data_len; /* total data len */
+ unsigned int dma_nents; /* returned by dma_map_sg */
+ char *copy_buf; /* allocated copy buf for SGs unaligned *
+ * for rdma which are copied */
+ struct scatterlist sg_single; /* SG-ified clone of a non SG SC or *
+ * unaligned SG */
+ };
+
+/* fwd declarations */
+struct iser_device;
+struct iscsi_iser_conn;
+struct iscsi_iser_cmd_task;
+
+struct iser_mem_reg {
+ u32 lkey;
+ u32 rkey;
+ u64 va;
+ u64 len;
+ void *mem_h;
+};
+
+struct iser_regd_buf {
+ struct iser_mem_reg reg; /* memory registration info */
+ void *virt_addr;
+ struct iser_device *device; /* device->device for dma_unmap */
+ dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */
+ enum dma_data_direction direction; /* direction for dma_unmap */
+ unsigned int data_size;
+ atomic_t ref_count; /* refcount, freed when dec to 0 */
+};
+
+#define MAX_REGD_BUF_VECTOR_LEN 2
+
+struct iser_dto {
+ struct iscsi_iser_cmd_task *ctask;
+ struct iscsi_iser_conn *conn;
+ int notify_enable;
+
+ /* vector of registered buffers */
+ unsigned int regd_vector_len;
+ struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
+
+ /* offset into the registered buffer may be specified */
+ unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
+
+ /* a smaller size may be specified, if 0, then full size is used */
+ unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
+};
+
+enum iser_desc_type {
+ ISCSI_RX,
+ ISCSI_TX_CONTROL ,
+ ISCSI_TX_SCSI_COMMAND,
+ ISCSI_TX_DATAOUT
+};
+
+struct iser_desc {
+ struct iser_hdr iser_header;
+ struct iscsi_hdr iscsi_header;
+ struct iser_regd_buf hdr_regd_buf;
+ void *data; /* used by RX & TX_CONTROL */
+ struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
+ enum iser_desc_type type;
+ struct iser_dto dto;
+};
+
+struct iser_device {
+ struct ib_device *ib_device;
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_mr *mr;
+ struct tasklet_struct cq_tasklet;
+ struct list_head ig_list; /* entry in ig devices list */
+ int refcount;
+};
+
+struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+ enum iser_ib_conn_state state; /* rdma connection state */
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+ struct ib_qp *qp; /* QP */
+ struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
+ int disc_evt_flag; /* disconn event delivered */
+ wait_queue_head_t wait; /* waitq for conn/disconn */
+ atomic_t post_recv_buf_count; /* posted rx count */
+ atomic_t post_send_buf_count; /* posted tx count */
+ struct work_struct comperror_work; /* conn term sleepable ctx*/
+ char name[ISER_OBJECT_NAME_SIZE];
+ struct iser_page_vec *page_vec; /* represents SG to fmr maps*
+ * maps serialized as tx is*/
+ struct list_head conn_list; /* entry in ig conn list */
+};
+
+struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
+
+ rwlock_t lock;
+};
+
+struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ int rdma_data_count;/* RDMA bytes */
+ enum iser_task_status status;
+ int command_sent; /* set if command sent */
+ int dir[ISER_DIRS_NUM]; /* set if dir use*/
+ struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
+ struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
+ struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
+};
+
+struct iser_page_vec {
+ u64 *pages;
+ int length;
+ int offset;
+ int data_size;
+};
+
+struct iser_global {
+ struct mutex device_list_mutex;/* */
+ struct list_head device_list; /* all iSER devices */
+ struct mutex connlist_mutex;
+ struct list_head connlist; /* all iSER IB connections */
+
+ kmem_cache_t *desc_cache;
+};
+
+extern struct iser_global ig;
+extern int iser_debug_level;
+
+/* allocate connection resources needed for rdma functionality */
+int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+int iser_send_control(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask);
+
+int iser_send_command(struct iscsi_conn *conn,
+ struct iscsi_cmd_task *ctask);
+
+int iser_send_data_out(struct iscsi_conn *conn,
+ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr);
+
+void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+int iser_conn_init(struct iser_conn **ib_conn);
+
+void iser_conn_terminate(struct iser_conn *ib_conn);
+
+void iser_conn_release(struct iser_conn *ib_conn);
+
+void iser_rcv_completion(struct iser_desc *desc,
+ unsigned long dto_xfer_len);
+
+void iser_snd_completion(struct iser_desc *desc);
+
+void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+void iser_dto_buffs_release(struct iser_dto *dto);
+
+int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
+
+void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+int iser_connect(struct iser_conn *ib_conn,
+ struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
+ int non_blocking);
+
+int iser_reg_page_vec(struct iser_conn *ib_conn,
+ struct iser_page_vec *page_vec,
+ struct iser_mem_reg *mem_reg);
+
+void iser_unreg_mem(struct iser_mem_reg *mem_reg);
+
+int iser_post_recv(struct iser_desc *rx_desc);
+int iser_post_send(struct iser_desc *tx_desc);
+
+int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+#endif
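The QP sizing comment in the header above encodes max_send_wr = T * (1 + D) + C, where T is the command window, D the in-flight data-outs per command (8, i.e. 64K of unsolicited data in 8K chunks) and C the misc TX/RX PDUs. A worked example, assuming a hypothetical command window of 128 (the real ISCSI_XMIT_CMDS_MAX comes from the libiscsi headers and may differ):

	/* Illustration only; 128 is an assumed value for ISCSI_XMIT_CMDS_MAX. */
	#define EXAMPLE_XMIT_CMDS_MAX		128	/* T */
	#define EXAMPLE_INFLIGHT_DATAOUTS	8	/* D = 64K / 8K */
	#define EXAMPLE_MISC_PDUS		(6 + 4)	/* C = TX misc + RX misc */

	/* max_send_wr = T * (1 + D) + C = 128 * 9 + 10 = 1162 work requests */
	#define EXAMPLE_QP_MAX_REQ_DTOS					\
		(EXAMPLE_XMIT_CMDS_MAX * (1 + EXAMPLE_INFLIGHT_DATAOUTS) +	\
		 EXAMPLE_MISC_PDUS)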
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
new file mode 100644
index 000000000000..ccf56f6f7236
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
+#include <linux/kfifo.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#include "iscsi_iser.h"
+
+/* Constant PDU lengths calculations */
+#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \
+ sizeof (struct iscsi_hdr))
+
+/* iser_dto_add_regd_buff - increments the reference count for *
+ * the registered buffer & adds it to the DTO object */
+static void iser_dto_add_regd_buff(struct iser_dto *dto,
+ struct iser_regd_buf *regd_buf,
+ unsigned long use_offset,
+ unsigned long use_size)
+{
+ int add_idx;
+
+ atomic_inc(&regd_buf->ref_count);
+
+ add_idx = dto->regd_vector_len;
+ dto->regd[add_idx] = regd_buf;
+ dto->used_sz[add_idx] = use_size;
+ dto->offset[add_idx] = use_offset;
+
+ dto->regd_vector_len++;
+}
+
+static int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
+{
+ struct device *dma_device;
+
+ iser_ctask->dir[iser_dir] = 1;
+ dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+ data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+ iser_err("dma_map_sg failed!!!\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+{
+ struct device *dma_device;
+ struct iser_data_buf *data;
+
+ dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+ if (iser_ctask->dir[ISER_DIR_IN]) {
+ data = &iser_ctask->data[ISER_DIR_IN];
+ dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+ if (iser_ctask->dir[ISER_DIR_OUT]) {
+ data = &iser_ctask->data[ISER_DIR_OUT];
+ dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+ }
+}
+
+/* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+{
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
+ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
+ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+}
+
+/* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+ * ctask->data[ISER_DIR_OUT].data_len
+ */
+static int
+iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+{
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+ struct iser_dto *send_dto = &iser_ctask->desc.dto;
+ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
+ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+ iser_ctask->data[ISER_DIR_OUT].data_len,
+ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+ hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz);
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+ imm_sz);
+ }
+
+ return 0;
+}
+
+/**
+ * iser_post_receive_control - allocates, initializes and posts receive DTO.
+ */
+static int iser_post_receive_control(struct iscsi_conn *conn)
+{
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_desc *rx_desc;
+ struct iser_regd_buf *regd_hdr;
+ struct iser_regd_buf *regd_data;
+ struct iser_dto *recv_dto = NULL;
+ struct iser_device *device = iser_conn->ib_conn->device;
+ int rx_data_size, err = 0;
+
+ rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
+ if (rx_desc == NULL) {
+ iser_err("Failed to alloc desc for post recv\n");
+ return -ENOMEM;
+ }
+ rx_desc->type = ISCSI_RX;
+
+ /* for the login sequence we must support rx of up to 8K; login is done
+ * after conn create/bind (connect) and conn stop/bind (reconnect),
+ * what's common for both schemes is that the connection is not started
+ */
+ if (conn->c_stage != ISCSI_CONN_STARTED)
+ rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
+ else /* FIXME till user space sets conn->max_recv_dlength correctly */
+ rx_data_size = 128;
+
+ rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
+ if (rx_desc->data == NULL) {
+ iser_err("Failed to alloc data buf for post recv\n");
+ err = -ENOMEM;
+ goto post_rx_kmalloc_failure;
+ }
+
+ recv_dto = &rx_desc->dto;
+ recv_dto->conn = iser_conn;
+ recv_dto->regd_vector_len = 0;
+
+ regd_hdr = &rx_desc->hdr_regd_buf;
+ memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
+ regd_hdr->device = device;
+ regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
+ regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+
+ iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
+
+ iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
+
+ regd_data = &rx_desc->data_regd_buf;
+ memset(regd_data, 0, sizeof(struct iser_regd_buf));
+ regd_data->device = device;
+ regd_data->virt_addr = rx_desc->data;
+ regd_data->data_size = rx_data_size;
+
+ iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
+
+ iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
+
+ err = iser_post_recv(rx_desc);
+ if (!err)
+ return 0;
+
+ /* iser_post_recv failed */
+ iser_dto_buffs_release(recv_dto);
+ kfree(rx_desc->data);
+post_rx_kmalloc_failure:
+ kmem_cache_free(ig.desc_cache, rx_desc);
+ return err;
+}
+
+/* creates a new tx descriptor and adds header regd buffer */
+static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
+ struct iser_desc *tx_desc)
+{
+ struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf;
+ struct iser_dto *send_dto = &tx_desc->dto;
+
+ memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
+ regd_hdr->device = iser_conn->ib_conn->device;
+ regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
+ regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+
+ send_dto->conn = iser_conn;
+ send_dto->notify_enable = 1;
+ send_dto->regd_vector_len = 0;
+
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
+ tx_desc->iser_header.flags = ISER_VER;
+
+ iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
+}
+
+/**
+ * iser_conn_set_full_featured_mode - (iSER API)
+ */
+int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
+{
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+
+ int i;
+	/* no need to keep it in a variable; we are past login, so if this were
+	 * negotiated, by now the result would be available here */
+ int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
+
+ iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
+
+	/* Check that there are no posted recv or send buffers left - */
+	/* they must have been consumed during the login phase */
+ BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
+ BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+
+ /* Initial post receive buffers */
+ for (i = 0; i < initial_post_recv_bufs_num; i++) {
+ if (iser_post_receive_control(conn) != 0) {
+ iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
+ i, conn);
+ return -ENOMEM;
+ }
+ }
+ iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
+ return 0;
+}
+
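+/* sets the tx-suspend bit and returns -EAGAIN when the post-send queue is full, 0 otherwise */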
+static int
+iser_check_xmit(struct iscsi_conn *conn, void *task)
+{
+ int rc = 0;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+
+ write_lock_bh(conn->recv_lock);
+ if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+ ISER_QP_MAX_REQ_DTOS) {
+ iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ rc = -EAGAIN;
+ }
+ write_unlock_bh(conn->recv_lock);
+ return rc;
+}
+
+
+/**
+ * iser_send_command - send command PDU
+ */
+int iser_send_command(struct iscsi_conn *conn,
+ struct iscsi_cmd_task *ctask)
+{
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+ struct iscsi_cmd *hdr = ctask->hdr;
+ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+ if (iser_check_xmit(conn, ctask))
+ return -EAGAIN;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
+ send_dto = &iser_ctask->desc.dto;
+ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (sc->use_sg) { /* using a scatter list */
+ data_buf->buf = sc->request_buffer;
+ data_buf->size = sc->use_sg;
+ } else if (sc->request_bufflen) {
+ /* using a single buffer - convert it into one entry SG */
+ sg_init_one(&data_buf->sg_single,
+ sc->request_buffer, sc->request_bufflen);
+ data_buf->buf = &data_buf->sg_single;
+ data_buf->size = 1;
+ }
+
+ data_buf->data_len = sc->request_bufflen;
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+ err = iser_prepare_write_cmd(ctask,
+ ctask->imm_count,
+ ctask->imm_count +
+ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+ }
+
+ iser_reg_single(iser_conn->ib_conn->device,
+ send_dto->regd[0], DMA_TO_DEVICE);
+
+ if (iser_post_receive_control(conn) != 0) {
+ iser_err("post_recv failed!\n");
+ err = -ENOMEM;
+ goto send_command_error;
+ }
+
+ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+send_command_error:
+ iser_dto_buffs_release(send_dto);
+ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+}
+
+/**
+ * iser_send_data_out - send data out PDU
+ */
+int iser_send_data_out(struct iscsi_conn *conn,
+ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+{
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+ unsigned long data_seg_len;
+ unsigned int itt;
+ int err = 0;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+
+ if (iser_check_xmit(conn, ctask))
+ return -EAGAIN;
+
+ itt = ntohl(hdr->itt);
+ data_seg_len = ntoh24(hdr->dlength);
+ buf_offset = ntohl(hdr->offset);
+
+ iser_dbg("%s itt %d dseg_len %d offset %d\n",
+ __func__,(int)itt,(int)data_seg_len,(int)buf_offset);
+
+ tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
+ if (tx_desc == NULL) {
+ iser_err("Failed to alloc desc for post dataout\n");
+ return -ENOMEM;
+ }
+
+ tx_desc->type = ISCSI_TX_DATAOUT;
+ memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+ send_dto->regd[0], DMA_TO_DEVICE);
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+ iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
+ itt, buf_offset, data_seg_len);
+
+
+ err = iser_post_send(tx_desc);
+ if (!err)
+ return 0;
+
+send_data_out_error:
+ iser_dto_buffs_release(send_dto);
+ kmem_cache_free(ig.desc_cache, tx_desc);
+ iser_err("conn %p failed err %d\n",conn, err);
+ return err;
+}
+
+int iser_send_control(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask)
+{
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned int itt;
+ unsigned long data_seg_len;
+ int err = 0;
+ unsigned char opcode;
+ struct iser_regd_buf *regd_buf;
+ struct iser_device *device;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+
+ if (iser_check_xmit(conn,mtask))
+ return -EAGAIN;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+ itt = ntohl(mtask->hdr->itt);
+ opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
+ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+ regd_buf->virt_addr = mtask->data;
+ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+ 0,
+ data_seg_len);
+ }
+
+ if (iser_post_receive_control(conn) != 0) {
+ iser_err("post_rcv_buff failed!\n");
+ err = -ENOMEM;
+ goto send_control_error;
+ }
+
+ err = iser_post_send(mdesc);
+ if (!err)
+ return 0;
+
+send_control_error:
+ iser_dto_buffs_release(send_dto);
+ iser_err("conn %p failed err %d\n",conn, err);
+ return err;
+}
+
+/**
+ * iser_rcv_dto_completion - recv DTO completion
+ */
+void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+{
+ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->conn;
+ struct iscsi_session *session = conn->iscsi_conn->session;
+ struct iscsi_cmd_task *ctask;
+ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
+ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+
+ iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt);
+
+ if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
+ rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
+ rx_data = dto->regd[1]->virt_addr;
+ rx_data += dto->offset[1];
+ }
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+ itt = hdr->itt & ISCSI_ITT_MASK; /* mask out cid and age bits */
+ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!!"
+ "conn %p opcode %d cmds_max %d itt %d\n",
+ conn->iscsi_conn,opcode,session->cmds_max,itt);
+ /* use the mapping given with the cmds array indexed by itt */
+ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
+ iser_ctask = ctask->dd_data;
+ iser_dbg("itt %d ctask %p\n",itt,ctask);
+ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
+ iser_ctask_rdma_finalize(iser_ctask);
+ }
+
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+
+ kfree(rx_desc->data);
+ kmem_cache_free(ig.desc_cache, rx_desc);
+
+	/* decrementing conn->post_recv_buf_count only --after-- freeing the   *
+	 * task eliminates the need to worry about tasks which complete in     *
+	 * parallel to the execution of iser_conn_term. So the code that waits *
+	 * for the posted rx bufs refcount to become zero handles everything   */
+ atomic_dec(&conn->ib_conn->post_recv_buf_count);
+}
+
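+/**
+ * iser_snd_completion - send DTO completion: releases the DTO buffers,
+ * frees data-out descriptors and resumes tx if it was suspended
+ */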
+void iser_snd_completion(struct iser_desc *tx_desc)
+{
+ struct iser_dto *dto = &tx_desc->dto;
+ struct iscsi_iser_conn *iser_conn = dto->conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+ struct iscsi_mgmt_task *mtask;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+
+ iser_dto_buffs_release(dto);
+
+ if (tx_desc->type == ISCSI_TX_DATAOUT)
+ kmem_cache_free(ig.desc_cache, tx_desc);
+
+ atomic_dec(&iser_conn->ib_conn->post_send_buf_count);
+
+ write_lock(conn->recv_lock);
+ if (conn->suspend_tx) {
+ iser_dbg("%ld resuming tx\n",jiffies);
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+ write_unlock(conn->recv_lock);
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+		/* this arithmetic is valid due to libiscsi's dd_data allocation */
+ mtask = (void *) ((long)(void *)tx_desc -
+ sizeof(struct iscsi_mgmt_task));
+ if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&conn->session->lock);
+ list_del(&mtask->running);
+ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
+ sizeof(void*));
+ spin_unlock(&session->lock);
+ }
+ }
+}
+
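+/* resets the task status, direction flags, data lengths and rdma registration descriptors */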
+void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+{
+ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+ iser_ctask->dir[ISER_DIR_IN] = 0;
+ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+ iser_ctask->data[ISER_DIR_IN].data_len = 0;
+ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+}
+
+void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+{
+ int deferred;
+
+	/* if we were reading, copy back to the unaligned sglist;
+	 * in any case, dma_unmap and free the copy
+	 */
+ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL)
+ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL)
+ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+
+ if (iser_ctask->dir[ISER_DIR_IN]) {
+ deferred = iser_regd_buff_release
+ (&iser_ctask->rdma_regd[ISER_DIR_IN]);
+ if (deferred) {
+ iser_err("References remain for BUF-IN rdma reg\n");
+ BUG();
+ }
+ }
+
+ if (iser_ctask->dir[ISER_DIR_OUT]) {
+ deferred = iser_regd_buff_release
+ (&iser_ctask->rdma_regd[ISER_DIR_OUT]);
+ if (deferred) {
+ iser_err("References remain for BUF-OUT rdma reg\n");
+ BUG();
+ }
+ }
+
+ iser_dma_unmap_task_data(iser_ctask);
+}
+
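+/* releases (derefs) every registered buffer attached to the dto */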
+void iser_dto_buffs_release(struct iser_dto *dto)
+{
+ int i;
+
+ for (i = 0; i < dto->regd_vector_len; i++)
+ iser_regd_buff_release(dto->regd[i]);
+}
+
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
new file mode 100644
index 000000000000..31950a522a1c
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
+
+#include "iscsi_iser.h"
+
+#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+/**
+ * Decrements the reference count for the
+ * registered buffer & releases it
+ *
+ * returns 0 if released, 1 if deferred
+ */
+int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
+{
+ struct device *dma_device;
+
+ if ((atomic_read(&regd_buf->ref_count) == 0) ||
+ atomic_dec_and_test(&regd_buf->ref_count)) {
+		/* if we used the dma mr, unreg is just a NOP */
+ if (regd_buf->reg.rkey != 0)
+ iser_unreg_mem(&regd_buf->reg);
+
+ if (regd_buf->dma_addr) {
+ dma_device = regd_buf->device->ib_device->dma_device;
+ dma_unmap_single(dma_device,
+ regd_buf->dma_addr,
+ regd_buf->data_size,
+ regd_buf->direction);
+ }
+		/* else this regd buf is associated with a task which we */
+		/* dma_unmap_single/sg later */
+ return 0;
+ } else {
+ iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
+ return 1;
+ }
+}
+
+/**
+ * iser_reg_single - fills registered buffer descriptor with
+ * registration information
+ */
+void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(device->ib_device->dma_device,
+ regd_buf->virt_addr,
+ regd_buf->data_size, direction);
+ BUG_ON(dma_mapping_error(dma_addr));
+
+ regd_buf->reg.lkey = device->mr->lkey;
+ regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
+ regd_buf->reg.len = regd_buf->data_size;
+ regd_buf->reg.va = dma_addr;
+
+ regd_buf->dma_addr = dma_addr;
+ regd_buf->direction = direction;
+}
+
+/**
+ * iser_start_rdma_unaligned_sg
+ */
+int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+{
+ int dma_nents;
+ struct device *dma_device;
+ char *mem = NULL;
+ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ mem = (void *)__get_free_pages(GFP_NOIO,
+ long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
+ else
+ mem = kmalloc(cmd_data_len, GFP_NOIO);
+
+ if (mem == NULL) {
+ iser_err("Failed to allocate mem size %d %d for copying sglist\n",
+ data->size,(int)cmd_data_len);
+ return -ENOMEM;
+ }
+
+ if (cmd_dir == ISER_DIR_OUT) {
+		/* copy the unaligned sg into the buffer used for RDMA */
+ struct scatterlist *sg = (struct scatterlist *)data->buf;
+ int i;
+ char *p, *from;
+
+ for (p = mem, i = 0; i < data->size; i++) {
+ from = kmap_atomic(sg[i].page, KM_USER0);
+ memcpy(p,
+ from + sg[i].offset,
+ sg[i].length);
+ kunmap_atomic(from, KM_USER0);
+ p += sg[i].length;
+ }
+ }
+
+ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+ iser_ctask->data_copy[cmd_dir].buf =
+ &iser_ctask->data_copy[cmd_dir].sg_single;
+ iser_ctask->data_copy[cmd_dir].size = 1;
+
+ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+ dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+ if (cmd_dir == ISER_DIR_OUT)
+ dma_nents = dma_map_sg(dma_device,
+ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1, DMA_TO_DEVICE);
+ else
+ dma_nents = dma_map_sg(dma_device,
+ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1, DMA_FROM_DEVICE);
+
+ BUG_ON(dma_nents == 0);
+
+ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+}
+
+/**
+ * iser_finalize_rdma_unaligned_sg
+ */
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+{
+ struct device *dma_device;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+ dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ if (cmd_dir == ISER_DIR_OUT)
+ dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
+ DMA_FROM_DEVICE);
+
+ if (cmd_dir == ISER_DIR_IN) {
+ char *mem;
+ struct scatterlist *sg;
+ unsigned char *p, *to;
+ unsigned int sg_size;
+ int i;
+
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+ sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ for (p = mem, i = 0; i < sg_size; i++){
+ to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
+ memcpy(to + sg[i].offset,
+ p,
+ sg[i].length);
+ kunmap_atomic(to, KM_SOFTIRQ0);
+ p += sg[i].length;
+ }
+ }
+
+ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+ long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
+ else
+ kfree(mem_copy->copy_buf);
+
+ mem_copy->copy_buf = NULL;
+}
+
+/**
+ * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
+ * and returns the length of the resulting physical address array (may be less
+ * than the original due to possible compaction).
+ *
+ * we build a "page vec" under the assumption that the SG meets the RDMA
+ * alignment requirements. Other than the first and last SG elements, all
+ * the "internal" elements can be compacted into a list whose elements are
+ * dma addresses of physical pages. The code also supports the odd case
+ * where --several fragments of the same page-- are present in the SG as
+ * consecutive elements. It also handles a one-entry SG.
+ */
+static int iser_sg_to_page_vec(struct iser_data_buf *data,
+ struct iser_page_vec *page_vec)
+{
+ struct scatterlist *sg = (struct scatterlist *)data->buf;
+ dma_addr_t first_addr, last_addr, page;
+ int start_aligned, end_aligned;
+ unsigned int cur_page = 0;
+ unsigned long total_sz = 0;
+ int i;
+
+ /* compute the offset of first element */
+ page_vec->offset = (u64) sg[0].offset;
+
+ for (i = 0; i < data->dma_nents; i++) {
+ total_sz += sg_dma_len(&sg[i]);
+
+ first_addr = sg_dma_address(&sg[i]);
+ last_addr = first_addr + sg_dma_len(&sg[i]);
+
+ start_aligned = !(first_addr & ~PAGE_MASK);
+ end_aligned = !(last_addr & ~PAGE_MASK);
+
+		/* continue to collect page fragments until aligned or the SG ends */
+ while (!end_aligned && (i + 1 < data->dma_nents)) {
+ i++;
+ total_sz += sg_dma_len(&sg[i]);
+ last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+ end_aligned = !(last_addr & ~PAGE_MASK);
+ }
+
+ first_addr = first_addr & PAGE_MASK;
+
+ for (page = first_addr; page < last_addr; page += PAGE_SIZE)
+ page_vec->pages[cur_page++] = page;
+
+ }
+ page_vec->data_size = total_sz;
+ iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
+ return cur_page;
+}
+
+#define MASK_4K ((1UL << 12) - 1) /* 0xFFF */
+#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & MASK_4K) == 0)
+
+/**
+ * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
+ * scatter-gather list of memory buffers that is correctly aligned for RDMA,
+ * and returns the number of correctly aligned entries. Supports the case where
+ * consecutive SG elements are actually fragments of the same physical page.
+ */
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+{
+ struct scatterlist *sg;
+ dma_addr_t end_addr, next_addr;
+ int i, cnt;
+ unsigned int ret_len = 0;
+
+ sg = (struct scatterlist *)data->buf;
+
+ for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+ /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
+ "offset: %ld sz: %ld\n", i,
+ (unsigned long)page_to_phys(sg[i].page),
+ (unsigned long)sg[i].offset,
+ (unsigned long)sg[i].length); */
+ end_addr = sg_dma_address(&sg[i]) +
+ sg_dma_len(&sg[i]);
+ /* iser_dbg("Checking sg iobuf end address "
+ "0x%08lX\n", end_addr); */
+ if (i + 1 < data->dma_nents) {
+ next_addr = sg_dma_address(&sg[i+1]);
+ /* are i, i+1 fragments of the same page? */
+ if (end_addr == next_addr)
+ continue;
+ else if (!IS_4K_ALIGNED(end_addr)) {
+ ret_len = cnt + 1;
+ break;
+ }
+ }
+ }
+ if (i == data->dma_nents)
+ ret_len = cnt; /* loop ended */
+ iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
+ ret_len, data->dma_nents, data);
+ return ret_len;
+}
+
+static void iser_data_buf_dump(struct iser_data_buf *data)
+{
+ struct scatterlist *sg = (struct scatterlist *)data->buf;
+ int i;
+
+ for (i = 0; i < data->size; i++)
+ iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
+ "off:%d sz:%d dma_len:%d\n",
+ i, (unsigned long)sg_dma_address(&sg[i]),
+ sg[i].page, sg[i].offset,
+ sg[i].length,sg_dma_len(&sg[i]));
+}
+
+static void iser_dump_page_vec(struct iser_page_vec *page_vec)
+{
+ int i;
+
+ iser_err("page vec length %d data size %d\n",
+ page_vec->length, page_vec->data_size);
+ for (i = 0; i < page_vec->length; i++)
+ iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
+}
+
+static void iser_page_vec_build(struct iser_data_buf *data,
+ struct iser_page_vec *page_vec)
+{
+ int page_vec_len = 0;
+
+ page_vec->length = 0;
+ page_vec->offset = 0;
+
+ iser_dbg("Translating sg sz: %d\n", data->dma_nents);
+ page_vec_len = iser_sg_to_page_vec(data,page_vec);
+ iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
+
+ page_vec->length = page_vec_len;
+
+ if (page_vec_len * PAGE_SIZE < page_vec->data_size) {
+ iser_err("page_vec too short to hold this SG\n");
+ iser_data_buf_dump(data);
+ iser_dump_page_vec(page_vec);
+ BUG();
+ }
+}
+
+/**
+ * iser_reg_rdma_mem - Registers memory intended for RDMA,
+ * obtaining rkey and va
+ *
+ * returns 0 on success, errno code on failure
+ */
+int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+
+ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem);
+ if (aligned_len != mem->size) {
+ iser_err("rdma alignment violation %d/%d aligned\n",
+ aligned_len, mem->size);
+ iser_data_buf_dump(mem);
+		/* allocate a copy buf; if we are writing, copy the */
+		/* unaligned scatterlist and dma map the copy       */
+ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ iser_page_vec_build(mem, ib_conn->page_vec);
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err)
+ return err;
+
+	/* take a reference on this regd buf so that it will not be released *
+	 * (e.g. in send dto completion) before we get the scsi response     */
+ atomic_inc(&regd_buf->ref_count);
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
new file mode 100644
index 000000000000..ff117bbf81b4
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include "iscsi_iser.h"
+
+#define ISCSI_ISER_MAX_CONN 8
+#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \
+ ISER_QP_MAX_REQ_DTOS) * \
+ ISCSI_ISER_MAX_CONN)
+
+static void iser_cq_tasklet_fn(unsigned long data);
+static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
+static void iser_comp_error_worker(void *data);
+
+static void iser_cq_event_callback(struct ib_event *cause, void *context)
+{
+ iser_err("got cq event %d \n", cause->event);
+}
+
+static void iser_qp_event_callback(struct ib_event *cause, void *context)
+{
+ iser_err("got qp event %d\n",cause->event);
+}
+
+/**
+ * iser_create_device_ib_res - creates a Protection Domain (PD), Completion
+ * Queue (CQ) and DMA Memory Region (DMA MR) with the device associated with
+ * the adapter.
+ *
+ * returns 0 on success, -1 on failure
+ */
+static int iser_create_device_ib_res(struct iser_device *device)
+{
+ device->pd = ib_alloc_pd(device->ib_device);
+ if (IS_ERR(device->pd))
+ goto pd_err;
+
+ device->cq = ib_create_cq(device->ib_device,
+ iser_cq_callback,
+ iser_cq_event_callback,
+ (void *)device,
+ ISER_MAX_CQ_LEN);
+ if (IS_ERR(device->cq))
+ goto cq_err;
+
+ if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
+ goto cq_arm_err;
+
+ tasklet_init(&device->cq_tasklet,
+ iser_cq_tasklet_fn,
+ (unsigned long)device);
+
+ device->mr = ib_get_dma_mr(device->pd,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(device->mr))
+ goto dma_mr_err;
+
+ return 0;
+
+dma_mr_err:
+ tasklet_kill(&device->cq_tasklet);
+cq_arm_err:
+ ib_destroy_cq(device->cq);
+cq_err:
+ ib_dealloc_pd(device->pd);
+pd_err:
+ iser_err("failed to allocate an IB resource\n");
+ return -1;
+}
+
+/**
+ * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
+ * CQ and PD created with the device associated with the adapter.
+ */
+static void iser_free_device_ib_res(struct iser_device *device)
+{
+ BUG_ON(device->mr == NULL);
+
+ tasklet_kill(&device->cq_tasklet);
+
+ (void)ib_dereg_mr(device->mr);
+ (void)ib_destroy_cq(device->cq);
+ (void)ib_dealloc_pd(device->pd);
+
+ device->mr = NULL;
+ device->cq = NULL;
+ device->pd = NULL;
+}
+
+/**
+ * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
+ *
+ * returns 0 on success, -1 on failure
+ */
+static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+{
+ struct iser_device *device;
+ struct ib_qp_init_attr init_attr;
+ int ret;
+ struct ib_fmr_pool_param params;
+
+ BUG_ON(ib_conn->device == NULL);
+
+ device = ib_conn->device;
+
+ ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
+ (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
+ GFP_KERNEL);
+ if (!ib_conn->page_vec) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
+
+ params.page_shift = PAGE_SHIFT;
+	/* when the first/last SG elements are not start/end *
+	 * page aligned, the map would be of N+1 pages       */
+ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
+ /* make the pool size twice the max number of SCSI commands *
+ * the ML is expected to queue, watermark for unmap at 50% */
+ params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
+ params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.cache = 0;
+ params.flush_function = NULL;
+ params.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
+
+ ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
+ if (IS_ERR(ib_conn->fmr_pool)) {
+ ret = PTR_ERR(ib_conn->fmr_pool);
+ goto fmr_pool_err;
+ }
+
+ memset(&init_attr, 0, sizeof init_attr);
+
+ init_attr.event_handler = iser_qp_event_callback;
+ init_attr.qp_context = (void *)ib_conn;
+ init_attr.send_cq = device->cq;
+ init_attr.recv_cq = device->cq;
+ init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
+ init_attr.cap.max_recv_sge = 2;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+
+ ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
+ if (ret)
+ goto qp_err;
+
+ ib_conn->qp = ib_conn->cma_id->qp;
+ iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+ ib_conn, ib_conn->cma_id,
+ ib_conn->fmr_pool, ib_conn->cma_id->qp);
+ return ret;
+
+qp_err:
+ (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
+fmr_pool_err:
+ kfree(ib_conn->page_vec);
+alloc_err:
+ iser_err("unable to alloc mem or create resource, err %d\n", ret);
+ return ret;
+}
+
+/**
+ * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * -1 on failure
+ */
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
+{
+ BUG_ON(ib_conn == NULL);
+
+ iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+ ib_conn, ib_conn->cma_id,
+ ib_conn->fmr_pool, ib_conn->qp);
+
+ /* qp is created only once both addr & route are resolved */
+ if (ib_conn->fmr_pool != NULL)
+ ib_destroy_fmr_pool(ib_conn->fmr_pool);
+
+ if (ib_conn->qp != NULL)
+ rdma_destroy_qp(ib_conn->cma_id);
+
+ if (ib_conn->cma_id != NULL)
+ rdma_destroy_id(ib_conn->cma_id);
+
+ ib_conn->fmr_pool = NULL;
+ ib_conn->qp = NULL;
+ ib_conn->cma_id = NULL;
+ kfree(ib_conn->page_vec);
+
+ return 0;
+}
+
+/**
+ * based on the resolved device node GUID, see if there is already an
+ * allocated device for this ib_device. If there is no such device, create one.
+ */
+static
+struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
+{
+ struct list_head *p_list;
+ struct iser_device *device = NULL;
+
+ mutex_lock(&ig.device_list_mutex);
+
+ p_list = ig.device_list.next;
+ while (p_list != &ig.device_list) {
+ device = list_entry(p_list, struct iser_device, ig_list);
+ /* find if there's a match using the node GUID */
+ if (device->ib_device->node_guid == cma_id->device->node_guid)
+ break;
+ }
+
+ if (device == NULL) {
+ device = kzalloc(sizeof *device, GFP_KERNEL);
+ if (device == NULL)
+ goto out;
+		/* assign this ib_device to the iser device */
+ device->ib_device = cma_id->device;
+ /* init the device and link it into ig device list */
+ if (iser_create_device_ib_res(device)) {
+ kfree(device);
+ device = NULL;
+ goto out;
+ }
+ list_add(&device->ig_list, &ig.device_list);
+ }
+out:
+ BUG_ON(device == NULL);
+ device->refcount++;
+ mutex_unlock(&ig.device_list_mutex);
+ return device;
+}
+
+/* if there's no demand for this device, release it */
+static void iser_device_try_release(struct iser_device *device)
+{
+ mutex_lock(&ig.device_list_mutex);
+ device->refcount--;
+ iser_err("device %p refcount %d\n",device,device->refcount);
+ if (!device->refcount) {
+ iser_free_device_ib_res(device);
+ list_del(&device->ig_list);
+ kfree(device);
+ }
+ mutex_unlock(&ig.device_list_mutex);
+}
+
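+/* returns non-zero when the connection state equals comp; the check is done under the conn lock */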
+int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp)
+{
+ int ret;
+
+ spin_lock_bh(&ib_conn->lock);
+ ret = (ib_conn->state == comp);
+ spin_unlock_bh(&ib_conn->lock);
+ return ret;
+}
+
+static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp,
+ enum iser_ib_conn_state exch)
+{
+ int ret;
+
+ spin_lock_bh(&ib_conn->lock);
+ if ((ret = (ib_conn->state == comp)))
+ ib_conn->state = exch;
+ spin_unlock_bh(&ib_conn->lock);
+ return ret;
+}
+
+/**
+ * triggers start of the disconnect procedures and wait for them to be done
+ */
+void iser_conn_terminate(struct iser_conn *ib_conn)
+{
+ int err = 0;
+
+ /* change the ib conn state only if the conn is UP, however always call
+ * rdma_disconnect since this is the only way to cause the CMA to change
+ * the QP state to ERROR
+ */
+
+ iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
+ err = rdma_disconnect(ib_conn->cma_id);
+ if (err)
+ iser_err("Failed to disconnect, conn: 0x%p err %d\n",
+ ib_conn,err);
+
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+ iser_conn_release(ib_conn);
+}
+
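+/* moves the connection to the DOWN state and wakes up its waiters */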
+static void iser_connect_error(struct rdma_cm_id *cma_id)
+{
+ struct iser_conn *ib_conn;
+ ib_conn = (struct iser_conn *)cma_id->context;
+
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
+}
+
+static void iser_addr_handler(struct rdma_cm_id *cma_id)
+{
+ struct iser_device *device;
+ struct iser_conn *ib_conn;
+ int ret;
+
+ device = iser_device_find_by_ib_device(cma_id);
+ ib_conn = (struct iser_conn *)cma_id->context;
+ ib_conn->device = device;
+
+ ret = rdma_resolve_route(cma_id, 1000);
+ if (ret) {
+ iser_err("resolve route failed: %d\n", ret);
+ iser_connect_error(cma_id);
+ }
+ return;
+}
+
+static void iser_route_handler(struct rdma_cm_id *cma_id)
+{
+ struct rdma_conn_param conn_param;
+ int ret;
+
+ ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
+ if (ret)
+ goto failure;
+
+ iser_dbg("path.mtu is %d setting it to %d\n",
+ cma_id->route.path_rec->mtu, IB_MTU_1024);
+
+ /* we must set the MTU to 1024 as this is what the target is assuming */
+ if (cma_id->route.path_rec->mtu > IB_MTU_1024)
+ cma_id->route.path_rec->mtu = IB_MTU_1024;
+
+ memset(&conn_param, 0, sizeof conn_param);
+ conn_param.responder_resources = 4;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = 7;
+ conn_param.rnr_retry_count = 6;
+
+ ret = rdma_connect(cma_id, &conn_param);
+ if (ret) {
+ iser_err("failure connecting: %d\n", ret);
+ goto failure;
+ }
+
+ return;
+failure:
+ iser_connect_error(cma_id);
+}
+
+static void iser_connected_handler(struct rdma_cm_id *cma_id)
+{
+ struct iser_conn *ib_conn;
+
+ ib_conn = (struct iser_conn *)cma_id->context;
+ ib_conn->state = ISER_CONN_UP;
+ wake_up_interruptible(&ib_conn->wait);
+}
+
+static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+{
+ struct iser_conn *ib_conn;
+
+ ib_conn = (struct iser_conn *)cma_id->context;
+ ib_conn->disc_evt_flag = 1;
+
+ /* getting here when the state is UP means that the conn is being *
+ * terminated asynchronously from the iSCSI layer's perspective. */
+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
+ ISER_CONN_TERMINATING))
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+
+ /* Complete the termination process if no posts are pending */
+ if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) &&
+ (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
+ }
+}
+
+static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+ int ret = 0;
+
+ iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ iser_addr_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ iser_route_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ iser_connected_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_REJECTED:
+ iser_err("event: %d, error: %d\n", event->event, event->status);
+ iser_connect_error(cma_id);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ iser_disconnected_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ BUG();
+ break;
+ case RDMA_CM_EVENT_CONNECT_RESPONSE:
+ BUG();
+ break;
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ default:
+ break;
+ }
+ return ret;
+}
+
+int iser_conn_init(struct iser_conn **ibconn)
+{
+ struct iser_conn *ib_conn;
+
+ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
+ if (!ib_conn) {
+ iser_err("can't alloc memory for struct iser_conn\n");
+ return -ENOMEM;
+ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+ INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
+ ib_conn);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
+
+ *ibconn = ib_conn;
+ return 0;
+}
+
+ /**
+ * starts the process of connecting to the target
+ * sleeps until the connection is established or rejected
+ */
+int iser_connect(struct iser_conn *ib_conn,
+ struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
+ int non_blocking)
+{
+ struct sockaddr *src, *dst;
+ int err = 0;
+
+ sprintf(ib_conn->name,"%d.%d.%d.%d:%d",
+ NIPQUAD(dst_addr->sin_addr.s_addr), dst_addr->sin_port);
+
+ /* the device is known only --after-- address resolution */
+ ib_conn->device = NULL;
+
+ iser_err("connecting to: %d.%d.%d.%d, port 0x%x\n",
+ NIPQUAD(dst_addr->sin_addr), dst_addr->sin_port);
+
+ ib_conn->state = ISER_CONN_PENDING;
+
+ ib_conn->cma_id = rdma_create_id(iser_cma_handler,
+ (void *)ib_conn,
+ RDMA_PS_TCP);
+ if (IS_ERR(ib_conn->cma_id)) {
+ err = PTR_ERR(ib_conn->cma_id);
+ iser_err("rdma_create_id failed: %d\n", err);
+ goto id_failure;
+ }
+
+ src = (struct sockaddr *)src_addr;
+ dst = (struct sockaddr *)dst_addr;
+ err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
+ if (err) {
+ iser_err("rdma_resolve_addr failed: %d\n", err);
+ goto addr_failure;
+ }
+
+ if (!non_blocking) {
+ wait_event_interruptible(ib_conn->wait,
+ (ib_conn->state != ISER_CONN_PENDING));
+
+ if (ib_conn->state != ISER_CONN_UP) {
+ err = -EIO;
+ goto connect_failure;
+ }
+ }
+
+ mutex_lock(&ig.connlist_mutex);
+ list_add(&ib_conn->conn_list, &ig.connlist);
+ mutex_unlock(&ig.connlist_mutex);
+ return 0;
+
+id_failure:
+ ib_conn->cma_id = NULL;
+addr_failure:
+ ib_conn->state = ISER_CONN_DOWN;
+connect_failure:
+ iser_conn_release(ib_conn);
+ return err;
+}
+
+/**
+ * Frees all conn objects and deallocs conn descriptor
+ */
+void iser_conn_release(struct iser_conn *ib_conn)
+{
+ struct iser_device *device = ib_conn->device;
+
+ BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+
+ mutex_lock(&ig.connlist_mutex);
+ list_del(&ib_conn->conn_list);
+ mutex_unlock(&ig.connlist_mutex);
+
+ iser_free_ib_conn_res(ib_conn);
+ ib_conn->device = NULL;
+ /* on EVENT_ADDR_ERROR there's no device yet for this conn */
+ if (device != NULL)
+ iser_device_try_release(device);
+ kfree(ib_conn);
+}
+
+
+/**
+ * iser_reg_page_vec - Register physical memory
+ *
+ * returns: 0 on success, errno code on failure
+ */
+int iser_reg_page_vec(struct iser_conn *ib_conn,
+ struct iser_page_vec *page_vec,
+ struct iser_mem_reg *mem_reg)
+{
+ struct ib_pool_fmr *mem;
+ u64 io_addr;
+ u64 *page_list;
+ int status;
+
+ page_list = page_vec->pages;
+ io_addr = page_list[0];
+
+ mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
+ page_list,
+ page_vec->length,
+ &io_addr);
+
+ if (IS_ERR(mem)) {
+ status = (int)PTR_ERR(mem);
+ iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
+ return status;
+ }
+
+ mem_reg->lkey = mem->fmr->lkey;
+ mem_reg->rkey = mem->fmr->rkey;
+ mem_reg->len = page_vec->length * PAGE_SIZE;
+ mem_reg->va = io_addr;
+ mem_reg->mem_h = (void *)mem;
+
+ mem_reg->va += page_vec->offset;
+ mem_reg->len = page_vec->data_size;
+
+ iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
+ "entry[0]: (0x%08lx,%ld)] -> "
+ "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
+ page_vec, page_vec->length,
+ (unsigned long)page_vec->pages[0],
+ (unsigned long)page_vec->data_size,
+ (unsigned int)mem_reg->lkey, mem_reg->mem_h,
+ (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
+ return 0;
+}
+
+/**
+ * Unregister (previously registered) memory.
+ */
+void iser_unreg_mem(struct iser_mem_reg *reg)
+{
+ int ret;
+
+ iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
+
+ ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
+ if (ret)
+ iser_err("ib_fmr_pool_unmap failed %d\n", ret);
+
+ reg->mem_h = NULL;
+}
+
+/**
+ * iser_dto_to_iov - builds IOV from a dto descriptor
+ */
+static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
+{
+ int i;
+ struct ib_sge *sge;
+ struct iser_regd_buf *regd_buf;
+
+ if (dto->regd_vector_len > iov_len) {
+ iser_err("iov size %d too small for posting dto of len %d\n",
+ iov_len, dto->regd_vector_len);
+ BUG();
+ }
+
+ for (i = 0; i < dto->regd_vector_len; i++) {
+ sge = &iov[i];
+ regd_buf = dto->regd[i];
+
+ sge->addr = regd_buf->reg.va;
+ sge->length = regd_buf->reg.len;
+ sge->lkey = regd_buf->reg.lkey;
+
+ if (dto->used_sz[i] > 0) /* Adjust size */
+ sge->length = dto->used_sz[i];
+
+ /* offset and length should not exceed the regd buf length */
+ if (sge->length + dto->offset[i] > regd_buf->reg.len) {
+ iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
+ "%ld in dto:0x%p [%d], va:0x%08lX\n",
+ (unsigned long)sge->length, dto->offset[i],
+ (unsigned long)regd_buf->reg.len, dto, i,
+ (unsigned long)sge->addr);
+ BUG();
+ }
+
+ sge->addr += dto->offset[i]; /* Adjust offset */
+ }
+}
+
+/**
+ * iser_post_recv - Posts a receive buffer.
+ *
+ * returns 0 on success, -1 on failure
+ */
+int iser_post_recv(struct iser_desc *rx_desc)
+{
+ int ib_ret, ret_val = 0;
+ struct ib_recv_wr recv_wr, *recv_wr_failed;
+ struct ib_sge iov[2];
+ struct iser_conn *ib_conn;
+ struct iser_dto *recv_dto = &rx_desc->dto;
+
+ /* Retrieve conn */
+ ib_conn = recv_dto->conn->ib_conn;
+
+ iser_dto_to_iov(recv_dto, iov, 2);
+
+ recv_wr.next = NULL;
+ recv_wr.sg_list = iov;
+ recv_wr.num_sge = recv_dto->regd_vector_len;
+ recv_wr.wr_id = (unsigned long)rx_desc;
+
+ atomic_inc(&ib_conn->post_recv_buf_count);
+ ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed);
+ if (ib_ret) {
+ iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+ atomic_dec(&ib_conn->post_recv_buf_count);
+ ret_val = -1;
+ }
+
+ return ret_val;
+}
+
+/**
+ * iser_start_send - Initiate a Send DTO operation
+ *
+ * returns 0 on success, -1 on failure
+ */
+int iser_post_send(struct iser_desc *tx_desc)
+{
+ int ib_ret, ret_val = 0;
+ struct ib_send_wr send_wr, *send_wr_failed;
+ struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
+ struct iser_conn *ib_conn;
+ struct iser_dto *dto = &tx_desc->dto;
+
+ ib_conn = dto->conn->ib_conn;
+
+ iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
+
+ send_wr.next = NULL;
+ send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.sg_list = iov;
+ send_wr.num_sge = dto->regd_vector_len;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0;
+
+ atomic_inc(&ib_conn->post_send_buf_count);
+
+ ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
+ if (ib_ret) {
+ iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
+ dto, dto->regd_vector_len);
+ iser_err("ib_post_send failed, ret:%d\n", ib_ret);
+ atomic_dec(&ib_conn->post_send_buf_count);
+ ret_val = -1;
+ }
+
+ return ret_val;
+}
+
+static void iser_comp_error_worker(void *data)
+{
+ struct iser_conn *ib_conn = data;
+
+ /* getting here when the state is UP means that the conn is being *
+ * terminated asynchronously from the iSCSI layer's perspective. */
+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
+ ISER_CONN_TERMINATING))
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+
+	/* complete the termination process if the disconnect event was delivered; *
+	 * note there are no more uncompleted posts to the QP                      */
+ if (ib_conn->disc_evt_flag) {
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
+ }
+}
+
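+/* releases the resources of a descriptor whose completion failed; once no *
+ * posts remain on the connection, schedules the completion-error worker   */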
+static void iser_handle_comp_error(struct iser_desc *desc)
+{
+ struct iser_dto *dto = &desc->dto;
+ struct iser_conn *ib_conn = dto->conn->ib_conn;
+
+ iser_dto_buffs_release(dto);
+
+ if (desc->type == ISCSI_RX) {
+ kfree(desc->data);
+ kmem_cache_free(ig.desc_cache, desc);
+ atomic_dec(&ib_conn->post_recv_buf_count);
+ } else { /* type is TX control/command/dataout */
+ if (desc->type == ISCSI_TX_DATAOUT)
+ kmem_cache_free(ig.desc_cache, desc);
+ atomic_dec(&ib_conn->post_send_buf_count);
+ }
+
+ if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
+ atomic_read(&ib_conn->post_send_buf_count) == 0)
+ schedule_work(&ib_conn->comperror_work);
+}
+
+static void iser_cq_tasklet_fn(unsigned long data)
+{
+ struct iser_device *device = (struct iser_device *)data;
+ struct ib_cq *cq = device->cq;
+ struct ib_wc wc;
+ struct iser_desc *desc;
+ unsigned long xfer_len;
+
+ while (ib_poll_cq(cq, 1, &wc) == 1) {
+ desc = (struct iser_desc *) (unsigned long) wc.wr_id;
+ BUG_ON(desc == NULL);
+
+ if (wc.status == IB_WC_SUCCESS) {
+ if (desc->type == ISCSI_RX) {
+ xfer_len = (unsigned long)wc.byte_len;
+ iser_rcv_completion(desc, xfer_len);
+ } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
+ iser_snd_completion(desc);
+ } else {
+ iser_err("comp w. error op %d status %d\n",desc->type,wc.status);
+ iser_handle_comp_error(desc);
+ }
+ }
+	/* #warning "it is assumed here that arming the CQ only once it is empty" *
+	 * " would not cause interrupts to be missed"                              */
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+}
+
+static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
+{
+ struct iser_device *device = (struct iser_device *)cq_context;
+
+ tasklet_schedule(&device->cq_tasklet);
+}
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 5f561fce32d8..a29d5ceb00cf 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -78,14 +78,19 @@ static int evdev_fasync(int fd, struct file *file, int on)
{
int retval;
struct evdev_list *list = file->private_data;
+
retval = fasync_helper(fd, file, on, &list->fasync);
+
return retval < 0 ? retval : 0;
}
-static int evdev_flush(struct file * file, fl_owner_t id)
+static int evdev_flush(struct file *file, fl_owner_t id)
{
struct evdev_list *list = file->private_data;
- if (!list->evdev->exist) return -ENODEV;
+
+ if (!list->evdev->exist)
+ return -ENODEV;
+
return input_flush_device(&list->evdev->handle, file);
}
@@ -300,6 +305,7 @@ static ssize_t evdev_read(struct file * file, char __user * buffer, size_t count
static unsigned int evdev_poll(struct file *file, poll_table *wait)
{
struct evdev_list *list = file->private_data;
+
poll_wait(file, &list->evdev->wait, wait);
return ((list->head == list->tail) ? 0 : (POLLIN | POLLRDNORM)) |
(list->evdev->exist ? 0 : (POLLHUP | POLLERR));
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 3038c268917d..a90486f5e491 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -28,20 +28,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(input_allocate_device);
-EXPORT_SYMBOL(input_register_device);
-EXPORT_SYMBOL(input_unregister_device);
-EXPORT_SYMBOL(input_register_handler);
-EXPORT_SYMBOL(input_unregister_handler);
-EXPORT_SYMBOL(input_grab_device);
-EXPORT_SYMBOL(input_release_device);
-EXPORT_SYMBOL(input_open_device);
-EXPORT_SYMBOL(input_close_device);
-EXPORT_SYMBOL(input_accept_process);
-EXPORT_SYMBOL(input_flush_device);
-EXPORT_SYMBOL(input_event);
-EXPORT_SYMBOL_GPL(input_class);
-
#define INPUT_DEVICES 256
static LIST_HEAD(input_dev_list);
@@ -63,11 +49,13 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
case EV_SYN:
switch (code) {
case SYN_CONFIG:
- if (dev->event) dev->event(dev, type, code, value);
+ if (dev->event)
+ dev->event(dev, type, code, value);
break;
case SYN_REPORT:
- if (dev->sync) return;
+ if (dev->sync)
+ return;
dev->sync = 1;
break;
}
@@ -136,7 +124,8 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
if (code > MSC_MAX || !test_bit(code, dev->mscbit))
return;
- if (dev->event) dev->event(dev, type, code, value);
+ if (dev->event)
+ dev->event(dev, type, code, value);
break;
@@ -146,7 +135,9 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
return;
change_bit(code, dev->led);
- if (dev->event) dev->event(dev, type, code, value);
+
+ if (dev->event)
+ dev->event(dev, type, code, value);
break;
@@ -158,21 +149,25 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
if (!!test_bit(code, dev->snd) != !!value)
change_bit(code, dev->snd);
- if (dev->event) dev->event(dev, type, code, value);
+ if (dev->event)
+ dev->event(dev, type, code, value);
break;
case EV_REP:
- if (code > REP_MAX || value < 0 || dev->rep[code] == value) return;
+ if (code > REP_MAX || value < 0 || dev->rep[code] == value)
+ return;
dev->rep[code] = value;
- if (dev->event) dev->event(dev, type, code, value);
+ if (dev->event)
+ dev->event(dev, type, code, value);
break;
case EV_FF:
- if (dev->event) dev->event(dev, type, code, value);
+ if (dev->event)
+ dev->event(dev, type, code, value);
break;
}
@@ -186,6 +181,7 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
if (handle->open)
handle->handler->event(handle, type, code, value);
}
+EXPORT_SYMBOL(input_event);
static void input_repeat_key(unsigned long data)
{
@@ -208,6 +204,7 @@ int input_accept_process(struct input_handle *handle, struct file *file)
return 0;
}
+EXPORT_SYMBOL(input_accept_process);
int input_grab_device(struct input_handle *handle)
{
@@ -217,12 +214,14 @@ int input_grab_device(struct input_handle *handle)
handle->dev->grab = handle;
return 0;
}
+EXPORT_SYMBOL(input_grab_device);
void input_release_device(struct input_handle *handle)
{
if (handle->dev->grab == handle)
handle->dev->grab = NULL;
}
+EXPORT_SYMBOL(input_release_device);
int input_open_device(struct input_handle *handle)
{
@@ -245,6 +244,7 @@ int input_open_device(struct input_handle *handle)
return err;
}
+EXPORT_SYMBOL(input_open_device);
int input_flush_device(struct input_handle* handle, struct file* file)
{
@@ -253,6 +253,7 @@ int input_flush_device(struct input_handle* handle, struct file* file)
return 0;
}
+EXPORT_SYMBOL(input_flush_device);
void input_close_device(struct input_handle *handle)
{
@@ -268,6 +269,7 @@ void input_close_device(struct input_handle *handle)
mutex_unlock(&dev->mutex);
}
+EXPORT_SYMBOL(input_close_device);
static void input_link_handle(struct input_handle *handle)
{
@@ -335,9 +337,11 @@ static inline void input_wakeup_procfs_readers(void)
static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
{
int state = input_devices_state;
+
poll_wait(file, &input_devices_poll_wait, wait);
if (state != input_devices_state)
return POLLIN | POLLRDNORM;
+
return 0;
}
@@ -629,7 +633,7 @@ static ssize_t input_dev_show_modalias(struct class_device *dev, char *buf)
len = input_print_modalias(buf, PAGE_SIZE, id, 1);
- return max_t(int, len, PAGE_SIZE);
+ return min_t(int, len, PAGE_SIZE);
}
static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
@@ -862,6 +866,7 @@ struct class input_class = {
.release = input_dev_release,
.uevent = input_dev_uevent,
};
+EXPORT_SYMBOL_GPL(input_class);
struct input_dev *input_allocate_device(void)
{
@@ -872,12 +877,27 @@ struct input_dev *input_allocate_device(void)
dev->dynalloc = 1;
dev->cdev.class = &input_class;
class_device_initialize(&dev->cdev);
+ mutex_init(&dev->mutex);
INIT_LIST_HEAD(&dev->h_list);
INIT_LIST_HEAD(&dev->node);
}
return dev;
}
+EXPORT_SYMBOL(input_allocate_device);
+
+void input_free_device(struct input_dev *dev)
+{
+ if (dev) {
+
+ mutex_lock(&dev->mutex);
+ dev->name = dev->phys = dev->uniq = NULL;
+ mutex_unlock(&dev->mutex);
+
+ input_put_device(dev);
+ }
+}
+EXPORT_SYMBOL(input_free_device);
int input_register_device(struct input_dev *dev)
{
@@ -895,7 +915,6 @@ int input_register_device(struct input_dev *dev)
return -EINVAL;
}
- mutex_init(&dev->mutex);
set_bit(EV_SYN, dev->evbit);
/*
@@ -956,12 +975,14 @@ int input_register_device(struct input_dev *dev)
fail1: class_device_del(&dev->cdev);
return error;
}
+EXPORT_SYMBOL(input_register_device);
void input_unregister_device(struct input_dev *dev)
{
- struct list_head * node, * next;
+ struct list_head *node, *next;
- if (!dev) return;
+ if (!dev)
+ return;
del_timer_sync(&dev->timer);
@@ -977,10 +998,16 @@ void input_unregister_device(struct input_dev *dev)
sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group);
sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group);
sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group);
+
+ mutex_lock(&dev->mutex);
+ dev->name = dev->phys = dev->uniq = NULL;
+ mutex_unlock(&dev->mutex);
+
class_device_unregister(&dev->cdev);
input_wakeup_procfs_readers();
}
+EXPORT_SYMBOL(input_unregister_device);
void input_register_handler(struct input_handler *handler)
{
@@ -988,7 +1015,8 @@ void input_register_handler(struct input_handler *handler)
struct input_handle *handle;
struct input_device_id *id;
- if (!handler) return;
+ if (!handler)
+ return;
INIT_LIST_HEAD(&handler->h_list);
@@ -1005,10 +1033,11 @@ void input_register_handler(struct input_handler *handler)
input_wakeup_procfs_readers();
}
+EXPORT_SYMBOL(input_register_handler);
void input_unregister_handler(struct input_handler *handler)
{
- struct list_head * node, * next;
+ struct list_head *node, *next;
list_for_each_safe(node, next, &handler->h_list) {
struct input_handle * handle = to_handle_h(node);
@@ -1024,6 +1053,7 @@ void input_unregister_handler(struct input_handler *handler)
input_wakeup_procfs_readers();
}
+EXPORT_SYMBOL(input_unregister_handler);
static int input_open_file(struct inode *inode, struct file *file)
{
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 949bdcef8c2b..d67157513bf7 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -81,10 +81,7 @@ static int joydev_correct(int value, struct js_corr *corr)
return 0;
}
- if (value < -32767) return -32767;
- if (value > 32767) return 32767;
-
- return value;
+ return value < -32767 ? -32767 : (value > 32767 ? 32767 : value);
}
static void joydev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
@@ -96,7 +93,8 @@ static void joydev_event(struct input_handle *handle, unsigned int type, unsigne
switch (type) {
case EV_KEY:
- if (code < BTN_MISC || value == 2) return;
+ if (code < BTN_MISC || value == 2)
+ return;
event.type = JS_EVENT_BUTTON;
event.number = joydev->keymap[code - BTN_MISC];
event.value = value;
@@ -106,7 +104,8 @@ static void joydev_event(struct input_handle *handle, unsigned int type, unsigne
event.type = JS_EVENT_AXIS;
event.number = joydev->absmap[code];
event.value = joydev_correct(value, joydev->corr + event.number);
- if (event.value == joydev->abs[event.number]) return;
+ if (event.value == joydev->abs[event.number])
+ return;
joydev->abs[event.number] = event.value;
break;
@@ -134,7 +133,9 @@ static int joydev_fasync(int fd, struct file *file, int on)
{
int retval;
struct joydev_list *list = file->private_data;
+
retval = fasync_helper(fd, file, on, &list->fasync);
+
return retval < 0 ? retval : 0;
}
@@ -222,12 +223,12 @@ static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, lo
return sizeof(struct JS_DATA_TYPE);
}
- if (list->startup == joydev->nabs + joydev->nkey
- && list->head == list->tail && (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ if (list->startup == joydev->nabs + joydev->nkey &&
+ list->head == list->tail && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
retval = wait_event_interruptible(list->joydev->wait,
- !list->joydev->exist ||
+ !list->joydev->exist ||
list->startup < joydev->nabs + joydev->nkey ||
list->head != list->tail);
@@ -276,8 +277,9 @@ static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, lo
static unsigned int joydev_poll(struct file *file, poll_table *wait)
{
struct joydev_list *list = file->private_data;
+
poll_wait(file, &list->joydev->wait, wait);
- return ((list->head != list->tail || list->startup < list->joydev->nabs + list->joydev->nkey) ?
+ return ((list->head != list->tail || list->startup < list->joydev->nabs + list->joydev->nkey) ?
(POLLIN | POLLRDNORM) : 0) | (list->joydev->exist ? 0 : (POLLHUP | POLLERR));
}
@@ -291,20 +293,26 @@ static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __u
case JS_SET_CAL:
return copy_from_user(&joydev->glue.JS_CORR, argp,
sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
+
case JS_GET_CAL:
return copy_to_user(argp, &joydev->glue.JS_CORR,
sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
+
case JS_SET_TIMEOUT:
return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
+
case JS_GET_TIMEOUT:
return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
case JSIOCGVERSION:
return put_user(JS_VERSION, (__u32 __user *) argp);
+
case JSIOCGAXES:
return put_user(joydev->nabs, (__u8 __user *) argp);
+
case JSIOCGBUTTONS:
return put_user(joydev->nkey, (__u8 __user *) argp);
+
case JSIOCSCORR:
if (copy_from_user(joydev->corr, argp,
sizeof(joydev->corr[0]) * joydev->nabs))
@@ -314,38 +322,49 @@ static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __u
joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i);
}
return 0;
+
case JSIOCGCORR:
return copy_to_user(argp, joydev->corr,
sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
+
case JSIOCSAXMAP:
if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1)))
return -EFAULT;
for (i = 0; i < joydev->nabs; i++) {
- if (joydev->abspam[i] > ABS_MAX) return -EINVAL;
+ if (joydev->abspam[i] > ABS_MAX)
+ return -EINVAL;
joydev->absmap[joydev->abspam[i]] = i;
}
return 0;
+
case JSIOCGAXMAP:
return copy_to_user(argp, joydev->abspam,
sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0;
+
case JSIOCSBTNMAP:
if (copy_from_user(joydev->keypam, argp, sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)))
return -EFAULT;
for (i = 0; i < joydev->nkey; i++) {
- if (joydev->keypam[i] > KEY_MAX || joydev->keypam[i] < BTN_MISC) return -EINVAL;
+ if (joydev->keypam[i] > KEY_MAX || joydev->keypam[i] < BTN_MISC)
+ return -EINVAL;
joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
}
return 0;
+
case JSIOCGBTNMAP:
return copy_to_user(argp, joydev->keypam,
sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0;
+
default:
if ((cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT)) == JSIOCGNAME(0)) {
int len;
- if (!dev->name) return 0;
+ if (!dev->name)
+ return 0;
len = strlen(dev->name) + 1;
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- if (copy_to_user(argp, dev->name, len)) return -EFAULT;
+ if (len > _IOC_SIZE(cmd))
+ len = _IOC_SIZE(cmd);
+ if (copy_to_user(argp, dev->name, len))
+ return -EFAULT;
return len;
}
}
@@ -362,7 +381,9 @@ static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
struct JS_DATA_SAVE_TYPE_32 ds32;
int err;
- if (!joydev->exist) return -ENODEV;
+ if (!joydev->exist)
+ return -ENODEV;
+
switch(cmd) {
case JS_SET_TIMELIMIT:
err = get_user(tmp32, (s32 __user *) arg);
@@ -395,8 +416,7 @@ static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
ds32.JS_SAVE = joydev->glue.JS_SAVE;
ds32.JS_CORR = joydev->glue.JS_CORR;
- err = copy_to_user(argp, &ds32,
- sizeof(ds32)) ? -EFAULT : 0;
+ err = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0;
break;
default:
@@ -412,7 +432,8 @@ static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
struct joydev *joydev = list->joydev;
void __user *argp = (void __user *)arg;
- if (!joydev->exist) return -ENODEV;
+ if (!joydev->exist)
+ return -ENODEV;
switch(cmd) {
case JS_SET_TIMELIMIT:
@@ -546,8 +567,8 @@ static struct input_device_id joydev_blacklist[] = {
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT(EV_KEY) },
.keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) },
- }, /* Avoid itouchpads, touchscreens and tablets */
- { }, /* Terminating entry */
+ }, /* Avoid itouchpads, touchscreens and tablets */
+ { } /* Terminating entry */
};
static struct input_device_id joydev_ids[] = {
@@ -566,7 +587,7 @@ static struct input_device_id joydev_ids[] = {
.evbit = { BIT(EV_ABS) },
.absbit = { BIT(ABS_THROTTLE) },
},
- { }, /* Terminating entry */
+ { } /* Terminating entry */
};
MODULE_DEVICE_TABLE(input, joydev_ids);
@@ -579,7 +600,7 @@ static struct input_handler joydev_handler = {
.minor = JOYDEV_MINOR_BASE,
.name = "joydev",
.id_table = joydev_ids,
- .blacklist = joydev_blacklist,
+ .blacklist = joydev_blacklist,
};
static int __init joydev_init(void)
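The rewritten joydev_correct() above clamps the corrected value to the 16-bit joystick range with a single conditional expression. Worked values, chosen purely for illustration: an input of 40000 returns 32767, -40000 returns -32767, and 1000 passes through unchanged.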
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 4612d13ea756..b11a4bbc84c4 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -306,7 +306,7 @@ static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv)
gameport_set_poll_handler(gameport, a3d_poll);
gameport_set_poll_interval(gameport, 20);
- sprintf(a3d->phys, "%s/input0", gameport->phys);
+ snprintf(a3d->phys, sizeof(a3d->phys), "%s/input0", gameport->phys);
input_dev->name = a3d_names[a3d->mode];
input_dev->phys = a3d->phys;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3121961e3e7c..01dc0b195d59 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -408,21 +408,23 @@ static void analog_calibrate_timer(struct analog_port *port)
static void analog_name(struct analog *analog)
{
- sprintf(analog->name, "Analog %d-axis %d-button",
- hweight8(analog->mask & ANALOG_AXES_STD),
- hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 +
- hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4);
+ snprintf(analog->name, sizeof(analog->name), "Analog %d-axis %d-button",
+ hweight8(analog->mask & ANALOG_AXES_STD),
+ hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 +
+ hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4);
if (analog->mask & ANALOG_HATS_ALL)
- sprintf(analog->name, "%s %d-hat",
- analog->name, hweight16(analog->mask & ANALOG_HATS_ALL));
+ snprintf(analog->name, sizeof(analog->name), "%s %d-hat",
+ analog->name, hweight16(analog->mask & ANALOG_HATS_ALL));
if (analog->mask & ANALOG_HAT_FCS)
- strcat(analog->name, " FCS");
+ strlcat(analog->name, " FCS", sizeof(analog->name));
if (analog->mask & ANALOG_ANY_CHF)
- strcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF");
+ strlcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF",
+ sizeof(analog->name));
- strcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? " gamepad": " joystick");
+ strlcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? " gamepad": " joystick",
+ sizeof(analog->name));
}
/*
@@ -435,7 +437,8 @@ static int analog_init_device(struct analog_port *port, struct analog *analog, i
int i, j, t, v, w, x, y, z;
analog_name(analog);
- sprintf(analog->phys, "%s/input%d", port->gameport->phys, index);
+ snprintf(analog->phys, sizeof(analog->phys),
+ "%s/input%d", port->gameport->phys, index);
analog->buttons = (analog->mask & ANALOG_GAMEPAD) ? analog_pad_btn : analog_joy_btn;
analog->dev = input_dev = input_allocate_device();
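The changes to analog.c above, and to the other joystick drivers below, all apply the same hardening pattern: unbounded sprintf()/strcat() into fixed-size name/phys buffers becomes bounded snprintf()/strlcat(). Reduced to a generic sketch (the 32-byte buffer is an assumed size, not taken from any one driver):

	char phys[32];

	/* before: sprintf(phys, "%s/input0", gameport->phys); */
	snprintf(phys, sizeof(phys), "%s/input0", gameport->phys);

	/* appending also stays within the buffer */
	strlcat(phys, " FCS", sizeof(phys));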
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 1909f7ef340c..d5e42eb88a20 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -202,7 +202,8 @@ static int cobra_connect(struct gameport *gameport, struct gameport_driver *drv)
goto fail3;
}
- sprintf(cobra->phys[i], "%s/input%d", gameport->phys, i);
+ snprintf(cobra->phys[i], sizeof(cobra->phys[i]),
+ "%s/input%d", gameport->phys, i);
input_dev->name = "Creative Labs Blaster GamePad Cobra";
input_dev->phys = cobra->phys[i];
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index e61894685cb1..5080e15c6d30 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -584,7 +584,7 @@ static struct db9 __init *db9_probe(int parport, int mode)
goto err_out;
}
- if (db9_mode[mode].bidirectional && !(pp->modes & PARPORT_MODE_TRISTATE)) {
+ if (db9_mode->bidirectional && !(pp->modes & PARPORT_MODE_TRISTATE)) {
printk(KERN_ERR "db9.c: specified parport is not bidirectional\n");
err = -EINVAL;
goto err_put_pp;
@@ -620,7 +620,8 @@ static struct db9 __init *db9_probe(int parport, int mode)
goto err_unreg_devs;
}
- sprintf(db9->phys[i], "%s/input%d", db9->pd->port->name, i);
+ snprintf(db9->phys[i], sizeof(db9->phys[i]),
+ "%s/input%d", db9->pd->port->name, i);
input_dev->name = db9_mode->name;
input_dev->phys = db9->phys[i];
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index ecbdb6b9bbd6..fe12aa37393d 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -761,7 +761,8 @@ static struct gc __init *gc_probe(int parport, int *pads, int n_pads)
if (!pads[i])
continue;
- sprintf(gc->phys[i], "%s/input%d", gc->pd->port->name, i);
+ snprintf(gc->phys[i], sizeof(gc->phys[i]),
+ "%s/input%d", gc->pd->port->name, i);
err = gc_setup_pad(gc, i, pads[i]);
if (err)
goto err_unreg_devs;
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 8a3ad455eb38..e4a699f6ec87 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -298,7 +298,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
gameport_set_poll_handler(gameport, gf2k_poll);
gameport_set_poll_interval(gameport, 20);
- sprintf(gf2k->phys, "%s/input0", gameport->phys);
+ snprintf(gf2k->phys, sizeof(gf2k->phys), "%s/input0", gameport->phys);
gf2k->length = gf2k_lens[gf2k->id];
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index 20cb98ac2d79..17a90c436de8 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -354,7 +354,8 @@ static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
goto fail3;
}
- sprintf(grip->phys[i], "%s/input%d", gameport->phys, i);
+ snprintf(grip->phys[i], sizeof(grip->phys[i]),
+ "%s/input%d", gameport->phys, i);
input_dev->name = grip_name[grip->mode[i]];
input_dev->phys = grip->phys[i];
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 6e2c721c26ba..840ed9b512b2 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -222,7 +222,7 @@ static int guillemot_connect(struct gameport *gameport, struct gameport_driver *
gameport_set_poll_handler(gameport, guillemot_poll);
gameport_set_poll_interval(gameport, 20);
- sprintf(guillemot->phys, "%s/input0", gameport->phys);
+ snprintf(guillemot->phys, sizeof(guillemot->phys), "%s/input0", gameport->phys);
guillemot->type = guillemot_type + i;
input_dev->name = guillemot_type[i].name;
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 2b8e8456c9fa..50c90765aee1 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -47,7 +47,7 @@ static int make_magnitude_modifier(struct iforce* iforce,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
mutex_unlock(&iforce->mem_mutex);
- return -ENOMEM;
+ return -ENOSPC;
}
mutex_unlock(&iforce->mem_mutex);
}
@@ -80,7 +80,7 @@ static int make_period_modifier(struct iforce* iforce,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
mutex_unlock(&iforce->mem_mutex);
- return -ENOMEM;
+ return -ENOSPC;
}
mutex_unlock(&iforce->mem_mutex);
}
@@ -120,7 +120,7 @@ static int make_envelope_modifier(struct iforce* iforce,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
mutex_unlock(&iforce->mem_mutex);
- return -ENOMEM;
+ return -ENOSPC;
}
mutex_unlock(&iforce->mem_mutex);
}
@@ -157,7 +157,7 @@ static int make_condition_modifier(struct iforce* iforce,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
mutex_unlock(&iforce->mem_mutex);
- return -ENOMEM;
+ return -ENOSPC;
}
mutex_unlock(&iforce->mem_mutex);
}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index ab0a26b924ca..6d99e3c37884 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -86,7 +86,7 @@ static struct iforce_device iforce_device[] = {
static int iforce_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
- struct iforce* iforce = (struct iforce*)(dev->private);
+ struct iforce* iforce = dev->private;
unsigned char data[3];
if (type != EV_FF)
@@ -138,7 +138,7 @@ static int iforce_input_event(struct input_dev *dev, unsigned int type, unsigned
*/
static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect)
{
- struct iforce* iforce = (struct iforce*)(dev->private);
+ struct iforce* iforce = dev->private;
int id;
int ret;
int is_update;
@@ -218,7 +218,7 @@ static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect)
*/
static int iforce_erase_effect(struct input_dev *dev, int effect_id)
{
- struct iforce* iforce = (struct iforce*)(dev->private);
+ struct iforce* iforce = dev->private;
int err = 0;
struct iforce_core_effect* core_effect;
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index c4ed01758226..bbfeb9c59b87 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -251,7 +251,7 @@ static int interact_connect(struct gameport *gameport, struct gameport_driver *d
gameport_set_poll_handler(gameport, interact_poll);
gameport_set_poll_interval(gameport, 20);
- sprintf(interact->phys, "%s/input0", gameport->phys);
+ snprintf(interact->phys, sizeof(interact->phys), "%s/input0", gameport->phys);
interact->type = i;
interact->length = interact_type[i].length;
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index ca3cc2319d6a..168b1061a03b 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -162,7 +162,7 @@ static int magellan_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
magellan->dev = input_dev;
- sprintf(magellan->phys, "%s/input0", serio->phys);
+ snprintf(magellan->phys, sizeof(magellan->phys), "%s/input0", serio->phys);
input_dev->name = "LogiCad3D Magellan / SpaceMouse";
input_dev->phys = magellan->phys;
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 95c0de7964a0..e58b22c018e4 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -541,7 +541,7 @@ static void sw_print_packet(char *name, int length, unsigned char *buf, char bit
* Unfortunately I don't know how to do this for the other SW types.
*/
-static void sw_3dp_id(unsigned char *buf, char *comment)
+static void sw_3dp_id(unsigned char *buf, char *comment, size_t size)
{
int i;
char pnp[8], rev[9];
@@ -554,7 +554,7 @@ static void sw_3dp_id(unsigned char *buf, char *comment)
pnp[7] = rev[8] = 0;
- sprintf(comment, " [PnP %d.%02d id %s rev %s]",
+ snprintf(comment, size, " [PnP %d.%02d id %s rev %s]",
(int) ((sw_get_bits(buf, 8, 6, 1) << 6) | /* Two 6-bit values */
sw_get_bits(buf, 16, 6, 1)) / 100,
(int) ((sw_get_bits(buf, 8, 6, 1) << 6) |
@@ -695,7 +695,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
sw->type = SW_ID_FFP;
sprintf(comment, " [AC %s]", sw_get_bits(idbuf,38,1,3) ? "off" : "on");
} else
- sw->type = SW_ID_PP;
+ sw->type = SW_ID_PP;
break;
case 66:
sw->bits = 3;
@@ -703,7 +703,8 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
sw->length = 22;
case 64:
sw->type = SW_ID_3DP;
- if (j == 160) sw_3dp_id(idbuf, comment);
+ if (j == 160)
+ sw_3dp_id(idbuf, comment, sizeof(comment));
break;
}
}
@@ -733,8 +734,10 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
for (i = 0; i < sw->number; i++) {
int bits, code;
- sprintf(sw->name, "Microsoft SideWinder %s", sw_name[sw->type]);
- sprintf(sw->phys[i], "%s/input%d", gameport->phys, i);
+ snprintf(sw->name, sizeof(sw->name),
+ "Microsoft SideWinder %s", sw_name[sw->type]);
+ snprintf(sw->phys[i], sizeof(sw->phys[i]),
+ "%s/input%d", gameport->phys, i);
sw->dev[i] = input_dev = input_allocate_device();
if (!input_dev) {
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index d6f8db8ec3fd..75eb5ca59992 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -220,7 +220,7 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
spaceball->dev = input_dev;
- sprintf(spaceball->phys, "%s/input0", serio->phys);
+ snprintf(spaceball->phys, sizeof(spaceball->phys), "%s/input0", serio->phys);
input_dev->name = spaceball_names[id];
input_dev->phys = spaceball->phys;
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 7c123a01c58e..3e2782e79834 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -177,7 +177,7 @@ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
spaceorb->dev = input_dev;
- sprintf(spaceorb->phys, "%s/input0", serio->phys);
+ snprintf(spaceorb->phys, sizeof(spaceorb->phys), "%s/input0", serio->phys);
input_dev->name = "SpaceTec SpaceOrb 360 / Avenger";
input_dev->phys = spaceorb->phys;
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index 0a9ed1d30636..011ec4858e15 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -148,7 +148,7 @@ static int stinger_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
stinger->dev = input_dev;
- sprintf(stinger->phys, "%s/serio0", serio->phys);
+ snprintf(stinger->phys, sizeof(stinger->phys), "%s/serio0", serio->phys);
input_dev->name = "Gravis Stinger";
input_dev->phys = stinger->phys;
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 7f8b0093c5bc..076f237d9654 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -199,7 +199,7 @@ static int twidjoy_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
twidjoy->dev = input_dev;
- sprintf(twidjoy->phys, "%s/input0", serio->phys);
+ snprintf(twidjoy->phys, sizeof(twidjoy->phys), "%s/input0", serio->phys);
input_dev->name = "Handykey Twiddler";
input_dev->phys = twidjoy->phys;
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index 1849b176cf18..f9c1a03214eb 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -154,7 +154,7 @@ static int warrior_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
warrior->dev = input_dev;
- sprintf(warrior->phys, "%s/input0", serio->phys);
+ snprintf(warrior->phys, sizeof(warrior->phys), "%s/input0", serio->phys);
input_dev->name = "Logitech WingMan Warrior";
input_dev->phys = warrior->phys;
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index fad04b66d268..ce1f10e8984b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -55,7 +55,7 @@ static int atkbd_softraw = 1;
module_param_named(softraw, atkbd_softraw, bool, 0);
MODULE_PARM_DESC(softraw, "Use software generated rawmode");
-static int atkbd_scroll = 0;
+static int atkbd_scroll;
module_param_named(scroll, atkbd_scroll, bool, 0);
MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards");
@@ -150,8 +150,8 @@ static unsigned char atkbd_unxlate_table[128] = {
#define ATKBD_RET_EMUL0 0xe0
#define ATKBD_RET_EMUL1 0xe1
#define ATKBD_RET_RELEASE 0xf0
-#define ATKBD_RET_HANGUEL 0xf1
-#define ATKBD_RET_HANJA 0xf2
+#define ATKBD_RET_HANJA 0xf1
+#define ATKBD_RET_HANGEUL 0xf2
#define ATKBD_RET_ERR 0xff
#define ATKBD_KEY_UNKNOWN 0
@@ -170,6 +170,13 @@ static unsigned char atkbd_unxlate_table[128] = {
#define ATKBD_LED_EVENT_BIT 0
#define ATKBD_REP_EVENT_BIT 1
+#define ATKBD_XL_ERR 0x01
+#define ATKBD_XL_BAT 0x02
+#define ATKBD_XL_ACK 0x04
+#define ATKBD_XL_NAK 0x08
+#define ATKBD_XL_HANGEUL 0x10
+#define ATKBD_XL_HANJA 0x20
+
static struct {
unsigned char keycode;
unsigned char set2;
@@ -211,8 +218,7 @@ struct atkbd {
unsigned char emul;
unsigned char resend;
unsigned char release;
- unsigned char bat_xl;
- unsigned char err_xl;
+ unsigned long xl_bit;
unsigned int last;
unsigned long time;
@@ -245,17 +251,65 @@ ATKBD_DEFINE_ATTR(set);
ATKBD_DEFINE_ATTR(softrepeat);
ATKBD_DEFINE_ATTR(softraw);
+static const unsigned int xl_table[] = {
+ ATKBD_RET_BAT, ATKBD_RET_ERR, ATKBD_RET_ACK,
+ ATKBD_RET_NAK, ATKBD_RET_HANJA, ATKBD_RET_HANGEUL,
+};
-static void atkbd_report_key(struct input_dev *dev, struct pt_regs *regs, int code, int value)
+/*
+ * Checks if we should mangle the scancode to extract 'release' bit
+ * in translated mode.
+ */
+static int atkbd_need_xlate(unsigned long xl_bit, unsigned char code)
{
- input_regs(dev, regs);
- if (value == 3) {
- input_report_key(dev, code, 1);
- input_sync(dev);
- input_report_key(dev, code, 0);
- } else
- input_event(dev, EV_KEY, code, value);
- input_sync(dev);
+ int i;
+
+ if (code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(xl_table); i++)
+ if (code == xl_table[i])
+ return test_bit(i, &xl_bit);
+
+ return 1;
+}
+
+/*
+ * Calculates new value of xl_bit so the driver can distinguish
+ * between make/break pair of scancodes for select keys and PS/2
+ * protocol responses.
+ */
+static void atkbd_calculate_xl_bit(struct atkbd *atkbd, unsigned char code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xl_table); i++) {
+ if (!((code ^ xl_table[i]) & 0x7f)) {
+ if (code & 0x80)
+ __clear_bit(i, &atkbd->xl_bit);
+ else
+ __set_bit(i, &atkbd->xl_bit);
+ break;
+ }
+ }
+}
+
+/*
+ * Encode the scancode, 0xe0 prefix, and high bit into a single integer,
+ * keeping kernel 2.4 compatibility for set 2
+ */
+static unsigned int atkbd_compat_scancode(struct atkbd *atkbd, unsigned int code)
+{
+ if (atkbd->set == 3) {
+ if (atkbd->emul == 1)
+ code |= 0x100;
+ } else {
+ code = (code & 0x7f) | ((code & 0x80) << 1);
+ if (atkbd->emul == 1)
+ code |= 0x80;
+ }
+
+ return code;
}
/*
@@ -267,9 +321,11 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
unsigned int flags, struct pt_regs *regs)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
+ struct input_dev *dev = atkbd->dev;
unsigned int code = data;
- int scroll = 0, hscroll = 0, click = -1;
+ int scroll = 0, hscroll = 0, click = -1, add_release_event = 0;
int value;
+ unsigned char keycode;
#ifdef ATKBD_DEBUG
printk(KERN_DEBUG "atkbd.c: Received %02x flags %02x\n", data, flags);
@@ -298,25 +354,17 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
if (!atkbd->enabled)
goto out;
- input_event(atkbd->dev, EV_MSC, MSC_RAW, code);
+ input_event(dev, EV_MSC, MSC_RAW, code);
if (atkbd->translated) {
- if (atkbd->emul ||
- (code != ATKBD_RET_EMUL0 && code != ATKBD_RET_EMUL1 &&
- code != ATKBD_RET_HANGUEL && code != ATKBD_RET_HANJA &&
- (code != ATKBD_RET_ERR || atkbd->err_xl) &&
- (code != ATKBD_RET_BAT || atkbd->bat_xl))) {
+ if (atkbd->emul || atkbd_need_xlate(atkbd->xl_bit, code)) {
atkbd->release = code >> 7;
code &= 0x7f;
}
- if (!atkbd->emul) {
- if ((code & 0x7f) == (ATKBD_RET_BAT & 0x7f))
- atkbd->bat_xl = !(data >> 7);
- if ((code & 0x7f) == (ATKBD_RET_ERR & 0x7f))
- atkbd->err_xl = !(data >> 7);
- }
+ if (!atkbd->emul)
+ atkbd_calculate_xl_bit(atkbd, data);
}
switch (code) {
@@ -333,47 +381,48 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
case ATKBD_RET_RELEASE:
atkbd->release = 1;
goto out;
- case ATKBD_RET_HANGUEL:
- atkbd_report_key(atkbd->dev, regs, KEY_HANGUEL, 3);
+ case ATKBD_RET_ACK:
+ case ATKBD_RET_NAK:
+ printk(KERN_WARNING "atkbd.c: Spurious %s on %s. "
+ "Some program might be trying access hardware directly.\n",
+ data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
goto out;
+ case ATKBD_RET_HANGEUL:
case ATKBD_RET_HANJA:
- atkbd_report_key(atkbd->dev, regs, KEY_HANJA, 3);
- goto out;
+ /*
+ * These keys do not report release and thus need to be
+ * flagged properly
+ */
+ add_release_event = 1;
+ break;
case ATKBD_RET_ERR:
printk(KERN_DEBUG "atkbd.c: Keyboard on %s reports too many keys pressed.\n", serio->phys);
goto out;
}
- if (atkbd->set != 3)
- code = (code & 0x7f) | ((code & 0x80) << 1);
- if (atkbd->emul) {
- if (--atkbd->emul)
- goto out;
- code |= (atkbd->set != 3) ? 0x80 : 0x100;
- }
+ code = atkbd_compat_scancode(atkbd, code);
+
+ if (atkbd->emul && --atkbd->emul)
+ goto out;
- if (atkbd->keycode[code] != ATKBD_KEY_NULL)
- input_event(atkbd->dev, EV_MSC, MSC_SCAN, code);
+ keycode = atkbd->keycode[code];
- switch (atkbd->keycode[code]) {
+ if (keycode != ATKBD_KEY_NULL)
+ input_event(dev, EV_MSC, MSC_SCAN, code);
+
+ switch (keycode) {
case ATKBD_KEY_NULL:
break;
case ATKBD_KEY_UNKNOWN:
- if (data == ATKBD_RET_ACK || data == ATKBD_RET_NAK) {
- printk(KERN_WARNING "atkbd.c: Spurious %s on %s. Some program, "
- "like XFree86, might be trying access hardware directly.\n",
- data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
- } else {
- printk(KERN_WARNING "atkbd.c: Unknown key %s "
- "(%s set %d, code %#x on %s).\n",
- atkbd->release ? "released" : "pressed",
- atkbd->translated ? "translated" : "raw",
- atkbd->set, code, serio->phys);
- printk(KERN_WARNING "atkbd.c: Use 'setkeycodes %s%02x <keycode>' "
- "to make it known.\n",
- code & 0x80 ? "e0" : "", code & 0x7f);
- }
- input_sync(atkbd->dev);
+ printk(KERN_WARNING
+ "atkbd.c: Unknown key %s (%s set %d, code %#x on %s).\n",
+ atkbd->release ? "released" : "pressed",
+ atkbd->translated ? "translated" : "raw",
+ atkbd->set, code, serio->phys);
+ printk(KERN_WARNING
+ "atkbd.c: Use 'setkeycodes %s%02x <keycode>' to make it known.\n",
+ code & 0x80 ? "e0" : "", code & 0x7f);
+ input_sync(dev);
break;
case ATKBD_SCR_1:
scroll = 1 - atkbd->release * 2;
@@ -397,33 +446,35 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
hscroll = 1;
break;
default:
- value = atkbd->release ? 0 :
- (1 + (!atkbd->softrepeat && test_bit(atkbd->keycode[code], atkbd->dev->key)));
-
- switch (value) { /* Workaround Toshiba laptop multiple keypress */
- case 0:
- atkbd->last = 0;
- break;
- case 1:
- atkbd->last = code;
- atkbd->time = jiffies + msecs_to_jiffies(atkbd->dev->rep[REP_DELAY]) / 2;
- break;
- case 2:
- if (!time_after(jiffies, atkbd->time) && atkbd->last == code)
- value = 1;
- break;
+ if (atkbd->release) {
+ value = 0;
+ atkbd->last = 0;
+ } else if (!atkbd->softrepeat && test_bit(keycode, dev->key)) {
+ /* Workaround Toshiba laptop multiple keypress */
+ value = time_before(jiffies, atkbd->time) && atkbd->last == code ? 1 : 2;
+ } else {
+ value = 1;
+ atkbd->last = code;
+ atkbd->time = jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]) / 2;
}
- atkbd_report_key(atkbd->dev, regs, atkbd->keycode[code], value);
+ input_regs(dev, regs);
+ input_event(dev, EV_KEY, keycode, value);
+ input_sync(dev);
+
+ if (value && add_release_event) {
+ input_report_key(dev, keycode, 0);
+ input_sync(dev);
+ }
}
if (atkbd->scroll) {
- input_regs(atkbd->dev, regs);
+ input_regs(dev, regs);
if (click != -1)
- input_report_key(atkbd->dev, BTN_MIDDLE, click);
- input_report_rel(atkbd->dev, REL_WHEEL, scroll);
- input_report_rel(atkbd->dev, REL_HWHEEL, hscroll);
- input_sync(atkbd->dev);
+ input_report_key(dev, BTN_MIDDLE, click);
+ input_report_rel(dev, REL_WHEEL, scroll);
+ input_report_rel(dev, REL_HWHEEL, hscroll);
+ input_sync(dev);
}
atkbd->release = 0;
@@ -764,6 +815,9 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd)
for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++)
atkbd->keycode[atkbd_scroll_keys[i].set2] = atkbd_scroll_keys[i].keycode;
}
+
+ atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL)] = KEY_HANGUEL;
+ atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA)] = KEY_HANJA;
}
/*
@@ -776,12 +830,15 @@ static void atkbd_set_device_attrs(struct atkbd *atkbd)
int i;
if (atkbd->extra)
- sprintf(atkbd->name, "AT Set 2 Extra keyboard");
+ snprintf(atkbd->name, sizeof(atkbd->name),
+ "AT Set 2 Extra keyboard");
else
- sprintf(atkbd->name, "AT %s Set %d keyboard",
- atkbd->translated ? "Translated" : "Raw", atkbd->set);
+ snprintf(atkbd->name, sizeof(atkbd->name),
+ "AT %s Set %d keyboard",
+ atkbd->translated ? "Translated" : "Raw", atkbd->set);
- sprintf(atkbd->phys, "%s/input0", atkbd->ps2dev.serio->phys);
+ snprintf(atkbd->phys, sizeof(atkbd->phys),
+ "%s/input0", atkbd->ps2dev.serio->phys);
input_dev->name = atkbd->name;
input_dev->phys = atkbd->phys;
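The new xl_bit bookkeeping above distinguishes break codes of keys whose make code shares its low seven bits with a protocol response (BAT, ERR, ACK, NAK, Hangeul, Hanja) from the response bytes themselves. A short trace, with byte values chosen for illustration only:

	/* make code colliding with ATKBD_RET_ERR (0xff & 0x7f == 0x7f) */
	atkbd_need_xlate(atkbd->xl_bit, 0x7f);   /* -> 1: handled as a key event */
	atkbd_calculate_xl_bit(atkbd, 0x7f);     /* sets the matching xl_bit     */

	/* the following 0xff is now decoded as "release 0x7f" ...              */
	atkbd_need_xlate(atkbd->xl_bit, 0xff);   /* -> 1                         */

	/* ... whereas a genuine 0xff arriving while the bit is clear is left
	 * untranslated and falls through to the ATKBD_RET_ERR case in
	 * atkbd_interrupt().                                                    */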
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 77c4d9669ad0..5174224cadb4 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -384,18 +384,21 @@ lkkbd_detection_done (struct lkkbd *lk)
*/
switch (lk->id[4]) {
case 1:
- sprintf (lk->name, "DEC LK201 keyboard");
+ strlcpy (lk->name, "DEC LK201 keyboard",
+ sizeof (lk->name));
if (lk201_compose_is_alt)
lk->keycode[0xb1] = KEY_LEFTALT;
break;
case 2:
- sprintf (lk->name, "DEC LK401 keyboard");
+ strlcpy (lk->name, "DEC LK401 keyboard",
+ sizeof (lk->name));
break;
default:
- sprintf (lk->name, "Unknown DEC keyboard");
+ strlcpy (lk->name, "Unknown DEC keyboard",
+ sizeof (lk->name));
printk (KERN_ERR "lkkbd: keyboard on %s is unknown, "
"please report to Jan-Benedict Glaw "
"<jbglaw@lug-owl.de>\n", lk->phys);
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index d10983c521e6..40a3f551247e 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -96,7 +96,7 @@ static int nkbd_connect(struct serio *serio, struct serio_driver *drv)
nkbd->serio = serio;
nkbd->dev = input_dev;
- sprintf(nkbd->phys, "%s/input0", serio->phys);
+ snprintf(nkbd->phys, sizeof(nkbd->phys), "%s/input0", serio->phys);
memcpy(nkbd->keycode, nkbd_keycode, sizeof(nkbd->keycode));
input_dev->name = "Newton Keyboard";
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index b15b6d8d4f83..9dbd7b85686d 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -263,7 +263,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
}
- sprintf(sunkbd->name, "Sun Type %d keyboard", sunkbd->type);
+ snprintf(sunkbd->name, sizeof(sunkbd->name), "Sun Type %d keyboard", sunkbd->type);
memcpy(sunkbd->keycode, sunkbd_keycode, sizeof(sunkbd->keycode));
input_dev->name = sunkbd->name;
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 4135e3e16c51..0821d53cf0c1 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -100,7 +100,7 @@ static int xtkbd_connect(struct serio *serio, struct serio_driver *drv)
xtkbd->serio = serio;
xtkbd->dev = input_dev;
- sprintf(xtkbd->phys, "%s/input0", serio->phys);
+ snprintf(xtkbd->phys, sizeof(xtkbd->phys), "%s/input0", serio->phys);
memcpy(xtkbd->keycode, xtkbd_keycode, sizeof(xtkbd->keycode));
input_dev->name = "XT Keyboard";
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index e4e5be111c96..ccf0faeee5c1 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -285,6 +285,15 @@ static struct key_entry keymap_fujitsu_n3510[] = {
{ KE_END, 0 }
};
+static struct key_entry keymap_wistron_ms2111[] = {
+ { KE_KEY, 0x11, KEY_PROG1 },
+ { KE_KEY, 0x12, KEY_PROG2 },
+ { KE_KEY, 0x13, KEY_PROG3 },
+ { KE_KEY, 0x31, KEY_MAIL },
+ { KE_KEY, 0x36, KEY_WWW },
+ { KE_END, 0 }
+};
+
static struct key_entry keymap_wistron_ms2141[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
@@ -326,6 +335,7 @@ static struct key_entry keymap_aopen_1559as[] = {
{ KE_WIFI, 0x30, 0 },
{ KE_KEY, 0x31, KEY_MAIL },
{ KE_KEY, 0x36, KEY_WWW },
+ { KE_END, 0 },
};
/*
@@ -388,6 +398,15 @@ static struct dmi_system_id dmi_ids[] = {
},
.driver_data = keymap_aopen_1559as
},
+ {
+ .callback = dmi_matched,
+ .ident = "Medion MD 9783",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MD 9783"),
+ },
+ .driver_data = keymap_wistron_ms2111
+ },
{ NULL, }
};
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a0e2e797c6d5..070d75330afd 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -470,7 +470,7 @@ int alps_init(struct psmouse *psmouse)
dev1->keybit[LONG(BTN_BACK)] |= BIT(BTN_BACK);
}
- sprintf(priv->phys, "%s/input1", psmouse->ps2dev.serio->phys);
+ snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
dev2->name = (priv->i->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse";
dev2->id.bustype = BUS_I8042;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 136321a2cfdb..8bc9f51ae6c2 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -150,9 +150,20 @@ static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse, struct pt_reg
*/
if (psmouse->type == PSMOUSE_IMEX) {
- input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
- input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
+ switch (packet[3] & 0xC0) {
+ case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
+ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
+ break;
+ case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
+ input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
+ break;
+ case 0x00:
+ case 0xC0:
+ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
+ input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
+ break;
+ }
}
/*
@@ -466,9 +477,25 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
if (param[0] != 4)
return -1;
+/* Magic to enable horizontal scrolling on IntelliMouse 4.0 */
+ param[0] = 200;
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+ param[0] = 80;
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+ param[0] = 40;
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+
+ param[0] = 200;
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+ param[0] = 200;
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+ param[0] = 60;
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+
if (set_properties) {
set_bit(BTN_MIDDLE, psmouse->dev->keybit);
set_bit(REL_WHEEL, psmouse->dev->relbit);
+ set_bit(REL_HWHEEL, psmouse->dev->relbit);
set_bit(BTN_SIDE, psmouse->dev->keybit);
set_bit(BTN_EXTRA, psmouse->dev->keybit);
@@ -1057,8 +1084,8 @@ static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_proto
if (psmouse->resync_time && psmouse->poll(psmouse))
psmouse->resync_time = 0;
- sprintf(psmouse->devname, "%s %s %s",
- psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name);
+ snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s",
+ psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name);
input_dev->name = psmouse->devname;
input_dev->phys = psmouse->phys;
@@ -1099,7 +1126,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
ps2_init(&psmouse->ps2dev, serio);
INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
psmouse->dev = input_dev;
- sprintf(psmouse->phys, "%s/input0", serio->phys);
+ snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
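Two related additions in psmouse-base.c above: im_explorer_detect() now sends an extra sample-rate sequence (200, 80, 40 and then 200, 200, 60), described in the patch as the magic needed to unlock horizontal scrolling, and psmouse_process_byte() decodes packet[3] by its top two bits (0x80 vertical scroll, 0x40 horizontal scroll, otherwise wheel plus side/extra buttons). The sign folding in (packet[3] & 32) - (packet[3] & 31) works as follows, with example bytes: a low six-bit field of 0x01 yields 0 - 1 = -1, 0x3f yields 32 - 31 = +1, and 0x00 yields 0.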
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 2f9a04ae725f..a89742431717 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -254,7 +254,7 @@ static int sermouse_connect(struct serio *serio, struct serio_driver *drv)
goto fail;
sermouse->dev = input_dev;
- sprintf(sermouse->phys, "%s/input0", serio->phys);
+ snprintf(sermouse->phys, sizeof(sermouse->phys), "%s/input0", serio->phys);
sermouse->type = serio->id.proto;
input_dev->name = sermouse_protocols[sermouse->type];
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index 36e9442a16b2..7b85bc21ae4a 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -153,22 +153,25 @@ vsxxxaa_detection_done (struct vsxxxaa *mouse)
{
switch (mouse->type) {
case 0x02:
- sprintf (mouse->name, "DEC VSXXX-AA/-GA mouse");
+ strlcpy (mouse->name, "DEC VSXXX-AA/-GA mouse",
+ sizeof (mouse->name));
break;
case 0x04:
- sprintf (mouse->name, "DEC VSXXX-AB digitizer");
+ strlcpy (mouse->name, "DEC VSXXX-AB digitizer",
+ sizeof (mouse->name));
break;
default:
- sprintf (mouse->name, "unknown DEC pointer device "
- "(type = 0x%02x)", mouse->type);
+ snprintf (mouse->name, sizeof (mouse->name),
+ "unknown DEC pointer device (type = 0x%02x)",
+ mouse->type);
break;
}
- printk (KERN_INFO "Found %s version 0x%02x from country 0x%02x "
- "on port %s\n", mouse->name, mouse->version,
- mouse->country, mouse->phys);
+ printk (KERN_INFO
+ "Found %s version 0x%02x from country 0x%02x on port %s\n",
+ mouse->name, mouse->version, mouse->country, mouse->phys);
}
/*
@@ -503,8 +506,9 @@ vsxxxaa_connect (struct serio *serio, struct serio_driver *drv)
mouse->dev = input_dev;
mouse->serio = serio;
- sprintf (mouse->name, "DEC VSXXX-AA/-GA mouse or VSXXX-AB digitizer");
- sprintf (mouse->phys, "%s/input0", serio->phys);
+ strlcat (mouse->name, "DEC VSXXX-AA/-GA mouse or VSXXX-AB digitizer",
+ sizeof (mouse->name));
+ snprintf (mouse->phys, sizeof (mouse->phys), "%s/input0", serio->phys);
input_dev->name = mouse->name;
input_dev->phys = mouse->phys;
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b685a507955d..eb721b11ff37 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -123,7 +123,9 @@ static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mous
if (mousedev->touch) {
size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
- if (size == 0) size = 256 * 2;
+ if (size == 0)
+ size = 256 * 2;
+
switch (code) {
case ABS_X:
fx(0) = value;
@@ -155,18 +157,24 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
switch (code) {
case ABS_X:
size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
- if (size == 0) size = xres ? : 1;
- if (value > dev->absmax[ABS_X]) value = dev->absmax[ABS_X];
- if (value < dev->absmin[ABS_X]) value = dev->absmin[ABS_X];
+ if (size == 0)
+ size = xres ? : 1;
+ if (value > dev->absmax[ABS_X])
+ value = dev->absmax[ABS_X];
+ if (value < dev->absmin[ABS_X])
+ value = dev->absmin[ABS_X];
mousedev->packet.x = ((value - dev->absmin[ABS_X]) * xres) / size;
mousedev->packet.abs_event = 1;
break;
case ABS_Y:
size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y];
- if (size == 0) size = yres ? : 1;
- if (value > dev->absmax[ABS_Y]) value = dev->absmax[ABS_Y];
- if (value < dev->absmin[ABS_Y]) value = dev->absmin[ABS_Y];
+ if (size == 0)
+ size = yres ? : 1;
+ if (value > dev->absmax[ABS_Y])
+ value = dev->absmax[ABS_Y];
+ if (value < dev->absmin[ABS_Y])
+ value = dev->absmin[ABS_Y];
mousedev->packet.y = yres - ((value - dev->absmin[ABS_Y]) * yres) / size;
mousedev->packet.abs_event = 1;
break;
@@ -202,7 +210,7 @@ static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int
case BTN_SIDE: index = 3; break;
case BTN_4:
case BTN_EXTRA: index = 4; break;
- default: return;
+ default: return;
}
if (value) {
@@ -285,10 +293,9 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
mousedev->touch = mousedev->pkt_count = 0;
mousedev->frac_dx = 0;
mousedev->frac_dy = 0;
- }
- else
- if (!mousedev->touch)
- mousedev->touch = jiffies;
+
+ } else if (!mousedev->touch)
+ mousedev->touch = jiffies;
}
static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
@@ -327,7 +334,7 @@ static void mousedev_event(struct input_handle *handle, unsigned int type, unsig
mousedev->pkt_count++;
/* Input system eats duplicate events, but we need all of them
* to do correct averaging so apply present one forward
- */
+ */
fx(0) = fx(1);
fy(0) = fy(1);
}
@@ -346,7 +353,9 @@ static int mousedev_fasync(int fd, struct file *file, int on)
{
int retval;
struct mousedev_list *list = file->private_data;
+
retval = fasync_helper(fd, file, on, &list->fasync);
+
return retval < 0 ? retval : 0;
}
@@ -507,14 +516,16 @@ static ssize_t mousedev_write(struct file * file, const char __user * buffer, si
list->imexseq = 0;
list->mode = MOUSEDEV_EMUL_EXPS;
}
- } else list->imexseq = 0;
+ } else
+ list->imexseq = 0;
if (c == mousedev_imps_seq[list->impsseq]) {
if (++list->impsseq == MOUSEDEV_SEQ_LEN) {
list->impsseq = 0;
list->mode = MOUSEDEV_EMUL_IMPS;
}
- } else list->impsseq = 0;
+ } else
+ list->impsseq = 0;
list->ps2[0] = 0xfa;
@@ -598,6 +609,7 @@ static ssize_t mousedev_read(struct file * file, char __user * buffer, size_t co
static unsigned int mousedev_poll(struct file *file, poll_table *wait)
{
struct mousedev_list *list = file->private_data;
+
poll_wait(file, &list->mousedev->wait, wait);
return ((list->ready || list->buffer) ? (POLLIN | POLLRDNORM) : 0) |
(list->mousedev->exist ? 0 : (POLLHUP | POLLERR));
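The absolute-to-screen scaling kept (and only reformatted) in mousedev_abs_event() maps a device coordinate into the configured screen resolution: packet.x = ((value - absmin) * xres) / size, where size = absmax - absmin. With made-up numbers, absmin = 0, absmax = 1023 and xres = 1024, a reported value of 512 becomes (512 * 1024) / 1023 = 512; ABS_Y is additionally mirrored via yres minus the scaled result.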
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 096b6a0b5cca..1ac739ef2ffa 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -189,7 +189,7 @@ static int __devinit ct82c710_probe(struct platform_device *dev)
strlcpy(ct82c710_port->name, "C&T 82c710 mouse port",
sizeof(ct82c710_port->name));
snprintf(ct82c710_port->phys, sizeof(ct82c710_port->phys),
- "isa%04lx/serio0", CT82C710_DATA);
+ "isa%16llx/serio0", (unsigned long long)CT82C710_DATA);
serio_register_port(ct82c710_port);
@@ -241,8 +241,8 @@ static int __init ct82c710_init(void)
serio_register_port(ct82c710_port);
- printk(KERN_INFO "serio: C&T 82c710 mouse port at %#lx irq %d\n",
- CT82C710_DATA, CT82C710_IRQ);
+ printk(KERN_INFO "serio: C&T 82c710 mouse port at %#llx irq %d\n",
+ (unsigned long long)CT82C710_DATA, CT82C710_IRQ);
return 0;
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 466da190ceec..b769b21973b7 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -129,7 +129,7 @@ static int gunze_connect(struct serio *serio, struct serio_driver *drv)
gunze->serio = serio;
gunze->dev = input_dev;
- sprintf(gunze->phys, "%s/input0", serio->phys);
+ snprintf(gunze->phys, sizeof(gunze->phys), "%s/input0", serio->phys);
input_dev->private = gunze;
input_dev->name = "Gunze AHL-51S TouchScreen";
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index a595d386312f..2de2139f2fed 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -363,7 +363,7 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
ts->serio = serio;
ts->dev = input_dev;
- sprintf(ts->phys, "%s/input0", serio->phys);
+ snprintf(ts->phys, sizeof(ts->phys), "%s/input0", serio->phys);
input_dev->name = "H3600 TouchScreen";
input_dev->phys = ts->phys;
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index 1d0d37eeef6e..8647a905df80 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -143,7 +143,7 @@ static int mtouch_connect(struct serio *serio, struct serio_driver *drv)
mtouch->serio = serio;
mtouch->dev = input_dev;
- sprintf(mtouch->phys, "%s/input0", serio->phys);
+ snprintf(mtouch->phys, sizeof(mtouch->phys), "%s/input0", serio->phys);
input_dev->private = mtouch;
input_dev->name = "MicroTouch Serial TouchScreen";
diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c
index d678d144bbf8..5f9ecad2ca75 100644
--- a/drivers/input/tsdev.c
+++ b/drivers/input/tsdev.c
@@ -35,7 +35,7 @@
* e-mail - mail your message to <jsimmons@infradead.org>.
*/
-#define TSDEV_MINOR_BASE 128
+#define TSDEV_MINOR_BASE 128
#define TSDEV_MINORS 32
/* First 16 devices are h3600_ts compatible; second 16 are h3600_tsraw */
#define TSDEV_MINOR_MASK 15
@@ -230,6 +230,7 @@ static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count,
static unsigned int tsdev_poll(struct file *file, poll_table * wait)
{
struct tsdev_list *list = file->private_data;
+
poll_wait(file, &list->tsdev->wait, wait);
return ((list->head == list->tail) ? 0 : (POLLIN | POLLRDNORM)) |
(list->tsdev->exist ? 0 : (POLLHUP | POLLERR));
@@ -248,11 +249,13 @@ static int tsdev_ioctl(struct inode *inode, struct file *file,
sizeof (struct ts_calibration)))
retval = -EFAULT;
break;
+
case TS_SET_CAL:
if (copy_from_user (&tsdev->cal, (void __user *)arg,
sizeof (struct ts_calibration)))
retval = -EFAULT;
break;
+
default:
retval = -EINVAL;
break;
@@ -284,9 +287,11 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
case ABS_X:
tsdev->x = value;
break;
+
case ABS_Y:
tsdev->y = value;
break;
+
case ABS_PRESSURE:
if (value > handle->dev->absmax[ABS_PRESSURE])
value = handle->dev->absmax[ABS_PRESSURE];
@@ -307,6 +312,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
else if (tsdev->x > xres)
tsdev->x = xres;
break;
+
case REL_Y:
tsdev->y += value;
if (tsdev->y < 0)
@@ -323,6 +329,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
case 0:
tsdev->pressure = 0;
break;
+
case 1:
if (!tsdev->pressure)
tsdev->pressure = 1;
@@ -370,9 +377,8 @@ static struct input_handle *tsdev_connect(struct input_handler *handler,
struct class_device *cdev;
int minor, delta;
- for (minor = 0; minor < TSDEV_MINORS/2 && tsdev_table[minor];
- minor++);
- if (minor >= TSDEV_MINORS/2) {
+ for (minor = 0; minor < TSDEV_MINORS / 2 && tsdev_table[minor]; minor++);
+ if (minor >= TSDEV_MINORS / 2) {
printk(KERN_ERR
"tsdev: You have way too many touchscreens\n");
return NULL;
@@ -444,22 +450,22 @@ static struct input_device_id tsdev_ids[] = {
.evbit = { BIT(EV_KEY) | BIT(EV_REL) },
.keybit = { [LONG(BTN_LEFT)] = BIT(BTN_LEFT) },
.relbit = { BIT(REL_X) | BIT(REL_Y) },
- },/* A mouse like device, at least one button, two relative axes */
+ }, /* A mouse like device, at least one button, two relative axes */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT(EV_KEY) | BIT(EV_ABS) },
.keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) },
.absbit = { BIT(ABS_X) | BIT(ABS_Y) },
- },/* A tablet like device, at least touch detection, two absolute axes */
+ }, /* A tablet like device, at least touch detection, two absolute axes */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT(EV_ABS) },
.absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) },
- },/* A tablet like device with several gradations of pressure */
+ }, /* A tablet like device with several gradations of pressure */
- {},/* Terminating entry */
+ {} /* Terminating entry */
};
MODULE_DEVICE_TABLE(input, tsdev_ids);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 173c899a1fb4..2e541fa02024 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -87,6 +87,11 @@ struct capincci;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor;
+struct datahandle_queue {
+ struct list_head list;
+ u16 datahandle;
+};
+
struct capiminor {
struct list_head list;
struct capincci *nccip;
@@ -109,12 +114,9 @@ struct capiminor {
int outbytes;
/* transmit path */
- struct datahandle_queue {
- struct datahandle_queue *next;
- u16 datahandle;
- } *ackqueue;
+ struct list_head ackqueue;
int nack;
-
+ spinlock_t ackqlock;
};
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
@@ -156,48 +158,54 @@ static LIST_HEAD(capiminor_list);
static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
{
- struct datahandle_queue *n, **pp;
+ struct datahandle_queue *n;
+ unsigned long flags;
n = kmalloc(sizeof(*n), GFP_ATOMIC);
- if (!n) {
- printk(KERN_ERR "capi: alloc datahandle failed\n");
- return -1;
+ if (unlikely(!n)) {
+ printk(KERN_ERR "capi: alloc datahandle failed\n");
+ return -1;
}
- n->next = NULL;
n->datahandle = datahandle;
- for (pp = &mp->ackqueue; *pp; pp = &(*pp)->next) ;
- *pp = n;
+ INIT_LIST_HEAD(&n->list);
+ spin_lock_irqsave(&mp->ackqlock, flags);
+ list_add_tail(&n->list, &mp->ackqueue);
mp->nack++;
+ spin_unlock_irqrestore(&mp->ackqlock, flags);
return 0;
}
static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)
{
- struct datahandle_queue **pp, *p;
+ struct datahandle_queue *p, *tmp;
+ unsigned long flags;
- for (pp = &mp->ackqueue; *pp; pp = &(*pp)->next) {
- if ((*pp)->datahandle == datahandle) {
- p = *pp;
- *pp = (*pp)->next;
+ spin_lock_irqsave(&mp->ackqlock, flags);
+ list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
+ if (p->datahandle == datahandle) {
+ list_del(&p->list);
kfree(p);
mp->nack--;
+ spin_unlock_irqrestore(&mp->ackqlock, flags);
return 0;
}
}
+ spin_unlock_irqrestore(&mp->ackqlock, flags);
return -1;
}
static void capiminor_del_all_ack(struct capiminor *mp)
{
- struct datahandle_queue **pp, *p;
+ struct datahandle_queue *p, *tmp;
+ unsigned long flags;
- pp = &mp->ackqueue;
- while (*pp) {
- p = *pp;
- *pp = (*pp)->next;
+ spin_lock_irqsave(&mp->ackqlock, flags);
+ list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
+ list_del(&p->list);
kfree(p);
mp->nack--;
}
+ spin_unlock_irqrestore(&mp->ackqlock, flags);
}
@@ -220,6 +228,8 @@ static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
mp->ncci = ncci;
mp->msgid = 0;
atomic_set(&mp->ttyopencount,0);
+ INIT_LIST_HEAD(&mp->ackqueue);
+ spin_lock_init(&mp->ackqlock);
skb_queue_head_init(&mp->inqueue);
skb_queue_head_init(&mp->outqueue);
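The capi.c hunks above replace a hand-rolled singly linked ack queue with the standard list_head idiom protected by a spinlock. Reduced to its essentials, the pattern looks like this (struct item and free_all() are illustrative names only):

struct item {
	struct list_head list;
	u16 handle;
};

static void free_all(struct list_head *head, spinlock_t *lock)
{
	struct item *p, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(p, tmp, head, list) {
		list_del(&p->list);
		kfree(p);
	}
	spin_unlock_irqrestore(lock, flags);
}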
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index f1a1f9a9b88e..1f5ebe9ee72c 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -592,7 +592,7 @@ static int put_address(char *st, u_char *p, int len)
} /* put_address */
/*************************************/
-/* report a succesfull interrogation */
+/* report a successful interrogation */
/*************************************/
static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
{ char *src = ic->parm.dss1_io.data;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index eb41aba3ddef..8a45715dd4c1 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -65,23 +65,22 @@ static struct usb_device_id gigaset_table [] = {
MODULE_DEVICE_TABLE(usb, gigaset_table);
-/*======================= local function prototypes =============================*/
+/*======================= local function prototypes ==========================*/
-/* This function is called if a new device is connected to the USB port. It
- * checks whether this new device belongs to this driver.
- */
+/* function called if a new device belonging to this driver is connected */
static int gigaset_probe(struct usb_interface *interface,
const struct usb_device_id *id);
/* Function will be called if the device is unplugged */
static void gigaset_disconnect(struct usb_interface *interface);
-static void read_ctrl_callback(struct urb *, struct pt_regs *);
+static int atread_submit(struct cardstate *, int);
static void stopurbs(struct bas_bc_state *);
+static int req_submit(struct bc_state *, int, int, int);
static int atwrite_submit(struct cardstate *, unsigned char *, int);
static int start_cbsend(struct cardstate *);
-/*==============================================================================*/
+/*============================================================================*/
struct bas_cardstate {
struct usb_device *udev; /* USB device pointer */
@@ -91,6 +90,7 @@ struct bas_cardstate {
struct urb *urb_ctrl; /* control pipe default URB */
struct usb_ctrlrequest dr_ctrl;
struct timer_list timer_ctrl; /* control request timeout */
+ int retry_ctrl;
struct timer_list timer_atrdy; /* AT command ready timeout */
struct urb *urb_cmd_out; /* for sending AT commands */
@@ -307,6 +307,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
* hang up any existing connection because of an unrecoverable error
* This function may be called from any context and takes care of scheduling
* the necessary actions for execution outside of interrupt context.
+ * cs->lock must not be held.
* argument:
* B channel control structure
*/
@@ -325,14 +326,17 @@ static inline void error_hangup(struct bc_state *bcs)
/* error_reset
* reset Gigaset device because of an unrecoverable error
- * This function may be called from any context, and should take care of
+ * This function may be called from any context, and takes care of
* scheduling the necessary actions for execution outside of interrupt context.
- * Right now, it just generates a kernel message calling for help.
+ * cs->lock must not be held.
* argument:
* controller state structure
*/
static inline void error_reset(struct cardstate *cs)
{
+ /* close AT command channel to recover (ignore errors) */
+ req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
+
//FIXME try to recover without bothering the user
dev_err(cs->dev,
"unrecoverable error - please disconnect Gigaset base to reset\n");
@@ -403,14 +407,30 @@ static void cmd_in_timeout(unsigned long data)
{
struct cardstate *cs = (struct cardstate *) data;
struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
if (!ucs->rcvbuf_size) {
gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
return;
}
- dev_err(cs->dev, "timeout reading AT response\n");
- error_reset(cs); //FIXME retry?
+ if (ucs->retry_cmd_in++ < BAS_RETRY) {
+ dev_notice(cs->dev, "control read: timeout, retry %d\n",
+ ucs->retry_cmd_in);
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc >= 0 || rc == -ENODEV)
+ /* resubmitted or disconnected */
+ /* - bypass regular exit block */
+ return;
+ } else {
+ dev_err(cs->dev,
+ "control read: timeout, giving up after %d tries\n",
+ ucs->retry_cmd_in);
+ }
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ error_reset(cs);
}
/* set/clear bits in base connection state, return previous state
@@ -428,6 +448,96 @@ inline static int update_basstate(struct bas_cardstate *ucs,
return state;
}
+/* read_ctrl_callback
+ * USB completion handler for control pipe input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block
+ * urb->context = inbuf structure for controller state
+ */
+static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct inbuf_t *inbuf = urb->context;
+ struct cardstate *cs = inbuf->cs;
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int have_data = 0;
+ unsigned numbytes;
+ int rc;
+
+ update_basstate(ucs, 0, BS_ATRDPEND);
+
+ if (!ucs->rcvbuf_size) {
+ dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
+ return;
+ }
+
+ del_timer(&ucs->timer_cmd_in);
+
+ switch (urb->status) {
+ case 0: /* normal completion */
+ numbytes = urb->actual_length;
+ if (unlikely(numbytes != ucs->rcvbuf_size)) {
+ dev_warn(cs->dev,
+ "control read: received %d chars, expected %d\n",
+ numbytes, ucs->rcvbuf_size);
+ if (numbytes > ucs->rcvbuf_size)
+ numbytes = ucs->rcvbuf_size;
+ }
+
+ /* copy received bytes to inbuf */
+ have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
+
+ if (unlikely(numbytes < ucs->rcvbuf_size)) {
+ /* incomplete - resubmit for remaining bytes */
+ ucs->rcvbuf_size -= numbytes;
+ ucs->retry_cmd_in = 0;
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc >= 0 || rc == -ENODEV)
+ /* resubmitted or disconnected */
+ /* - bypass regular exit block */
+ return;
+ error_reset(cs);
+ }
+ break;
+
+ case -ENOENT: /* cancelled */
+ case -ECONNRESET: /* cancelled (async) */
+ case -EINPROGRESS: /* pending */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* no action necessary */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(urb->status));
+ break;
+
+ default: /* severe trouble */
+ dev_warn(cs->dev, "control read: %s\n",
+ get_usb_statmsg(urb->status));
+ if (ucs->retry_cmd_in++ < BAS_RETRY) {
+ dev_notice(cs->dev, "control read: retry %d\n",
+ ucs->retry_cmd_in);
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc >= 0 || rc == -ENODEV)
+ /* resubmitted or disconnected */
+ /* - bypass regular exit block */
+ return;
+ } else {
+ dev_err(cs->dev,
+ "control read: giving up after %d tries\n",
+ ucs->retry_cmd_in);
+ }
+ error_reset(cs);
+ }
+
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ if (have_data) {
+ gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(cs);
+ }
+}
+
/* atread_submit
* submit an HD_READ_ATMESSAGE command URB and optionally start a timeout
* parameters:
@@ -466,7 +576,7 @@ static int atread_submit(struct cardstate *cs, int timeout)
if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
update_basstate(ucs, 0, BS_ATRDPEND);
dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
- get_usb_statmsg(ret));
+ get_usb_rcmsg(ret));
return ret;
}
@@ -611,9 +721,12 @@ static void read_int_callback(struct urb *urb, struct pt_regs *regs)
kfree(ucs->rcvbuf);
ucs->rcvbuf = NULL;
ucs->rcvbuf_size = 0;
- if (rc != -ENODEV)
+ if (rc != -ENODEV) {
//FIXME corrective action?
+ spin_unlock_irqrestore(&cs->lock, flags);
error_reset(cs);
+ break;
+ }
}
spin_unlock_irqrestore(&cs->lock, flags);
break;
@@ -643,97 +756,6 @@ resubmit:
}
}
-/* read_ctrl_callback
- * USB completion handler for control pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block
- * urb->context = inbuf structure for controller state
- */
-static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
-{
- struct inbuf_t *inbuf = urb->context;
- struct cardstate *cs = inbuf->cs;
- struct bas_cardstate *ucs = cs->hw.bas;
- int have_data = 0;
- unsigned numbytes;
- int rc;
-
- update_basstate(ucs, 0, BS_ATRDPEND);
-
- if (!ucs->rcvbuf_size) {
- dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
- return;
- }
-
- del_timer(&ucs->timer_cmd_in);
-
- switch (urb->status) {
- case 0: /* normal completion */
- numbytes = urb->actual_length;
- if (unlikely(numbytes == 0)) {
- dev_warn(cs->dev,
- "control read: empty block received\n");
- goto retry;
- }
- if (unlikely(numbytes != ucs->rcvbuf_size)) {
- dev_warn(cs->dev,
- "control read: received %d chars, expected %d\n",
- numbytes, ucs->rcvbuf_size);
- if (numbytes > ucs->rcvbuf_size)
- numbytes = ucs->rcvbuf_size;
- }
-
- /* copy received bytes to inbuf */
- have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
-
- if (unlikely(numbytes < ucs->rcvbuf_size)) {
- /* incomplete - resubmit for remaining bytes */
- ucs->rcvbuf_size -= numbytes;
- ucs->retry_cmd_in = 0;
- goto retry;
- }
- break;
-
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* no action necessary */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
- break;
-
- default: /* severe trouble */
- dev_warn(cs->dev, "control read: %s\n",
- get_usb_statmsg(urb->status));
- retry:
- if (ucs->retry_cmd_in++ < BAS_RETRY) {
- dev_notice(cs->dev, "control read: retry %d\n",
- ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
- return;
- } else {
- dev_err(cs->dev,
- "control read: giving up after %d tries\n",
- ucs->retry_cmd_in);
- }
- error_reset(cs);
- }
-
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- if (have_data) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- }
-}
-
/* read_iso_callback
* USB completion handler for B channel isochronous input
* called by the USB subsystem in interrupt context
@@ -1378,6 +1400,7 @@ static void req_timeout(unsigned long data)
case HD_CLOSE_B1CHANNEL:
dev_err(bcs->cs->dev, "timeout closing channel %d\n",
bcs->channel + 1);
+ error_reset(bcs->cs);
break;
default:
@@ -1396,22 +1419,61 @@ static void req_timeout(unsigned long data)
static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs)
{
struct bas_cardstate *ucs = urb->context;
+ int rc;
unsigned long flags;
- spin_lock_irqsave(&ucs->lock, flags);
- if (urb->status && ucs->pending) {
- dev_err(&ucs->interface->dev,
- "control request 0x%02x failed: %s\n",
- ucs->pending, get_usb_statmsg(urb->status));
- del_timer(&ucs->timer_ctrl);
- ucs->pending = 0;
- }
- /* individual handling of specific request types */
- switch (ucs->pending) {
- case HD_DEVICE_INIT_ACK: /* no reply expected */
- ucs->pending = 0;
+ /* check status */
+ switch (urb->status) {
+ case 0: /* normal completion */
+ spin_lock_irqsave(&ucs->lock, flags);
+ switch (ucs->pending) {
+ case HD_DEVICE_INIT_ACK: /* no reply expected */
+ del_timer(&ucs->timer_ctrl);
+ ucs->pending = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return;
+
+ case -ENOENT: /* cancelled */
+ case -ECONNRESET: /* cancelled (async) */
+ case -EINPROGRESS: /* pending */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* ignore silently */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(urb->status));
break;
+
+ default: /* any failure */
+ if (++ucs->retry_ctrl > BAS_RETRY) {
+ dev_err(&ucs->interface->dev,
+ "control request 0x%02x failed: %s\n",
+ ucs->dr_ctrl.bRequest,
+ get_usb_statmsg(urb->status));
+ break; /* give up */
+ }
+ dev_notice(&ucs->interface->dev,
+ "control request 0x%02x: %s, retry %d\n",
+ ucs->dr_ctrl.bRequest, get_usb_statmsg(urb->status),
+ ucs->retry_ctrl);
+ /* urb->dev is clobbered by USB subsystem */
+ urb->dev = ucs->udev;
+ rc = usb_submit_urb(urb, SLAB_ATOMIC);
+ if (unlikely(rc)) {
+ dev_err(&ucs->interface->dev,
+ "could not resubmit request 0x%02x: %s\n",
+ ucs->dr_ctrl.bRequest, get_usb_rcmsg(rc));
+ break;
+ }
+ /* resubmitted */
+ return;
}
+
+ /* failed, clear pending request */
+ spin_lock_irqsave(&ucs->lock, flags);
+ del_timer(&ucs->timer_ctrl);
+ ucs->pending = 0;
spin_unlock_irqrestore(&ucs->lock, flags);
}
@@ -1455,9 +1517,11 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
usb_sndctrlpipe(ucs->udev, 0),
(unsigned char*) &ucs->dr_ctrl, NULL, 0,
write_ctrl_callback, ucs);
- if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) {
+ ucs->retry_ctrl = 0;
+ ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC);
+ if (unlikely(ret)) {
dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
- req, get_usb_statmsg(ret));
+ req, get_usb_rcmsg(ret));
spin_unlock_irqrestore(&ucs->lock, flags);
return ret;
}
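
The reworked control-request path above replaces one-shot error handling with a bounded retry: req_submit() zeroes retry_ctrl before each submission, write_ctrl_callback() resubmits the URB on transient failures, and only after more than BAS_RETRY attempts does it clear the pending request and fall through to the error path. A minimal userspace sketch of that bounded-retry shape, assuming an invented submit_once() transport and MAX_RETRY limit (neither is part of the driver):

#include <stdio.h>

#define MAX_RETRY 3			/* stand-in for BAS_RETRY */

static int attempts;

/* fake transport: fails twice, then succeeds */
static int submit_once(void)
{
	return ++attempts < 3 ? -1 : 0;
}

/* models the completion-handler logic: resubmit a bounded number of times */
static int submit_with_retry(void)
{
	int retry = 0;			/* plays the role of ucs->retry_ctrl */

	for (;;) {
		if (submit_once() == 0)
			return 0;	/* completed */
		if (++retry > MAX_RETRY) {
			fprintf(stderr, "giving up after %d tries\n", retry - 1);
			return -1;	/* clear pending request, report error */
		}
		fprintf(stderr, "failed, retry %d\n", retry);
	}
}

int main(void)
{
	return submit_with_retry() ? 1 : 0;
}
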
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index acb7e2656780..2a56bf33a673 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -981,7 +981,7 @@ exit:
EXPORT_SYMBOL_GPL(gigaset_stop);
static LIST_HEAD(drivers);
-static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(driver_lock);
struct cardstate *gigaset_get_cs_by_id(int id)
{
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 18e05c09b71c..44f02dbd1111 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -1262,7 +1262,8 @@ static void do_action(int action, struct cardstate *cs,
break;
case ACT_HUPMODEM:
/* send "+++" (hangup in unimodem mode) */
- cs->ops->write_cmd(cs, "+++", 3, NULL);
+ if (cs->connected)
+ cs->ops->write_cmd(cs, "+++", 3, NULL);
break;
case ACT_RING:
/* get fresh AT state structure for new CID */
@@ -1294,7 +1295,6 @@ static void do_action(int action, struct cardstate *cs,
break;
case ACT_ICALL:
handle_icall(cs, bcs, p_at_state);
- at_state = *p_at_state;
break;
case ACT_FAILSDOWN:
dev_warn(cs->dev, "Could not shut down the device.\n");
@@ -1334,10 +1334,8 @@ static void do_action(int action, struct cardstate *cs,
*/
at_state->pending_commands |= PC_DLE0;
atomic_set(&cs->commands_pending, 1);
- } else {
+ } else
disconnect(p_at_state);
- at_state = *p_at_state;
- }
break;
case ACT_FAKEDLE0:
at_state->int_var[VAR_ZDLE] = 0;
@@ -1354,10 +1352,8 @@ static void do_action(int action, struct cardstate *cs,
at_state->cid = -1;
if (bcs && cs->onechannel)
at_state->pending_commands |= PC_DLE0;
- else {
+ else
disconnect(p_at_state);
- at_state = *p_at_state;
- }
schedule_init(cs, MS_RECOVER);
break;
case ACT_FAILDLE0:
@@ -1410,7 +1406,6 @@ static void do_action(int action, struct cardstate *cs,
case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */
disconnect(p_at_state);
- at_state = *p_at_state;
break;
case ACT_ABORTDIAL: /* error/timeout during dial preparation */
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 91d25acb5ede..3622720f0505 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1688,7 +1688,7 @@ setup_hfcpci(struct IsdnCard *card)
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
return (0);
}
- cs->hw.hfcpci.pci_io = (char *) dev_hfcpci->resource[ 1].start;
+ cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
} else {
printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index abecabf8c271..aacbf0d14b64 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1402,12 +1402,12 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
}
/* No, locate it in the table */
if (cset == 0) {
- for (i = 0; i < IESIZE; i++)
+ for (i = 0; i < IESIZE_NI1; i++)
if (*buf == ielist_ni1[i].nr)
break;
/* When not found, give appropriate msg */
- if (i != IESIZE) {
+ if (i != IESIZE_NI1) {
dp += sprintf(dp, " %s\n", ielist_ni1[i].descr);
dp += ielist_ni1[i].f(dp, buf);
} else
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index e2bb4fd8e25e..e82ab2251b82 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -311,8 +311,9 @@ setup_telespci(struct IsdnCard *card)
}
cs->hw.teles0.membase = ioremap(pci_resource_start(dev_tel, 0),
PAGE_SIZE);
- printk(KERN_INFO "Found: Zoran, base-address: 0x%lx, irq: 0x%x\n",
- pci_resource_start(dev_tel, 0), dev_tel->irq);
+ printk(KERN_INFO "Found: Zoran, base-address: 0x%llx, irq: 0x%x\n",
+ (unsigned long long)pci_resource_start(dev_tel, 0),
+ dev_tel->irq);
} else {
printk(KERN_WARNING "TelesPCI: No PCI card found\n");
return(0);
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 81accdf35168..b26e509ec759 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -933,7 +933,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
count_put = count_pull;
if(count_put > 1)
tty_insert_flip_string(tty, skb->data, count_put - 1);
- last = skb->data[count_put] - 1;
+ last = skb->data[count_put - 1];
len -= count_put;
#ifdef CONFIG_ISDN_AUDIO
}
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 2ac90242d263..433389daedb2 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -82,7 +82,7 @@ isdn_tty_try_read(modem_info * info, struct sk_buff *skb)
int l = skb->len;
unsigned char *dp = skb->data;
while (--l) {
- if (*skb->data == DLE)
+ if (*dp == DLE)
tty_insert_flip_char(tty, DLE, 0);
tty_insert_flip_char(tty, *dp++, 0);
}
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index 743ac4077f35..8b3efc243161 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -208,7 +208,7 @@ static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb
*/
static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
{
- struct sk_buff * skb = dev_alloc_skb(1);
+ struct sk_buff * skb;
enum wan_states *state_p
= &( ( (ix25_pdata_t*) (cprot->proto_data) ) -> state);
IX25DEBUG( "isdn_x25iface_connect_ind %s \n"
@@ -220,6 +220,8 @@ static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
return -1;
}
*state_p = WAN_CONNECTED;
+
+ skb = dev_alloc_skb(1);
if( skb ){
*( skb_put(skb, 1) ) = 0x01;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fe6541326c71..9b015f9af351 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -18,7 +18,7 @@
#include <linux/leds.h>
#include "leds.h"
-rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 5e2cd8be1191..1b1ce6523960 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -26,7 +26,7 @@
/*
* Nests outside led_cdev->trigger_lock
*/
-static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(triggers_list_lock);
static LIST_HEAD(trigger_list);
ssize_t led_trigger_store(struct class_device *dev, const char *buf,
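
The leds changes here and the gigaset/common.c change above replace the deprecated SPIN_LOCK_UNLOCKED / RW_LOCK_UNLOCKED struct initializers with DEFINE_SPINLOCK()/DEFINE_RWLOCK(), which declare and statically initialize the lock in a single step and stay correct if the lock type grows debug fields. As a rough userspace analogy only (POSIX rwlocks, not the kernel API), the same idea is preferring the static initializer over a runtime init call:

#include <pthread.h>
#include <stdio.h>

/* analogous to DEFINE_RWLOCK(leds_list_lock): declared and initialized
 * at compile time, no separate init call required */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

static int list_len;

int main(void)
{
	pthread_rwlock_wrlock(&list_lock);
	list_len++;
	pthread_rwlock_unlock(&list_lock);

	pthread_rwlock_rdlock(&list_lock);
	printf("len=%d\n", list_len);
	pthread_rwlock_unlock(&list_lock);
	return 0;
}
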
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 8972e53d2dcb..45a268f8047e 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o
-obj-$(CONFIG_ADB_PMU) += via-pmu.o
+obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
obj-$(CONFIG_ADB_CUDA) += via-cuda.o
obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c26e1236b275..cbfbbe2b150a 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -179,7 +179,7 @@ u8 adb_to_linux_keycodes[128] = {
/* 0x65 */ KEY_F9, /* 67 */
/* 0x66 */ KEY_HANJA, /* 123 */
/* 0x67 */ KEY_F11, /* 87 */
- /* 0x68 */ KEY_HANGUEL, /* 122 */
+ /* 0x68 */ KEY_HANGEUL, /* 122 */
/* 0x69 */ KEY_SYSRQ, /* 99 */
/* 0x6a */ 0,
/* 0x6b */ KEY_SCROLLLOCK, /* 70 */
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 431bd37225a1..c687ac703941 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -428,10 +428,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
/* MacIO itself has a different reg, we use it's PCI base */
if (np == chip->of_node) {
- sprintf(dev->ofdev.dev.bus_id, "%1d.%08lx:%.*s",
+ sprintf(dev->ofdev.dev.bus_id, "%1d.%016llx:%.*s",
chip->lbus.index,
#ifdef CONFIG_PCI
- pci_resource_start(chip->lbus.pdev, 0),
+ (unsigned long long)pci_resource_start(chip->lbus.pdev, 0),
#else
0, /* NuBus may want to do something better here */
#endif
diff --git a/drivers/macintosh/via-pmu-event.c b/drivers/macintosh/via-pmu-event.c
new file mode 100644
index 000000000000..25cd56542328
--- /dev/null
+++ b/drivers/macintosh/via-pmu-event.c
@@ -0,0 +1,80 @@
+/*
+ * via-pmu event device for reporting some events that come through the PMU
+ *
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include "via-pmu-event.h"
+
+static struct input_dev *pmu_input_dev;
+
+static int __init via_pmu_event_init(void)
+{
+ int err;
+
+ /* do other models report button/lid status? */
+ if (pmu_get_model() != PMU_KEYLARGO_BASED)
+ return -ENODEV;
+
+ pmu_input_dev = input_allocate_device();
+ if (!pmu_input_dev)
+ return -ENOMEM;
+
+ pmu_input_dev->name = "PMU";
+ pmu_input_dev->id.bustype = BUS_HOST;
+ pmu_input_dev->id.vendor = 0x0001;
+ pmu_input_dev->id.product = 0x0001;
+ pmu_input_dev->id.version = 0x0100;
+
+ set_bit(EV_KEY, pmu_input_dev->evbit);
+ set_bit(EV_SW, pmu_input_dev->evbit);
+ set_bit(KEY_POWER, pmu_input_dev->keybit);
+ set_bit(SW_LID, pmu_input_dev->swbit);
+
+ err = input_register_device(pmu_input_dev);
+ if (err)
+ input_free_device(pmu_input_dev);
+ return err;
+}
+
+void via_pmu_event(int key, int down)
+{
+
+ if (unlikely(!pmu_input_dev))
+ return;
+
+ switch (key) {
+ case PMU_EVT_POWER:
+ input_report_key(pmu_input_dev, KEY_POWER, down);
+ break;
+ case PMU_EVT_LID:
+ input_report_switch(pmu_input_dev, SW_LID, down);
+ break;
+ default:
+ /* no such key handled */
+ return;
+ }
+
+ input_sync(pmu_input_dev);
+}
+
+late_initcall(via_pmu_event_init);
diff --git a/drivers/macintosh/via-pmu-event.h b/drivers/macintosh/via-pmu-event.h
new file mode 100644
index 000000000000..72c54de408e8
--- /dev/null
+++ b/drivers/macintosh/via-pmu-event.h
@@ -0,0 +1,8 @@
+#ifndef __VIA_PMU_EVENT_H
+#define __VIA_PMU_EVENT_H
+
+#define PMU_EVT_POWER 0
+#define PMU_EVT_LID 1
+extern void via_pmu_event(int key, int down);
+
+#endif /* __VIA_PMU_EVENT_H */
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 2a355ae59562..1ab4f16c08b9 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -69,6 +69,8 @@
#include <asm/open_pic.h>
#endif
+#include "via-pmu-event.h"
+
/* Some compile options */
#undef SUSPEND_USES_PMU
#define DEBUG_SLEEP
@@ -1427,6 +1429,12 @@ next:
if (pmu_battery_count)
query_battery_state();
pmu_pass_intr(data, len);
+ /* len == 6 is probably a bad check. But how do I
+ * know what PMU versions send what events here? */
+ if (len == 6) {
+ via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
+ via_pmu_event(PMU_EVT_LID, data[1]&1);
+ }
} else {
pmu_pass_intr(data, len);
}
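
The via-pmu-event device added above surfaces the PMU's power-button and lid events through the generic input layer, so they appear on an evdev node like any other input device. A hedged userspace sketch of a reader follows; the /dev/input/event2 path is only an assumption for illustration (the actual node depends on what other input devices exist; the one named "PMU" can be found via /proc/bus/input/devices):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	/* example path only; locate the "PMU" device via /proc/bus/input/devices */
	int fd = open("/dev/input/event2", O_RDONLY);
	struct input_event ev;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_KEY && ev.code == KEY_POWER)
			printf("power button %s\n", ev.value ? "pressed" : "released");
		else if (ev.type == EV_SW && ev.code == SW_LID)
			printf("lid %s\n", ev.value ? "closed" : "open");
	}
	close(fd);
	return 0;
}
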
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index ac25a48362ac..bf869ed03eed 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -90,7 +90,7 @@ config MD_RAID10
depends on BLK_DEV_MD && EXPERIMENTAL
---help---
RAID-10 provides a combination of striping (RAID-0) and
- mirroring (RAID-1) with easier configuration and more flexable
+ mirroring (RAID-1) with easier configuration and more flexible
layout.
Unlike RAID-0, but like RAID-1, RAID-10 requires all devices to
be the same size (or at least, only as much as the smallest device
@@ -104,8 +104,8 @@ config MD_RAID10
If unsure, say Y.
-config MD_RAID5
- tristate "RAID-4/RAID-5 mode"
+config MD_RAID456
+ tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
---help---
A RAID-5 set of N drives with a capacity of C MB per drive provides
@@ -116,20 +116,28 @@ config MD_RAID5
while a RAID-5 set distributes the parity across the drives in one
of the available parity distribution methods.
+ A RAID-6 set of N drives with a capacity of C MB per drive
+ provides the capacity of C * (N - 2) MB, and protects
+ against a failure of any two drives. For a given sector
+ (row) number, (N - 2) drives contain data sectors, and two
+ drives contain two independent redundancy syndromes. Like
+ RAID-5, RAID-6 distributes the syndromes across the drives
+ in one of the available parity distribution methods.
+
Information about Software RAID on Linux is contained in the
Software-RAID mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. There you will also
learn where to get the supporting user space utilities raidtools.
- If you want to use such a RAID-4/RAID-5 set, say Y. To
+ If you want to use such a RAID-4/RAID-5/RAID-6 set, say Y. To
compile this code as a module, choose M here: the module
- will be called raid5.
+ will be called raid456.
If unsure, say Y.
config MD_RAID5_RESHAPE
bool "Support adding drives to a raid-5 array (experimental)"
- depends on MD_RAID5 && EXPERIMENTAL
+ depends on MD_RAID456 && EXPERIMENTAL
---help---
A RAID-5 set can be expanded by adding extra drives. This
requires "restriping" the array which means (almost) every
@@ -139,7 +147,7 @@ config MD_RAID5_RESHAPE
is online. However it is still EXPERIMENTAL code. It should
work, but please be sure that you have backups.
- You will need mdadm verion 2.4.1 or later to use this
+ You will need mdadm version 2.4.1 or later to use this
feature safely. During the early stage of reshape there is
a critical section where live data is being over-written. A
crash during this time needs extra care for recovery. The
@@ -154,28 +162,6 @@ config MD_RAID5_RESHAPE
There should be enough spares already present to make the new
array workable.
-config MD_RAID6
- tristate "RAID-6 mode"
- depends on BLK_DEV_MD
- ---help---
- A RAID-6 set of N drives with a capacity of C MB per drive
- provides the capacity of C * (N - 2) MB, and protects
- against a failure of any two drives. For a given sector
- (row) number, (N - 2) drives contain data sectors, and two
- drives contains two independent redundancy syndromes. Like
- RAID-5, RAID-6 distributes the syndromes across the drives
- in one of the available parity distribution methods.
-
- RAID-6 requires mdadm-1.5.0 or later, available at:
-
- ftp://ftp.kernel.org/pub/linux/utils/raid/mdadm/
-
- If you want to use such a RAID-6 set, say Y. To compile
- this code as a module, choose M here: the module will be
- called raid6.
-
- If unsure, say Y.
-
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
@@ -235,7 +221,7 @@ config DM_SNAPSHOT
tristate "Snapshot target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
---help---
- Allow volume managers to take writeable snapshots of a device.
+ Allow volume managers to take writable snapshots of a device.
config DM_MIRROR
tristate "Mirror target (EXPERIMENTAL)"
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d3efedf6a6ad..34957a68d921 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -8,7 +8,7 @@ dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
dm-snapshot-objs := dm-snap.o dm-exception-store.o
dm-mirror-objs := dm-log.o dm-raid1.o
md-mod-objs := md.o bitmap.o
-raid6-objs := raid6main.o raid6algos.o raid6recov.o raid6tables.o \
+raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
raid6int1.o raid6int2.o raid6int4.o \
raid6int8.o raid6int16.o raid6int32.o \
raid6altivec1.o raid6altivec2.o raid6altivec4.o \
@@ -25,8 +25,7 @@ obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
-obj-$(CONFIG_MD_RAID5) += raid5.o xor.o
-obj-$(CONFIG_MD_RAID6) += raid6.o xor.o
+obj-$(CONFIG_MD_RAID456) += raid456.o xor.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f8ffaee20ff8..ebbd2d856256 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -7,7 +7,6 @@
* additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
* - added disk storage for bitmap
* - changes to allow various bitmap chunk sizes
- * - added bitmap daemon (to asynchronously clear bitmap bits from disk)
*/
/*
@@ -15,9 +14,6 @@
*
* flush after percent set rather than just time based. (maybe both).
* wait if count gets too high, wake when it drops to half.
- * allow bitmap to be mirrored with superblock (before or after...)
- * allow hot-add to re-instate a current device.
- * allow hot-add of bitmap after quiessing device
*/
#include <linux/module.h>
@@ -73,24 +69,6 @@ static inline char * bmname(struct bitmap *bitmap)
/*
- * test if the bitmap is active
- */
-int bitmap_active(struct bitmap *bitmap)
-{
- unsigned long flags;
- int res = 0;
-
- if (!bitmap)
- return res;
- spin_lock_irqsave(&bitmap->lock, flags);
- res = bitmap->flags & BITMAP_ACTIVE;
- spin_unlock_irqrestore(&bitmap->lock, flags);
- return res;
-}
-
-#define WRITE_POOL_SIZE 256
-
-/*
* just a placeholder - calls kmalloc for bitmap pages
*/
static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
@@ -269,6 +247,8 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
page->index = index;
+ attach_page_buffers(page, NULL); /* so that free_buffer will
+ * quietly no-op */
return page;
}
}
@@ -300,77 +280,132 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai
*/
static int write_page(struct bitmap *bitmap, struct page *page, int wait)
{
- int ret = -ENOMEM;
+ struct buffer_head *bh;
if (bitmap->file == NULL)
return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
- flush_dcache_page(page); /* make sure visible to anyone reading the file */
+ bh = page_buffers(page);
- if (wait)
- lock_page(page);
- else {
- if (TestSetPageLocked(page))
- return -EAGAIN; /* already locked */
- if (PageWriteback(page)) {
- unlock_page(page);
- return -EAGAIN;
- }
+ while (bh && bh->b_blocknr) {
+ atomic_inc(&bitmap->pending_writes);
+ set_buffer_locked(bh);
+ set_buffer_mapped(bh);
+ submit_bh(WRITE, bh);
+ bh = bh->b_this_page;
}
- ret = page->mapping->a_ops->prepare_write(bitmap->file, page, 0, PAGE_SIZE);
- if (!ret)
- ret = page->mapping->a_ops->commit_write(bitmap->file, page, 0,
- PAGE_SIZE);
- if (ret) {
- unlock_page(page);
- return ret;
+ if (wait) {
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
}
+ return 0;
+}
- set_page_dirty(page); /* force it to be written out */
-
- if (!wait) {
- /* add to list to be waited for by daemon */
- struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
- item->page = page;
- get_page(page);
- spin_lock(&bitmap->write_lock);
- list_add(&item->list, &bitmap->complete_pages);
- spin_unlock(&bitmap->write_lock);
- md_wakeup_thread(bitmap->writeback_daemon);
+static void end_bitmap_write(struct buffer_head *bh, int uptodate)
+{
+ struct bitmap *bitmap = bh->b_private;
+ unsigned long flags;
+
+ if (!uptodate) {
+ spin_lock_irqsave(&bitmap->lock, flags);
+ bitmap->flags |= BITMAP_WRITE_ERROR;
+ spin_unlock_irqrestore(&bitmap->lock, flags);
+ }
+ if (atomic_dec_and_test(&bitmap->pending_writes))
+ wake_up(&bitmap->write_wait);
+}
+
+/* copied from buffer.c */
+static void
+__clear_page_buffers(struct page *page)
+{
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ page_cache_release(page);
+}
+static void free_buffers(struct page *page)
+{
+ struct buffer_head *bh = page_buffers(page);
+
+ while (bh) {
+ struct buffer_head *next = bh->b_this_page;
+ free_buffer_head(bh);
+ bh = next;
}
- return write_one_page(page, wait);
+ __clear_page_buffers(page);
+ put_page(page);
}
-/* read a page from a file, pinning it into cache, and return bytes_read */
+/* read a page from a file.
+ * We both read the page, and attach buffers to the page to record the
+ * address of each block (using bmap). These addresses will be used
+ * to write the block later, completely bypassing the filesystem.
+ * This usage is similar to how swap files are handled, and allows us
+ * to write to a file with no concerns of memory allocation failing.
+ */
static struct page *read_page(struct file *file, unsigned long index,
- unsigned long *bytes_read)
+ struct bitmap *bitmap,
+ unsigned long count)
{
- struct inode *inode = file->f_mapping->host;
struct page *page = NULL;
- loff_t isize = i_size_read(inode);
- unsigned long end_index = isize >> PAGE_SHIFT;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct buffer_head *bh;
+ sector_t block;
PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- page = read_cache_page(inode->i_mapping, index,
- (filler_t *)inode->i_mapping->a_ops->readpage, file);
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ page = ERR_PTR(-ENOMEM);
if (IS_ERR(page))
goto out;
- wait_on_page_locked(page);
- if (!PageUptodate(page) || PageError(page)) {
+
+ bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
+ if (!bh) {
put_page(page);
- page = ERR_PTR(-EIO);
+ page = ERR_PTR(-ENOMEM);
goto out;
}
+ attach_page_buffers(page, bh);
+ block = index << (PAGE_SHIFT - inode->i_blkbits);
+ while (bh) {
+ if (count == 0)
+ bh->b_blocknr = 0;
+ else {
+ bh->b_blocknr = bmap(inode, block);
+ if (bh->b_blocknr == 0) {
+ /* Cannot use this file! */
+ free_buffers(page);
+ page = ERR_PTR(-EINVAL);
+ goto out;
+ }
+ bh->b_bdev = inode->i_sb->s_bdev;
+ if (count < (1<<inode->i_blkbits))
+ count = 0;
+ else
+ count -= (1<<inode->i_blkbits);
+
+ bh->b_end_io = end_bitmap_write;
+ bh->b_private = bitmap;
+ atomic_inc(&bitmap->pending_writes);
+ set_buffer_locked(bh);
+ set_buffer_mapped(bh);
+ submit_bh(READ, bh);
+ }
+ block++;
+ bh = bh->b_this_page;
+ }
+ page->index = index;
- if (index > end_index) /* we have read beyond EOF */
- *bytes_read = 0;
- else if (index == end_index) /* possible short read */
- *bytes_read = isize & ~PAGE_MASK;
- else
- *bytes_read = PAGE_SIZE; /* got a full page */
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ if (bitmap->flags & BITMAP_WRITE_ERROR) {
+ free_buffers(page);
+ page = ERR_PTR(-EIO);
+ }
out:
if (IS_ERR(page))
printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
@@ -441,16 +476,14 @@ static int bitmap_read_sb(struct bitmap *bitmap)
char *reason = NULL;
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
- unsigned long bytes_read;
unsigned long long events;
int err = -EINVAL;
/* page 0 is the superblock, read it... */
if (bitmap->file)
- bitmap->sb_page = read_page(bitmap->file, 0, &bytes_read);
+ bitmap->sb_page = read_page(bitmap->file, 0, bitmap, PAGE_SIZE);
else {
bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
- bytes_read = PAGE_SIZE;
}
if (IS_ERR(bitmap->sb_page)) {
err = PTR_ERR(bitmap->sb_page);
@@ -460,13 +493,6 @@ static int bitmap_read_sb(struct bitmap *bitmap)
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
- if (bytes_read < sizeof(*sb)) { /* short read */
- printk(KERN_INFO "%s: bitmap file superblock truncated\n",
- bmname(bitmap));
- err = -ENOSPC;
- goto out;
- }
-
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep);
write_behind = le32_to_cpu(sb->write_behind);
@@ -550,7 +576,6 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
spin_unlock_irqrestore(&bitmap->lock, flags);
return;
}
- get_page(bitmap->sb_page);
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
switch (op) {
@@ -561,7 +586,6 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
default: BUG();
}
kunmap_atomic(sb, KM_USER0);
- put_page(bitmap->sb_page);
}
/*
@@ -614,48 +638,17 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
while (pages--)
if (map[pages]->index != 0) /* 0 is sb_page, release it below */
- put_page(map[pages]);
+ free_buffers(map[pages]);
kfree(map);
kfree(attr);
- safe_put_page(sb_page);
-}
-
-static void bitmap_stop_daemon(struct bitmap *bitmap);
-
-/* dequeue the next item in a page list -- don't call from irq context */
-static struct page_list *dequeue_page(struct bitmap *bitmap)
-{
- struct page_list *item = NULL;
- struct list_head *head = &bitmap->complete_pages;
-
- spin_lock(&bitmap->write_lock);
- if (list_empty(head))
- goto out;
- item = list_entry(head->prev, struct page_list, list);
- list_del(head->prev);
-out:
- spin_unlock(&bitmap->write_lock);
- return item;
-}
-
-static void drain_write_queues(struct bitmap *bitmap)
-{
- struct page_list *item;
-
- while ((item = dequeue_page(bitmap))) {
- /* don't bother to wait */
- put_page(item->page);
- mempool_free(item, bitmap->write_pool);
- }
-
- wake_up(&bitmap->write_wait);
+ if (sb_page)
+ free_buffers(sb_page);
}
static void bitmap_file_put(struct bitmap *bitmap)
{
struct file *file;
- struct inode *inode;
unsigned long flags;
spin_lock_irqsave(&bitmap->lock, flags);
@@ -663,17 +656,14 @@ static void bitmap_file_put(struct bitmap *bitmap)
bitmap->file = NULL;
spin_unlock_irqrestore(&bitmap->lock, flags);
- bitmap_stop_daemon(bitmap);
-
- drain_write_queues(bitmap);
-
+ if (file)
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
bitmap_file_unmap(bitmap);
if (file) {
- inode = file->f_mapping->host;
- spin_lock(&inode->i_lock);
- atomic_set(&inode->i_writecount, 1); /* allow writes again */
- spin_unlock(&inode->i_lock);
+ struct inode *inode = file->f_dentry->d_inode;
+ invalidate_inode_pages(inode->i_mapping);
fput(file);
}
}
@@ -708,26 +698,27 @@ static void bitmap_file_kick(struct bitmap *bitmap)
}
enum bitmap_page_attr {
- BITMAP_PAGE_DIRTY = 1, // there are set bits that need to be synced
- BITMAP_PAGE_CLEAN = 2, // there are bits that might need to be cleared
- BITMAP_PAGE_NEEDWRITE=4, // there are cleared bits that need to be synced
+ BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced
+ BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared
+ BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced
};
static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- bitmap->filemap_attr[page->index] |= attr;
+ __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- bitmap->filemap_attr[page->index] &= ~attr;
+ __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
-static inline unsigned long get_page_attr(struct bitmap *bitmap, struct page *page)
+static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
+ enum bitmap_page_attr attr)
{
- return bitmap->filemap_attr[page->index];
+ return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
/*
@@ -751,11 +742,6 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
page = filemap_get_page(bitmap, chunk);
bit = file_page_offset(chunk);
-
- /* make sure the page stays cached until it gets written out */
- if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
- get_page(page);
-
/* set the bit */
kaddr = kmap_atomic(page, KM_USER0);
if (bitmap->flags & BITMAP_HOSTENDIAN)
@@ -775,7 +761,8 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
* sync the dirty pages of the bitmap file to disk */
int bitmap_unplug(struct bitmap *bitmap)
{
- unsigned long i, attr, flags;
+ unsigned long i, flags;
+ int dirty, need_write;
struct page *page;
int wait = 0;
int err;
@@ -792,35 +779,26 @@ int bitmap_unplug(struct bitmap *bitmap)
return 0;
}
page = bitmap->filemap[i];
- attr = get_page_attr(bitmap, page);
+ dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
+ need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
- if ((attr & BITMAP_PAGE_DIRTY))
+ if (dirty)
wait = 1;
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (attr & (BITMAP_PAGE_DIRTY | BITMAP_PAGE_NEEDWRITE)) {
+ if (dirty | need_write)
err = write_page(bitmap, page, 0);
- if (err == -EAGAIN) {
- if (attr & BITMAP_PAGE_DIRTY)
- err = write_page(bitmap, page, 1);
- else
- err = 0;
- }
- if (err)
- return 1;
- }
}
if (wait) { /* if any writes were performed, we need to wait on them */
- if (bitmap->file) {
- spin_lock_irq(&bitmap->write_lock);
- wait_event_lock_irq(bitmap->write_wait,
- list_empty(&bitmap->complete_pages), bitmap->write_lock,
- wake_up_process(bitmap->writeback_daemon->tsk));
- spin_unlock_irq(&bitmap->write_lock);
- } else
+ if (bitmap->file)
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ else
md_super_wait(bitmap->mddev);
}
+ if (bitmap->flags & BITMAP_WRITE_ERROR)
+ bitmap_file_kick(bitmap);
return 0;
}
@@ -842,7 +820,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
struct page *page = NULL, *oldpage = NULL;
unsigned long num_pages, bit_cnt = 0;
struct file *file;
- unsigned long bytes, offset, dummy;
+ unsigned long bytes, offset;
int outofdate;
int ret = -ENOSPC;
void *paddr;
@@ -879,7 +857,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (!bitmap->filemap)
goto out;
- bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL);
+ /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
+ bitmap->filemap_attr = kzalloc(
+ (((num_pages*4/8)+sizeof(unsigned long)-1)
+ /sizeof(unsigned long))
+ *sizeof(unsigned long),
+ GFP_KERNEL);
if (!bitmap->filemap_attr)
goto out;
@@ -890,7 +873,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
index = file_page_index(i);
bit = file_page_offset(i);
if (index != oldindex) { /* this is a new page, read it in */
+ int count;
/* unmap the old page, we're done with it */
+ if (index == num_pages-1)
+ count = bytes - index * PAGE_SIZE;
+ else
+ count = PAGE_SIZE;
if (index == 0) {
/*
* if we're here then the superblock page
@@ -900,7 +888,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
page = bitmap->sb_page;
offset = sizeof(bitmap_super_t);
} else if (file) {
- page = read_page(file, index, &dummy);
+ page = read_page(file, index, bitmap, count);
offset = 0;
} else {
page = read_sb_page(bitmap->mddev, bitmap->offset, index);
@@ -971,12 +959,11 @@ void bitmap_write_all(struct bitmap *bitmap)
/* We don't actually write all bitmap blocks here,
* just flag them as needing to be written
*/
+ int i;
- unsigned long chunks = bitmap->chunks;
- unsigned long bytes = (chunks+7)/8 + sizeof(bitmap_super_t);
- unsigned long num_pages = (bytes + PAGE_SIZE-1) / PAGE_SIZE;
- while (num_pages--)
- bitmap->filemap_attr[num_pages] |= BITMAP_PAGE_NEEDWRITE;
+ for (i=0; i < bitmap->file_pages; i++)
+ set_page_attr(bitmap, bitmap->filemap[i],
+ BITMAP_PAGE_NEEDWRITE);
}
@@ -1007,7 +994,6 @@ int bitmap_daemon_work(struct bitmap *bitmap)
struct page *page = NULL, *lastpage = NULL;
int err = 0;
int blocks;
- int attr;
void *paddr;
if (bitmap == NULL)
@@ -1029,43 +1015,34 @@ int bitmap_daemon_work(struct bitmap *bitmap)
if (page != lastpage) {
/* skip this page unless it's marked as needing cleaning */
- if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
- if (attr & BITMAP_PAGE_NEEDWRITE) {
- get_page(page);
+ if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) {
+ int need_write = test_page_attr(bitmap, page,
+ BITMAP_PAGE_NEEDWRITE);
+ if (need_write)
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
- }
+
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (attr & BITMAP_PAGE_NEEDWRITE) {
+ if (need_write) {
switch (write_page(bitmap, page, 0)) {
- case -EAGAIN:
- set_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
- break;
case 0:
break;
default:
bitmap_file_kick(bitmap);
}
- put_page(page);
}
continue;
}
/* grab the new page, sync and release the old */
- get_page(page);
if (lastpage != NULL) {
- if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
+ if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
err = write_page(bitmap, lastpage, 0);
- if (err == -EAGAIN) {
- err = 0;
- set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
- }
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
- put_page(lastpage);
if (err)
bitmap_file_kick(bitmap);
} else
@@ -1107,131 +1084,19 @@ int bitmap_daemon_work(struct bitmap *bitmap)
/* now sync the final page */
if (lastpage != NULL) {
spin_lock_irqsave(&bitmap->lock, flags);
- if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) {
+ if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
err = write_page(bitmap, lastpage, 0);
- if (err == -EAGAIN) {
- set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
- err = 0;
- }
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
-
- put_page(lastpage);
}
return err;
}
-static void daemon_exit(struct bitmap *bitmap, mdk_thread_t **daemon)
-{
- mdk_thread_t *dmn;
- unsigned long flags;
-
- /* if no one is waiting on us, we'll free the md thread struct
- * and exit, otherwise we let the waiter clean things up */
- spin_lock_irqsave(&bitmap->lock, flags);
- if ((dmn = *daemon)) { /* no one is waiting, cleanup and exit */
- *daemon = NULL;
- spin_unlock_irqrestore(&bitmap->lock, flags);
- kfree(dmn);
- complete_and_exit(NULL, 0); /* do_exit not exported */
- }
- spin_unlock_irqrestore(&bitmap->lock, flags);
-}
-
-static void bitmap_writeback_daemon(mddev_t *mddev)
-{
- struct bitmap *bitmap = mddev->bitmap;
- struct page *page;
- struct page_list *item;
- int err = 0;
-
- if (signal_pending(current)) {
- printk(KERN_INFO
- "%s: bitmap writeback daemon got signal, exiting...\n",
- bmname(bitmap));
- err = -EINTR;
- goto out;
- }
- if (bitmap == NULL)
- /* about to be stopped. */
- return;
-
- PRINTK("%s: bitmap writeback daemon woke up...\n", bmname(bitmap));
- /* wait on bitmap page writebacks */
- while ((item = dequeue_page(bitmap))) {
- page = item->page;
- mempool_free(item, bitmap->write_pool);
- PRINTK("wait on page writeback: %p\n", page);
- wait_on_page_writeback(page);
- PRINTK("finished page writeback: %p\n", page);
-
- err = PageError(page);
- put_page(page);
- if (err) {
- printk(KERN_WARNING "%s: bitmap file writeback "
- "failed (page %lu): %d\n",
- bmname(bitmap), page->index, err);
- bitmap_file_kick(bitmap);
- goto out;
- }
- }
- out:
- wake_up(&bitmap->write_wait);
- if (err) {
- printk(KERN_INFO "%s: bitmap writeback daemon exiting (%d)\n",
- bmname(bitmap), err);
- daemon_exit(bitmap, &bitmap->writeback_daemon);
- }
-}
-
-static mdk_thread_t *bitmap_start_daemon(struct bitmap *bitmap,
- void (*func)(mddev_t *), char *name)
-{
- mdk_thread_t *daemon;
- char namebuf[32];
-
-#ifdef INJECT_FATAL_FAULT_2
- daemon = NULL;
-#else
- sprintf(namebuf, "%%s_%s", name);
- daemon = md_register_thread(func, bitmap->mddev, namebuf);
-#endif
- if (!daemon) {
- printk(KERN_ERR "%s: failed to start bitmap daemon\n",
- bmname(bitmap));
- return ERR_PTR(-ECHILD);
- }
-
- md_wakeup_thread(daemon); /* start it running */
-
- PRINTK("%s: %s daemon (pid %d) started...\n",
- bmname(bitmap), name, daemon->tsk->pid);
-
- return daemon;
-}
-
-static void bitmap_stop_daemon(struct bitmap *bitmap)
-{
- /* the daemon can't stop itself... it'll just exit instead... */
- if (bitmap->writeback_daemon && ! IS_ERR(bitmap->writeback_daemon) &&
- current->pid != bitmap->writeback_daemon->tsk->pid) {
- mdk_thread_t *daemon;
- unsigned long flags;
-
- spin_lock_irqsave(&bitmap->lock, flags);
- daemon = bitmap->writeback_daemon;
- bitmap->writeback_daemon = NULL;
- spin_unlock_irqrestore(&bitmap->lock, flags);
- if (daemon && ! IS_ERR(daemon))
- md_unregister_thread(daemon); /* destroy the thread */
- }
-}
-
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
sector_t offset, int *blocks,
int create)
@@ -1500,8 +1365,6 @@ static void bitmap_free(struct bitmap *bitmap)
/* free all allocated memory */
- mempool_destroy(bitmap->write_pool);
-
if (bp) /* deallocate the page memory */
for (k = 0; k < pages; k++)
if (bp[k].map && !bp[k].hijacked)
@@ -1549,20 +1412,20 @@ int bitmap_create(mddev_t *mddev)
return -ENOMEM;
spin_lock_init(&bitmap->lock);
- bitmap->mddev = mddev;
-
- spin_lock_init(&bitmap->write_lock);
- INIT_LIST_HEAD(&bitmap->complete_pages);
+ atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
- bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
- sizeof(struct page_list));
- err = -ENOMEM;
- if (!bitmap->write_pool)
- goto error;
+
+ bitmap->mddev = mddev;
bitmap->file = file;
bitmap->offset = mddev->bitmap_offset;
- if (file) get_file(file);
+ if (file) {
+ get_file(file);
+ do_sync_file_range(file, 0, LLONG_MAX,
+ SYNC_FILE_RANGE_WAIT_BEFORE |
+ SYNC_FILE_RANGE_WRITE |
+ SYNC_FILE_RANGE_WAIT_AFTER);
+ }
/* read superblock from bitmap file (this sets bitmap->chunksize) */
err = bitmap_read_sb(bitmap);
if (err)
@@ -1594,8 +1457,6 @@ int bitmap_create(mddev_t *mddev)
if (!bitmap->bp)
goto error;
- bitmap->flags |= BITMAP_ACTIVE;
-
/* now that we have some pages available, initialize the in-memory
* bitmap from the on-disk bitmap */
start = 0;
@@ -1613,15 +1474,6 @@ int bitmap_create(mddev_t *mddev)
mddev->bitmap = bitmap;
- if (file)
- /* kick off the bitmap writeback daemon */
- bitmap->writeback_daemon =
- bitmap_start_daemon(bitmap,
- bitmap_writeback_daemon,
- "bitmap_wb");
-
- if (IS_ERR(bitmap->writeback_daemon))
- return PTR_ERR(bitmap->writeback_daemon);
mddev->thread->timeout = bitmap->daemon_sleep * HZ;
return bitmap_update_sb(bitmap);
@@ -1638,4 +1490,3 @@ EXPORT_SYMBOL(bitmap_start_sync);
EXPORT_SYMBOL(bitmap_end_sync);
EXPORT_SYMBOL(bitmap_unplug);
EXPORT_SYMBOL(bitmap_close_sync);
-EXPORT_SYMBOL(bitmap_daemon_work);
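
The rewritten read_page() above records, via bmap(), the device block behind every block of the bitmap file and later drives buffer heads straight to the block device, sidestepping the filesystem the same way swap files are handled so that writes cannot fail on memory allocation. The userspace counterpart of that mapping step is the FIBMAP ioctl; the sketch below only illustrates how file blocks translate to device blocks and is not part of the MD code (it normally requires root):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */

int main(int argc, char **argv)
{
	int fd, blksz, i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FIGETBSZ, &blksz) < 0) {
		perror(argv[1]);
		return 1;
	}
	/* map the first few logical file blocks to physical device blocks;
	 * a result of 0 means "not mapped", which is why the bitmap code
	 * refuses files it cannot map completely */
	for (i = 0; i < 8; i++) {
		int block = i;
		if (ioctl(fd, FIBMAP, &block) < 0) {
			perror("FIBMAP");
			return 1;
		}
		printf("file block %d (%d bytes) -> device block %d\n",
		       i, blksz, block);
	}
	close(fd);
	return 0;
}
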
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 61a590bb6241..6022ed12a795 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -20,7 +20,7 @@
#include "dm.h"
-#define PFX "crypt: "
+#define DM_MSG_PREFIX "crypt"
/*
* per bio private data
@@ -125,19 +125,19 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
u8 *salt;
if (opts == NULL) {
- ti->error = PFX "Digest algorithm missing for ESSIV mode";
+ ti->error = "Digest algorithm missing for ESSIV mode";
return -EINVAL;
}
/* Hash the cipher key with the given hash algorithm */
hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
if (hash_tfm == NULL) {
- ti->error = PFX "Error initializing ESSIV hash";
+ ti->error = "Error initializing ESSIV hash";
return -EINVAL;
}
if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
- ti->error = PFX "Expected digest algorithm for ESSIV hash";
+ ti->error = "Expected digest algorithm for ESSIV hash";
crypto_free_tfm(hash_tfm);
return -EINVAL;
}
@@ -145,7 +145,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
saltsize = crypto_tfm_alg_digestsize(hash_tfm);
salt = kmalloc(saltsize, GFP_KERNEL);
if (salt == NULL) {
- ti->error = PFX "Error kmallocing salt storage in ESSIV";
+ ti->error = "Error kmallocing salt storage in ESSIV";
crypto_free_tfm(hash_tfm);
return -ENOMEM;
}
@@ -159,20 +159,20 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
CRYPTO_TFM_MODE_ECB |
CRYPTO_TFM_REQ_MAY_SLEEP);
if (essiv_tfm == NULL) {
- ti->error = PFX "Error allocating crypto tfm for ESSIV";
+ ti->error = "Error allocating crypto tfm for ESSIV";
kfree(salt);
return -EINVAL;
}
if (crypto_tfm_alg_blocksize(essiv_tfm)
!= crypto_tfm_alg_ivsize(cc->tfm)) {
- ti->error = PFX "Block size of ESSIV cipher does "
+ ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
crypto_free_tfm(essiv_tfm);
kfree(salt);
return -EINVAL;
}
if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
- ti->error = PFX "Failed to set key for ESSIV cipher";
+ ti->error = "Failed to set key for ESSIV cipher";
crypto_free_tfm(essiv_tfm);
kfree(salt);
return -EINVAL;
@@ -521,7 +521,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned long long tmpll;
if (argc != 5) {
- ti->error = PFX "Not enough arguments";
+ ti->error = "Not enough arguments";
return -EINVAL;
}
@@ -532,21 +532,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ivmode = strsep(&ivopts, ":");
if (tmp)
- DMWARN(PFX "Unexpected additional cipher options");
+ DMWARN("Unexpected additional cipher options");
key_size = strlen(argv[1]) >> 1;
cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
if (cc == NULL) {
ti->error =
- PFX "Cannot allocate transparent encryption context";
+ "Cannot allocate transparent encryption context";
return -ENOMEM;
}
cc->key_size = key_size;
if ((!key_size && strcmp(argv[1], "-") != 0) ||
(key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
- ti->error = PFX "Error decoding key";
+ ti->error = "Error decoding key";
goto bad1;
}
@@ -562,22 +562,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
else if (strcmp(chainmode, "ecb") == 0)
crypto_flags = CRYPTO_TFM_MODE_ECB;
else {
- ti->error = PFX "Unknown chaining mode";
+ ti->error = "Unknown chaining mode";
goto bad1;
}
if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
- ti->error = PFX "This chaining mode requires an IV mechanism";
+ ti->error = "This chaining mode requires an IV mechanism";
goto bad1;
}
tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
if (!tfm) {
- ti->error = PFX "Error allocating crypto tfm";
+ ti->error = "Error allocating crypto tfm";
goto bad1;
}
if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
- ti->error = PFX "Expected cipher algorithm";
+ ti->error = "Expected cipher algorithm";
goto bad2;
}
@@ -595,7 +595,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else {
- ti->error = PFX "Invalid IV mode";
+ ti->error = "Invalid IV mode";
goto bad2;
}
@@ -610,7 +610,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
else {
cc->iv_size = 0;
if (cc->iv_gen_ops) {
- DMWARN(PFX "Selected cipher does not support IVs");
+ DMWARN("Selected cipher does not support IVs");
if (cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
cc->iv_gen_ops = NULL;
@@ -619,36 +619,36 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
if (!cc->io_pool) {
- ti->error = PFX "Cannot allocate crypt io mempool";
+ ti->error = "Cannot allocate crypt io mempool";
goto bad3;
}
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
- ti->error = PFX "Cannot allocate page mempool";
+ ti->error = "Cannot allocate page mempool";
goto bad4;
}
if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
- ti->error = PFX "Error setting key";
+ ti->error = "Error setting key";
goto bad5;
}
if (sscanf(argv[2], "%llu", &tmpll) != 1) {
- ti->error = PFX "Invalid iv_offset sector";
+ ti->error = "Invalid iv_offset sector";
goto bad5;
}
cc->iv_offset = tmpll;
if (sscanf(argv[4], "%llu", &tmpll) != 1) {
- ti->error = PFX "Invalid device sector";
+ ti->error = "Invalid device sector";
goto bad5;
}
cc->start = tmpll;
if (dm_get_device(ti, argv[3], cc->start, ti->len,
dm_table_get_mode(ti->table), &cc->dev)) {
- ti->error = PFX "Device lookup failed";
+ ti->error = "Device lookup failed";
goto bad5;
}
@@ -657,7 +657,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*(ivopts - 1) = ':';
cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
if (!cc->iv_mode) {
- ti->error = PFX "Error kmallocing iv_mode string";
+ ti->error = "Error kmallocing iv_mode string";
goto bad5;
}
strcpy(cc->iv_mode, ivmode);
@@ -918,13 +918,13 @@ static int __init dm_crypt_init(void)
_kcryptd_workqueue = create_workqueue("kcryptd");
if (!_kcryptd_workqueue) {
r = -ENOMEM;
- DMERR(PFX "couldn't create kcryptd");
+ DMERR("couldn't create kcryptd");
goto bad1;
}
r = dm_register_target(&crypt_target);
if (r < 0) {
- DMERR(PFX "register failed %d", r);
+ DMERR("register failed %d", r);
goto bad2;
}
@@ -942,7 +942,7 @@ static void __exit dm_crypt_exit(void)
int r = dm_unregister_target(&crypt_target);
if (r < 0)
- DMERR(PFX "unregister failed %d", r);
+ DMERR("unregister failed %d", r);
destroy_workqueue(_kcryptd_workqueue);
kmem_cache_destroy(_crypt_io_pool);
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index c7067674dcb7..2a374ccb30dd 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -12,6 +12,8 @@
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
+#define DM_MSG_PREFIX "multipath emc"
+
struct emc_handler {
spinlock_t lock;
@@ -66,7 +68,7 @@ static struct bio *get_failover_bio(struct path *path, unsigned data_size)
bio = bio_alloc(GFP_ATOMIC, 1);
if (!bio) {
- DMERR("dm-emc: get_failover_bio: bio_alloc() failed.");
+ DMERR("get_failover_bio: bio_alloc() failed.");
return NULL;
}
@@ -78,13 +80,13 @@ static struct bio *get_failover_bio(struct path *path, unsigned data_size)
page = alloc_page(GFP_ATOMIC);
if (!page) {
- DMERR("dm-emc: get_failover_bio: alloc_page() failed.");
+ DMERR("get_failover_bio: alloc_page() failed.");
bio_put(bio);
return NULL;
}
if (bio_add_page(bio, page, data_size, 0) != data_size) {
- DMERR("dm-emc: get_failover_bio: alloc_page() failed.");
+ DMERR("get_failover_bio: alloc_page() failed.");
__free_page(page);
bio_put(bio);
return NULL;
@@ -103,7 +105,7 @@ static struct request *get_failover_req(struct emc_handler *h,
/* FIXME: Figure out why it fails with GFP_ATOMIC. */
rq = blk_get_request(q, WRITE, __GFP_WAIT);
if (!rq) {
- DMERR("dm-emc: get_failover_req: blk_get_request failed");
+ DMERR("get_failover_req: blk_get_request failed");
return NULL;
}
@@ -160,7 +162,7 @@ static struct request *emc_trespass_get(struct emc_handler *h,
bio = get_failover_bio(path, data_size);
if (!bio) {
- DMERR("dm-emc: emc_trespass_get: no bio");
+ DMERR("emc_trespass_get: no bio");
return NULL;
}
@@ -173,7 +175,7 @@ static struct request *emc_trespass_get(struct emc_handler *h,
/* get request for block layer packet command */
rq = get_failover_req(h, bio, path);
if (!rq) {
- DMERR("dm-emc: emc_trespass_get: no rq");
+ DMERR("emc_trespass_get: no rq");
free_bio(bio);
return NULL;
}
@@ -200,18 +202,18 @@ static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
* initial state passed into us and then get an update here.
*/
if (!q) {
- DMINFO("dm-emc: emc_pg_init: no queue");
+ DMINFO("emc_pg_init: no queue");
goto fail_path;
}
/* FIXME: The request should be pre-allocated. */
rq = emc_trespass_get(hwh->context, path);
if (!rq) {
- DMERR("dm-emc: emc_pg_init: no rq");
+ DMERR("emc_pg_init: no rq");
goto fail_path;
}
- DMINFO("dm-emc: emc_pg_init: sending switch-over command");
+ DMINFO("emc_pg_init: sending switch-over command");
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
return;
@@ -241,18 +243,18 @@ static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
hr = 0;
short_trespass = 0;
} else if (argc != 2) {
- DMWARN("dm-emc hwhandler: incorrect number of arguments");
+ DMWARN("incorrect number of arguments");
return -EINVAL;
} else {
if ((sscanf(argv[0], "%u", &short_trespass) != 1)
|| (short_trespass > 1)) {
- DMWARN("dm-emc: invalid trespass mode selected");
+ DMWARN("invalid trespass mode selected");
return -EINVAL;
}
if ((sscanf(argv[1], "%u", &hr) != 1)
|| (hr > 1)) {
- DMWARN("dm-emc: invalid honor reservation flag selected");
+ DMWARN("invalid honor reservation flag selected");
return -EINVAL;
}
}
@@ -264,14 +266,14 @@ static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
hwh->context = h;
if ((h->short_trespass = short_trespass))
- DMWARN("dm-emc: short trespass command will be send");
+ DMWARN("short trespass command will be send");
else
- DMWARN("dm-emc: long trespass command will be send");
+ DMWARN("long trespass command will be send");
if ((h->hr = hr))
- DMWARN("dm-emc: honor reservation bit will be set");
+ DMWARN("honor reservation bit will be set");
else
- DMWARN("dm-emc: honor reservation bit will not be set (default)");
+ DMWARN("honor reservation bit will not be set (default)");
return 0;
}
@@ -336,9 +338,9 @@ static int __init dm_emc_init(void)
int r = dm_register_hw_handler(&emc_hwh);
if (r < 0)
- DMERR("emc: register failed %d", r);
+ DMERR("register failed %d", r);
- DMINFO("dm-emc version 0.0.3 loaded");
+ DMINFO("version 0.0.3 loaded");
return r;
}
@@ -348,7 +350,7 @@ static void __exit dm_emc_exit(void)
int r = dm_unregister_hw_handler(&emc_hwh);
if (r < 0)
- DMERR("emc: unregister failed %d", r);
+ DMERR("unregister failed %d", r);
}
module_init(dm_emc_init);
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index cc07bbebbb16..d12379b5cdb5 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -16,6 +16,8 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#define DM_MSG_PREFIX "snapshots"
+
/*-----------------------------------------------------------------
* Persistent snapshots, by persistent we mean that the snapshot
* will survive a reboot.
@@ -91,7 +93,6 @@ struct pstore {
struct dm_snapshot *snap; /* up pointer to my snapshot */
int version;
int valid;
- uint32_t chunk_size;
uint32_t exceptions_per_area;
/*
@@ -133,7 +134,7 @@ static int alloc_area(struct pstore *ps)
int r = -ENOMEM;
size_t len;
- len = ps->chunk_size << SECTOR_SHIFT;
+ len = ps->snap->chunk_size << SECTOR_SHIFT;
/*
* Allocate the chunk_size block of memory that will hold
@@ -160,8 +161,8 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
unsigned long bits;
where.bdev = ps->snap->cow->bdev;
- where.sector = ps->chunk_size * chunk;
- where.count = ps->chunk_size;
+ where.sector = ps->snap->chunk_size * chunk;
+ where.count = ps->snap->chunk_size;
return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
}
@@ -188,7 +189,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
static int zero_area(struct pstore *ps, uint32_t area)
{
- memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
+ memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
return area_io(ps, area, WRITE);
}
@@ -196,6 +197,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
{
int r;
struct disk_header *dh;
+ chunk_t chunk_size;
r = chunk_io(ps, 0, READ);
if (r)
@@ -210,8 +212,29 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*new_snapshot = 0;
ps->valid = le32_to_cpu(dh->valid);
ps->version = le32_to_cpu(dh->version);
- ps->chunk_size = le32_to_cpu(dh->chunk_size);
-
+ chunk_size = le32_to_cpu(dh->chunk_size);
+ if (ps->snap->chunk_size != chunk_size) {
+ DMWARN("chunk size %llu in device metadata overrides "
+ "table chunk size of %llu.",
+ (unsigned long long)chunk_size,
+ (unsigned long long)ps->snap->chunk_size);
+
+ /* We had a bogus chunk_size. Fix stuff up. */
+ dm_io_put(sectors_to_pages(ps->snap->chunk_size));
+ free_area(ps);
+
+ ps->snap->chunk_size = chunk_size;
+ ps->snap->chunk_mask = chunk_size - 1;
+ ps->snap->chunk_shift = ffs(chunk_size) - 1;
+
+ r = alloc_area(ps);
+ if (r)
+ return r;
+
+ r = dm_io_get(sectors_to_pages(chunk_size));
+ if (r)
+ return r;
+ }
} else {
DMWARN("Invalid/corrupt snapshot");
r = -ENXIO;
@@ -224,13 +247,13 @@ static int write_header(struct pstore *ps)
{
struct disk_header *dh;
- memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
+ memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
dh = (struct disk_header *) ps->area;
dh->magic = cpu_to_le32(SNAP_MAGIC);
dh->valid = cpu_to_le32(ps->valid);
dh->version = cpu_to_le32(ps->version);
- dh->chunk_size = cpu_to_le32(ps->chunk_size);
+ dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
return chunk_io(ps, 0, WRITE);
}
@@ -365,7 +388,7 @@ static void persistent_destroy(struct exception_store *store)
{
struct pstore *ps = get_info(store);
- dm_io_put(sectors_to_pages(ps->chunk_size));
+ dm_io_put(sectors_to_pages(ps->snap->chunk_size));
vfree(ps->callbacks);
free_area(ps);
kfree(ps);
@@ -384,6 +407,16 @@ static int persistent_read_metadata(struct exception_store *store)
return r;
/*
+ * Now that we know the correct chunk_size, complete the initialisation.
+ */
+ ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
+ sizeof(struct disk_exception);
+ ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
+ sizeof(*ps->callbacks));
+ if (!ps->callbacks)
+ return -ENOMEM;
+
+ /*
* Do we need to setup a new snapshot ?
*/
if (new_snapshot) {
@@ -533,9 +566,6 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
ps->snap = store->snap;
ps->valid = 1;
ps->version = SNAPSHOT_DISK_VERSION;
- ps->chunk_size = chunk_size;
- ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) /
- sizeof(struct disk_exception);
ps->next_free = 2; /* skipping the header and first area */
ps->current_committed = 0;
@@ -543,18 +573,9 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
if (r)
goto bad;
- /*
- * Allocate space for all the callbacks.
- */
ps->callback_count = 0;
atomic_set(&ps->pending_count, 0);
- ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
- sizeof(*ps->callbacks));
-
- if (!ps->callbacks) {
- r = -ENOMEM;
- goto bad;
- }
+ ps->callbacks = NULL;
store->destroy = persistent_destroy;
store->read_metadata = persistent_read_metadata;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8edd6435414d..3edb3477f987 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -19,6 +19,7 @@
#include <asm/uaccess.h>
+#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
/*-----------------------------------------------------------------
@@ -48,7 +49,7 @@ struct vers_iter {
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];
-static void dm_hash_remove_all(void);
+static void dm_hash_remove_all(int keep_open_devices);
/*
* Guards access to both hash tables.
@@ -73,7 +74,7 @@ static int dm_hash_init(void)
static void dm_hash_exit(void)
{
- dm_hash_remove_all();
+ dm_hash_remove_all(0);
devfs_remove(DM_DIR);
}
@@ -102,8 +103,10 @@ static struct hash_cell *__get_name_cell(const char *str)
unsigned int h = hash_str(str);
list_for_each_entry (hc, _name_buckets + h, name_list)
- if (!strcmp(hc->name, str))
+ if (!strcmp(hc->name, str)) {
+ dm_get(hc->md);
return hc;
+ }
return NULL;
}
@@ -114,8 +117,10 @@ static struct hash_cell *__get_uuid_cell(const char *str)
unsigned int h = hash_str(str);
list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
- if (!strcmp(hc->uuid, str))
+ if (!strcmp(hc->uuid, str)) {
+ dm_get(hc->md);
return hc;
+ }
return NULL;
}
@@ -191,7 +196,7 @@ static int unregister_with_devfs(struct hash_cell *hc)
*/
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
{
- struct hash_cell *cell;
+ struct hash_cell *cell, *hc;
/*
* Allocate the new cells.
@@ -204,14 +209,19 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
* Insert the cell into both hash tables.
*/
down_write(&_hash_lock);
- if (__get_name_cell(name))
+ hc = __get_name_cell(name);
+ if (hc) {
+ dm_put(hc->md);
goto bad;
+ }
list_add(&cell->name_list, _name_buckets + hash_str(name));
if (uuid) {
- if (__get_uuid_cell(uuid)) {
+ hc = __get_uuid_cell(uuid);
+ if (hc) {
list_del(&cell->name_list);
+ dm_put(hc->md);
goto bad;
}
list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
@@ -251,19 +261,41 @@ static void __hash_remove(struct hash_cell *hc)
free_cell(hc);
}
-static void dm_hash_remove_all(void)
+static void dm_hash_remove_all(int keep_open_devices)
{
- int i;
+ int i, dev_skipped, dev_removed;
struct hash_cell *hc;
struct list_head *tmp, *n;
down_write(&_hash_lock);
+
+retry:
+ dev_skipped = dev_removed = 0;
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_safe (tmp, n, _name_buckets + i) {
hc = list_entry(tmp, struct hash_cell, name_list);
+
+ if (keep_open_devices &&
+ dm_lock_for_deletion(hc->md)) {
+ dev_skipped++;
+ continue;
+ }
__hash_remove(hc);
+ dev_removed = 1;
}
}
+
+ /*
+ * Some mapped devices may be using other mapped devices, so if any
+ * still exist, repeat until we make no further progress.
+ */
+ if (dev_skipped) {
+ if (dev_removed)
+ goto retry;
+
+ DMWARN("remove_all left %d open device(s)", dev_skipped);
+ }
+
up_write(&_hash_lock);
}
@@ -289,6 +321,7 @@ static int dm_hash_rename(const char *old, const char *new)
if (hc) {
DMWARN("asked to rename to an already existing name %s -> %s",
old, new);
+ dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_name);
return -EBUSY;
@@ -328,6 +361,7 @@ static int dm_hash_rename(const char *old, const char *new)
dm_table_put(table);
}
+ dm_put(hc->md);
up_write(&_hash_lock);
kfree(old_name);
return 0;
@@ -344,7 +378,7 @@ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
static int remove_all(struct dm_ioctl *param, size_t param_size)
{
- dm_hash_remove_all();
+ dm_hash_remove_all(1);
param->data_size = 0;
return 0;
}
@@ -524,7 +558,6 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
struct gendisk *disk = dm_disk(md);
struct dm_table *table;
- struct block_device *bdev;
param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
DM_ACTIVE_PRESENT_FLAG);
@@ -534,20 +567,12 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
- if (!(param->flags & DM_SKIP_BDGET_FLAG)) {
- bdev = bdget_disk(disk, 0);
- if (!bdev)
- return -ENXIO;
-
- /*
- * Yes, this will be out of date by the time it gets back
- * to userland, but it is still very useful for
- * debugging.
- */
- param->open_count = bdev->bd_openers;
- bdput(bdev);
- } else
- param->open_count = -1;
+ /*
+ * Yes, this will be out of date by the time it gets back
+ * to userland, but it is still very useful for
+ * debugging.
+ */
+ param->open_count = dm_open_count(md);
if (disk->policy)
param->flags |= DM_READONLY_FLAG;
@@ -567,7 +592,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
static int dev_create(struct dm_ioctl *param, size_t param_size)
{
- int r;
+ int r, m = DM_ANY_MINOR;
struct mapped_device *md;
r = check_name(param->name);
@@ -575,10 +600,9 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
return r;
if (param->flags & DM_PERSISTENT_DEV_FLAG)
- r = dm_create_with_minor(MINOR(huge_decode_dev(param->dev)), &md);
- else
- r = dm_create(&md);
+ m = MINOR(huge_decode_dev(param->dev));
+ r = dm_create(m, &md);
if (r)
return r;
@@ -611,10 +635,8 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
return __get_name_cell(param->name);
md = dm_get_md(huge_decode_dev(param->dev));
- if (md) {
+ if (md)
mdptr = dm_get_mdptr(md);
- dm_put(md);
- }
return mdptr;
}
@@ -628,7 +650,6 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
hc = __find_device_hash_cell(param);
if (hc) {
md = hc->md;
- dm_get(md);
/*
* Sneakily write in both the name and the uuid
@@ -653,6 +674,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
static int dev_remove(struct dm_ioctl *param, size_t param_size)
{
struct hash_cell *hc;
+ struct mapped_device *md;
+ int r;
down_write(&_hash_lock);
hc = __find_device_hash_cell(param);
@@ -663,8 +686,22 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
return -ENXIO;
}
+ md = hc->md;
+
+ /*
+ * Ensure the device is not open and nothing further can open it.
+ */
+ r = dm_lock_for_deletion(md);
+ if (r) {
+ DMWARN("unable to remove open device %s", hc->name);
+ up_write(&_hash_lock);
+ dm_put(md);
+ return r;
+ }
+
__hash_remove(hc);
up_write(&_hash_lock);
+ dm_put(md);
param->data_size = 0;
return 0;
}
@@ -790,7 +827,6 @@ static int do_resume(struct dm_ioctl *param)
}
md = hc->md;
- dm_get(md);
new_map = hc->new_map;
hc->new_map = NULL;
@@ -1078,6 +1114,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
{
int r;
struct hash_cell *hc;
+ struct mapped_device *md;
down_write(&_hash_lock);
@@ -1096,7 +1133,9 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
r = __dev_status(hc->md, param);
+ md = hc->md;
up_write(&_hash_lock);
+ dm_put(md);
return r;
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index daf586c0898d..47b3c62bbdb8 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -12,6 +12,8 @@
#include <linux/bio.h>
#include <linux/slab.h>
+#define DM_MSG_PREFIX "linear"
+
/*
* Linear: maps a linear range of a device.
*/
@@ -29,7 +31,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned long long tmp;
if (argc != 2) {
- ti->error = "dm-linear: Invalid argument count";
+ ti->error = "Invalid argument count";
return -EINVAL;
}
@@ -111,7 +113,7 @@ int __init dm_linear_init(void)
int r = dm_register_target(&linear_target);
if (r < 0)
- DMERR("linear: register failed %d", r);
+ DMERR("register failed %d", r);
return r;
}
@@ -121,5 +123,5 @@ void dm_linear_exit(void)
int r = dm_unregister_target(&linear_target);
if (r < 0)
- DMERR("linear: unregister failed %d", r);
+ DMERR("unregister failed %d", r);
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index d73779a42417..64b764bd02cc 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,6 +12,8 @@
#include "dm-log.h"
#include "dm-io.h"
+#define DM_MSG_PREFIX "mirror log"
+
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
@@ -155,8 +157,6 @@ struct log_c {
struct io_region header_location;
struct log_header *disk_header;
-
- struct io_region bits_location;
};
/*
@@ -241,43 +241,21 @@ static inline int write_header(struct log_c *log)
}
/*----------------------------------------------------------------
- * Bits IO
- *--------------------------------------------------------------*/
-static int read_bits(struct log_c *log)
-{
- int r;
- unsigned long ebits;
-
- r = dm_io_sync_vm(1, &log->bits_location, READ,
- log->clean_bits, &ebits);
- if (r)
- return r;
-
- return 0;
-}
-
-static int write_bits(struct log_c *log)
-{
- unsigned long ebits;
- return dm_io_sync_vm(1, &log->bits_location, WRITE,
- log->clean_bits, &ebits);
-}
-
-/*----------------------------------------------------------------
* core log constructor/destructor
*
* argv contains region_size followed optionally by [no]sync
*--------------------------------------------------------------*/
#define BYTE_SHIFT 3
-static int core_ctr(struct dirty_log *log, struct dm_target *ti,
- unsigned int argc, char **argv)
+static int create_log_context(struct dirty_log *log, struct dm_target *ti,
+ unsigned int argc, char **argv,
+ struct dm_dev *dev)
{
enum sync sync = DEFAULTSYNC;
struct log_c *lc;
uint32_t region_size;
unsigned int region_count;
- size_t bitset_size;
+ size_t bitset_size, buf_size;
if (argc < 1 || argc > 2) {
DMWARN("wrong number of arguments to mirror log");
@@ -319,22 +297,53 @@ static int core_ctr(struct dirty_log *log, struct dm_target *ti,
* Work out how many "unsigned long"s we need to hold the bitset.
*/
bitset_size = dm_round_up(region_count,
- sizeof(unsigned long) << BYTE_SHIFT);
+ sizeof(*lc->clean_bits) << BYTE_SHIFT);
bitset_size >>= BYTE_SHIFT;
- lc->bitset_uint32_count = bitset_size / 4;
- lc->clean_bits = vmalloc(bitset_size);
- if (!lc->clean_bits) {
- DMWARN("couldn't allocate clean bitset");
- kfree(lc);
- return -ENOMEM;
+ lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
+
+ /*
+ * Disk log?
+ */
+ if (!dev) {
+ lc->clean_bits = vmalloc(bitset_size);
+ if (!lc->clean_bits) {
+ DMWARN("couldn't allocate clean bitset");
+ kfree(lc);
+ return -ENOMEM;
+ }
+ lc->disk_header = NULL;
+ } else {
+ lc->log_dev = dev;
+ lc->header_location.bdev = lc->log_dev->bdev;
+ lc->header_location.sector = 0;
+
+ /*
+ * Buffer holds both header and bitset.
+ */
+ buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
+ bitset_size, ti->limits.hardsect_size);
+ lc->header_location.count = buf_size >> SECTOR_SHIFT;
+
+ lc->disk_header = vmalloc(buf_size);
+ if (!lc->disk_header) {
+ DMWARN("couldn't allocate disk log buffer");
+ kfree(lc);
+ return -ENOMEM;
+ }
+
+ lc->clean_bits = (void *)lc->disk_header +
+ (LOG_OFFSET << SECTOR_SHIFT);
}
+
memset(lc->clean_bits, -1, bitset_size);
lc->sync_bits = vmalloc(bitset_size);
if (!lc->sync_bits) {
DMWARN("couldn't allocate sync bitset");
- vfree(lc->clean_bits);
+ if (!dev)
+ vfree(lc->clean_bits);
+ vfree(lc->disk_header);
kfree(lc);
return -ENOMEM;
}
@@ -345,25 +354,40 @@ static int core_ctr(struct dirty_log *log, struct dm_target *ti,
if (!lc->recovering_bits) {
DMWARN("couldn't allocate sync bitset");
vfree(lc->sync_bits);
- vfree(lc->clean_bits);
+ if (!dev)
+ vfree(lc->clean_bits);
+ vfree(lc->disk_header);
kfree(lc);
return -ENOMEM;
}
memset(lc->recovering_bits, 0, bitset_size);
lc->sync_search = 0;
log->context = lc;
+
return 0;
}
-static void core_dtr(struct dirty_log *log)
+static int core_ctr(struct dirty_log *log, struct dm_target *ti,
+ unsigned int argc, char **argv)
+{
+ return create_log_context(log, ti, argc, argv, NULL);
+}
+
+static void destroy_log_context(struct log_c *lc)
{
- struct log_c *lc = (struct log_c *) log->context;
- vfree(lc->clean_bits);
vfree(lc->sync_bits);
vfree(lc->recovering_bits);
kfree(lc);
}
+static void core_dtr(struct dirty_log *log)
+{
+ struct log_c *lc = (struct log_c *) log->context;
+
+ vfree(lc->clean_bits);
+ destroy_log_context(lc);
+}
+
/*----------------------------------------------------------------
* disk log constructor/destructor
*
@@ -373,8 +397,6 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv)
{
int r;
- size_t size;
- struct log_c *lc;
struct dm_dev *dev;
if (argc < 2 || argc > 3) {
@@ -387,49 +409,22 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
if (r)
return r;
- r = core_ctr(log, ti, argc - 1, argv + 1);
+ r = create_log_context(log, ti, argc - 1, argv + 1, dev);
if (r) {
dm_put_device(ti, dev);
return r;
}
- lc = (struct log_c *) log->context;
- lc->log_dev = dev;
-
- /* setup the disk header fields */
- lc->header_location.bdev = lc->log_dev->bdev;
- lc->header_location.sector = 0;
- lc->header_location.count = 1;
-
- /*
- * We can't read less than this amount, even though we'll
- * not be using most of this space.
- */
- lc->disk_header = vmalloc(1 << SECTOR_SHIFT);
- if (!lc->disk_header)
- goto bad;
-
- /* setup the disk bitset fields */
- lc->bits_location.bdev = lc->log_dev->bdev;
- lc->bits_location.sector = LOG_OFFSET;
-
- size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t),
- 1 << SECTOR_SHIFT);
- lc->bits_location.count = size >> SECTOR_SHIFT;
return 0;
-
- bad:
- dm_put_device(ti, lc->log_dev);
- core_dtr(log);
- return -ENOMEM;
}
static void disk_dtr(struct dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
+
dm_put_device(lc->ti, lc->log_dev);
vfree(lc->disk_header);
- core_dtr(log);
+ destroy_log_context(lc);
}
static int count_bits32(uint32_t *addr, unsigned size)
@@ -454,12 +449,7 @@ static int disk_resume(struct dirty_log *log)
if (r)
return r;
- /* read the bits */
- r = read_bits(lc);
- if (r)
- return r;
-
- /* set or clear any new bits */
+ /* set or clear any new bits -- device has grown */
if (lc->sync == NOSYNC)
for (i = lc->header.nr_regions; i < lc->region_count; i++)
/* FIXME: amazingly inefficient */
@@ -469,15 +459,14 @@ static int disk_resume(struct dirty_log *log)
/* FIXME: amazingly inefficient */
log_clear_bit(lc, lc->clean_bits, i);
+ /* clear any old bits -- device has shrunk */
+ for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
+ log_clear_bit(lc, lc->clean_bits, i);
+
/* copy clean across to sync */
memcpy(lc->sync_bits, lc->clean_bits, size);
lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
- /* write the bits */
- r = write_bits(lc);
- if (r)
- return r;
-
/* set the correct number of regions in the header */
lc->header.nr_regions = lc->region_count;
@@ -518,7 +507,7 @@ static int disk_flush(struct dirty_log *log)
if (!lc->touched)
return 0;
- r = write_bits(lc);
+ r = write_header(lc);
if (!r)
lc->touched = 0;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 1816f30678ed..217615b33223 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <asm/atomic.h>
+#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)
/* Path properties */
@@ -446,8 +447,6 @@ struct param {
char *error;
};
-#define ESTR(s) ("dm-multipath: " s)
-
static int read_param(struct param *param, char *str, unsigned *v, char **error)
{
if (!str ||
@@ -495,12 +494,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
unsigned ps_argc;
static struct param _params[] = {
- {0, 1024, ESTR("invalid number of path selector args")},
+ {0, 1024, "invalid number of path selector args"},
};
pst = dm_get_path_selector(shift(as));
if (!pst) {
- ti->error = ESTR("unknown path selector type");
+ ti->error = "unknown path selector type";
return -EINVAL;
}
@@ -511,7 +510,7 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
- ti->error = ESTR("path selector constructor failed");
+ ti->error = "path selector constructor failed";
return r;
}
@@ -529,7 +528,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
/* we need at least a path arg */
if (as->argc < 1) {
- ti->error = ESTR("no device given");
+ ti->error = "no device given";
return NULL;
}
@@ -540,7 +539,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
r = dm_get_device(ti, shift(as), ti->begin, ti->len,
dm_table_get_mode(ti->table), &p->path.dev);
if (r) {
- ti->error = ESTR("error getting device");
+ ti->error = "error getting device";
goto bad;
}
@@ -562,8 +561,8 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
struct dm_target *ti)
{
static struct param _params[] = {
- {1, 1024, ESTR("invalid number of paths")},
- {0, 1024, ESTR("invalid number of selector args")}
+ {1, 1024, "invalid number of paths"},
+ {0, 1024, "invalid number of selector args"}
};
int r;
@@ -572,13 +571,13 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
if (as->argc < 2) {
as->argc = 0;
- ti->error = ESTR("not enough priority group aruments");
+ ti->error = "not enough priority group aruments";
return NULL;
}
pg = alloc_priority_group();
if (!pg) {
- ti->error = ESTR("couldn't allocate priority group");
+ ti->error = "couldn't allocate priority group";
return NULL;
}
pg->m = m;
@@ -633,7 +632,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m,
unsigned hw_argc;
static struct param _params[] = {
- {0, 1024, ESTR("invalid number of hardware handler args")},
+ {0, 1024, "invalid number of hardware handler args"},
};
r = read_param(_params, shift(as), &hw_argc, &ti->error);
@@ -645,14 +644,14 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m,
hwht = dm_get_hw_handler(shift(as));
if (!hwht) {
- ti->error = ESTR("unknown hardware handler type");
+ ti->error = "unknown hardware handler type";
return -EINVAL;
}
r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
if (r) {
dm_put_hw_handler(hwht);
- ti->error = ESTR("hardware handler constructor failed");
+ ti->error = "hardware handler constructor failed";
return r;
}
@@ -669,7 +668,7 @@ static int parse_features(struct arg_set *as, struct multipath *m,
unsigned argc;
static struct param _params[] = {
- {0, 1, ESTR("invalid number of feature args")},
+ {0, 1, "invalid number of feature args"},
};
r = read_param(_params, shift(as), &argc, &ti->error);
@@ -692,8 +691,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
{
/* target parameters */
static struct param _params[] = {
- {1, 1024, ESTR("invalid number of priority groups")},
- {1, 1024, ESTR("invalid initial priority group number")},
+ {1, 1024, "invalid number of priority groups"},
+ {1, 1024, "invalid initial priority group number"},
};
int r;
@@ -707,7 +706,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
m = alloc_multipath();
if (!m) {
- ti->error = ESTR("can't allocate multipath");
+ ti->error = "can't allocate multipath";
return -EINVAL;
}
@@ -746,7 +745,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
}
if (pg_count != m->nr_priority_groups) {
- ti->error = ESTR("priority group count mismatch");
+ ti->error = "priority group count mismatch";
r = -EINVAL;
goto bad;
}
@@ -807,7 +806,7 @@ static int fail_path(struct pgpath *pgpath)
if (!pgpath->path.is_active)
goto out;
- DMWARN("dm-multipath: Failing path %s.", pgpath->path.dev->name);
+ DMWARN("Failing path %s.", pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->path.is_active = 0;
@@ -1250,7 +1249,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
r = dm_get_device(ti, argv[1], ti->begin, ti->len,
dm_table_get_mode(ti->table), &dev);
if (r) {
- DMWARN("dm-multipath message: error getting device %s",
+ DMWARN("message: error getting device %s",
argv[1]);
return -EINVAL;
}
@@ -1309,7 +1308,7 @@ static int __init dm_multipath_init(void)
return -ENOMEM;
}
- DMINFO("dm-multipath version %u.%u.%u loaded",
+ DMINFO("version %u.%u.%u loaded",
multipath_target.version[0], multipath_target.version[1],
multipath_target.version[2]);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d12cf3e5e076..be48cedf986b 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -20,6 +20,8 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#define DM_MSG_PREFIX "raid1"
+
static struct workqueue_struct *_kmirrord_wq;
static struct work_struct _kmirrord_work;
@@ -106,12 +108,42 @@ struct region {
struct bio_list delayed_bios;
};
+
+/*-----------------------------------------------------------------
+ * Mirror set structures.
+ *---------------------------------------------------------------*/
+struct mirror {
+ atomic_t error_count;
+ struct dm_dev *dev;
+ sector_t offset;
+};
+
+struct mirror_set {
+ struct dm_target *ti;
+ struct list_head list;
+ struct region_hash rh;
+ struct kcopyd_client *kcopyd_client;
+
+ spinlock_t lock; /* protects the next two lists */
+ struct bio_list reads;
+ struct bio_list writes;
+
+ /* recovery */
+ region_t nr_regions;
+ int in_sync;
+
+ struct mirror *default_mirror; /* Default mirror */
+
+ unsigned int nr_mirrors;
+ struct mirror mirror[0];
+};
+
/*
* Conversion fns
*/
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
- return bio->bi_sector >> rh->region_shift;
+ return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}
static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
@@ -458,11 +490,9 @@ static int __rh_recovery_prepare(struct region_hash *rh)
/* Already quiesced ? */
if (atomic_read(&reg->pending))
list_del_init(&reg->list);
+ else
+ list_move(&reg->list, &rh->quiesced_regions);
- else {
- list_del_init(&reg->list);
- list_add(&reg->list, &rh->quiesced_regions);
- }
spin_unlock_irq(&rh->region_lock);
return 1;
@@ -541,35 +571,6 @@ static void rh_start_recovery(struct region_hash *rh)
wake();
}
-/*-----------------------------------------------------------------
- * Mirror set structures.
- *---------------------------------------------------------------*/
-struct mirror {
- atomic_t error_count;
- struct dm_dev *dev;
- sector_t offset;
-};
-
-struct mirror_set {
- struct dm_target *ti;
- struct list_head list;
- struct region_hash rh;
- struct kcopyd_client *kcopyd_client;
-
- spinlock_t lock; /* protects the next two lists */
- struct bio_list reads;
- struct bio_list writes;
-
- /* recovery */
- region_t nr_regions;
- int in_sync;
-
- struct mirror *default_mirror; /* Default mirror */
-
- unsigned int nr_mirrors;
- struct mirror mirror[0];
-};
-
/*
* Every mirror should look like this one.
*/
@@ -603,7 +604,7 @@ static void recovery_complete(int read_err, unsigned int write_err,
struct region *reg = (struct region *) context;
/* FIXME: better error handling */
- rh_recovery_end(reg, read_err || write_err);
+ rh_recovery_end(reg, !(read_err || write_err));
}
static int recover(struct mirror_set *ms, struct region *reg)
@@ -893,7 +894,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
ms = kmalloc(len, GFP_KERNEL);
if (!ms) {
- ti->error = "dm-mirror: Cannot allocate mirror context";
+ ti->error = "Cannot allocate mirror context";
return NULL;
}
@@ -907,7 +908,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
- ti->error = "dm-mirror: Error creating dirty region hash";
+ ti->error = "Error creating dirty region hash";
kfree(ms);
return NULL;
}
@@ -937,14 +938,14 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
unsigned long long offset;
if (sscanf(argv[1], "%llu", &offset) != 1) {
- ti->error = "dm-mirror: Invalid offset";
+ ti->error = "Invalid offset";
return -EINVAL;
}
if (dm_get_device(ti, argv[0], offset, ti->len,
dm_table_get_mode(ti->table),
&ms->mirror[mirror].dev)) {
- ti->error = "dm-mirror: Device lookup failure";
+ ti->error = "Device lookup failure";
return -ENXIO;
}
@@ -981,30 +982,30 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
struct dirty_log *dl;
if (argc < 2) {
- ti->error = "dm-mirror: Insufficient mirror log arguments";
+ ti->error = "Insufficient mirror log arguments";
return NULL;
}
if (sscanf(argv[1], "%u", &param_count) != 1) {
- ti->error = "dm-mirror: Invalid mirror log argument count";
+ ti->error = "Invalid mirror log argument count";
return NULL;
}
*args_used = 2 + param_count;
if (argc < *args_used) {
- ti->error = "dm-mirror: Insufficient mirror log arguments";
+ ti->error = "Insufficient mirror log arguments";
return NULL;
}
dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
if (!dl) {
- ti->error = "dm-mirror: Error creating mirror dirty log";
+ ti->error = "Error creating mirror dirty log";
return NULL;
}
if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
- ti->error = "dm-mirror: Invalid region size";
+ ti->error = "Invalid region size";
dm_destroy_dirty_log(dl);
return NULL;
}
@@ -1038,7 +1039,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
- ti->error = "dm-mirror: Invalid number of mirrors";
+ ti->error = "Invalid number of mirrors";
dm_destroy_dirty_log(dl);
return -EINVAL;
}
@@ -1046,7 +1047,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv++, argc--;
if (argc != nr_mirrors * 2) {
- ti->error = "dm-mirror: Wrong number of mirror arguments";
+ ti->error = "Wrong number of mirror arguments";
dm_destroy_dirty_log(dl);
return -EINVAL;
}
@@ -1115,7 +1116,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
struct mirror *m;
struct mirror_set *ms = ti->private;
- map_context->ll = bio->bi_sector >> ms->rh.region_shift;
+ map_context->ll = bio_to_region(&ms->rh, bio);
if (rw == WRITE) {
queue_bio(ms, bio, rw);
@@ -1221,7 +1222,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 0, 1},
+ .version = {1, 0, 2},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index d0024865a789..c5a16c550122 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
+#define DM_MSG_PREFIX "multipath round-robin"
+
/*-----------------------------------------------------------------
* Path-handling code, paths are held in lists
*---------------------------------------------------------------*/
@@ -191,9 +193,9 @@ static int __init dm_rr_init(void)
int r = dm_register_path_selector(&rr_ps);
if (r < 0)
- DMERR("round-robin: register failed %d", r);
+ DMERR("register failed %d", r);
- DMINFO("dm-round-robin version 1.0.0 loaded");
+ DMINFO("version 1.0.0 loaded");
return r;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 08312b46463a..8eea0ddbf5ec 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -23,6 +23,8 @@
#include "dm-bio-list.h"
#include "kcopyd.h"
+#define DM_MSG_PREFIX "snapshots"
+
/*
* The percentage increment we will wake up users at
*/
@@ -117,7 +119,7 @@ static int init_origin_hash(void)
_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
GFP_KERNEL);
if (!_origins) {
- DMERR("Device mapper: Snapshot: unable to allocate memory");
+ DMERR("unable to allocate memory");
return -ENOMEM;
}
@@ -412,7 +414,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int blocksize;
if (argc < 4) {
- ti->error = "dm-snapshot: requires exactly 4 arguments";
+ ti->error = "requires exactly 4 arguments";
r = -EINVAL;
goto bad1;
}
@@ -530,7 +532,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = s;
- ti->split_io = chunk_size;
+ ti->split_io = s->chunk_size;
return 0;
@@ -1127,7 +1129,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_dev *dev;
if (argc != 1) {
- ti->error = "dm-origin: incorrect number of arguments";
+ ti->error = "origin: incorrect number of arguments";
return -EINVAL;
}
@@ -1204,7 +1206,7 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 1, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -1215,7 +1217,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 1, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -1236,7 +1238,7 @@ static int __init dm_snapshot_init(void)
r = dm_register_target(&origin_target);
if (r < 0) {
- DMERR("Device mapper: Origin: register failed %d\n", r);
+ DMERR("Origin target register failed %d", r);
goto bad1;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 08328a8f5a3c..6c29fcecd892 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -12,6 +12,8 @@
#include <linux/bio.h>
#include <linux/slab.h>
+#define DM_MSG_PREFIX "striped"
+
struct stripe {
struct dm_dev *dev;
sector_t physical_start;
@@ -78,19 +80,19 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned int i;
if (argc < 2) {
- ti->error = "dm-stripe: Not enough arguments";
+ ti->error = "Not enough arguments";
return -EINVAL;
}
stripes = simple_strtoul(argv[0], &end, 10);
if (*end) {
- ti->error = "dm-stripe: Invalid stripe count";
+ ti->error = "Invalid stripe count";
return -EINVAL;
}
chunk_size = simple_strtoul(argv[1], &end, 10);
if (*end) {
- ti->error = "dm-stripe: Invalid chunk_size";
+ ti->error = "Invalid chunk_size";
return -EINVAL;
}
@@ -99,19 +101,19 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*/
if (!chunk_size || (chunk_size & (chunk_size - 1)) ||
(chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) {
- ti->error = "dm-stripe: Invalid chunk size";
+ ti->error = "Invalid chunk size";
return -EINVAL;
}
if (ti->len & (chunk_size - 1)) {
- ti->error = "dm-stripe: Target length not divisible by "
+ ti->error = "Target length not divisible by "
"chunk size";
return -EINVAL;
}
width = ti->len;
if (sector_div(width, stripes)) {
- ti->error = "dm-stripe: Target length not divisible by "
+ ti->error = "Target length not divisible by "
"number of stripes";
return -EINVAL;
}
@@ -120,14 +122,14 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* Do we have enough arguments for that many stripes ?
*/
if (argc != (2 + 2 * stripes)) {
- ti->error = "dm-stripe: Not enough destinations "
+ ti->error = "Not enough destinations "
"specified";
return -EINVAL;
}
sc = alloc_context(stripes);
if (!sc) {
- ti->error = "dm-stripe: Memory allocation for striped context "
+ ti->error = "Memory allocation for striped context "
"failed";
return -ENOMEM;
}
@@ -149,8 +151,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = get_stripe(ti, sc, i, argv);
if (r < 0) {
- ti->error = "dm-stripe: Couldn't parse stripe "
- "destination";
+ ti->error = "Couldn't parse stripe destination";
while (i--)
dm_put_device(ti, sc->stripe[i].dev);
kfree(sc);
@@ -227,7 +228,7 @@ int __init dm_stripe_init(void)
r = dm_register_target(&stripe_target);
if (r < 0)
- DMWARN("striped target registration failed");
+ DMWARN("target registration failed");
return r;
}
@@ -235,7 +236,7 @@ int __init dm_stripe_init(void)
void dm_stripe_exit(void)
{
if (dm_unregister_target(&stripe_target))
- DMWARN("striped target unregistration failed");
+ DMWARN("target unregistration failed");
return;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8f56a54cf0ce..75fe9493e6af 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,6 +17,8 @@
#include <linux/mutex.h>
#include <asm/atomic.h>
+#define DM_MSG_PREFIX "table"
+
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
@@ -237,6 +239,44 @@ int dm_table_create(struct dm_table **result, int mode,
return 0;
}
+int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
+{
+ struct dm_table *t;
+ sector_t dev_size = 1;
+ int r;
+
+ /*
+ * Find current size of device.
+ * Default to 1 sector if inactive.
+ */
+ t = dm_get_table(md);
+ if (t) {
+ dev_size = dm_table_get_size(t);
+ dm_table_put(t);
+ }
+
+ r = dm_table_create(&t, FMODE_READ, 1, md);
+ if (r)
+ return r;
+
+ r = dm_table_add_target(t, "error", 0, dev_size, NULL);
+ if (r)
+ goto out;
+
+ r = dm_table_complete(t);
+ if (r)
+ goto out;
+
+ *result = t;
+
+out:
+ if (r)
+ dm_table_put(t);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_create_error_table);
+
static void free_devices(struct list_head *devices)
{
struct list_head *tmp, *next;
@@ -590,6 +630,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
unsigned array_size = 0;
*argc = 0;
+
+ if (!input) {
+ *argvp = NULL;
+ return 0;
+ }
+
argv = realloc_argv(&array_size, argv);
if (!argv)
return -ENOMEM;
@@ -671,15 +717,14 @@ int dm_table_add_target(struct dm_table *t, const char *type,
memset(tgt, 0, sizeof(*tgt));
if (!len) {
- tgt->error = "zero-length target";
- DMERR("%s", tgt->error);
+ DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- tgt->error = "unknown target type";
- DMERR("%s", tgt->error);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md),
+ type);
return -EINVAL;
}
@@ -716,7 +761,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return 0;
bad:
- DMERR("%s", tgt->error);
+ DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
dm_put_target_type(tgt->type);
return r;
}
@@ -802,7 +847,7 @@ sector_t dm_table_get_size(struct dm_table *t)
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
- if (index > t->num_targets)
+ if (index >= t->num_targets)
return NULL;
return t->targets + index;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 64fd8e79ea4c..477a041a41cf 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -12,6 +12,8 @@
#include <linux/bio.h>
#include <linux/slab.h>
+#define DM_MSG_PREFIX "target"
+
struct tt_internal {
struct target_type tt;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 51c0639b2487..ea569f7348d2 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -10,13 +10,15 @@
#include <linux/init.h>
#include <linux/bio.h>
+#define DM_MSG_PREFIX "zero"
+
/*
* Construct a dummy mapping that only returns zeros
*/
static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
if (argc != 0) {
- ti->error = "dm-zero: No arguments required";
+ ti->error = "No arguments required";
return -EINVAL;
}
@@ -60,7 +62,7 @@ static int __init dm_zero_init(void)
int r = dm_register_target(&zero_target);
if (r < 0)
- DMERR("zero: register failed %d", r);
+ DMERR("register failed %d", r);
return r;
}
@@ -70,7 +72,7 @@ static void __exit dm_zero_exit(void)
int r = dm_unregister_target(&zero_target);
if (r < 0)
- DMERR("zero: unregister failed %d", r);
+ DMERR("unregister failed %d", r);
}
module_init(dm_zero_init)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4d710b7a133b..3ed2e53b9eb6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -21,11 +21,14 @@
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
+#define DM_MSG_PREFIX "core"
+
static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
+static DEFINE_SPINLOCK(_minor_lock);
/*
* One of these is allocated per bio.
*/
@@ -49,23 +52,28 @@ struct target_io {
union map_info *dm_get_mapinfo(struct bio *bio)
{
- if (bio && bio->bi_private)
- return &((struct target_io *)bio->bi_private)->info;
- return NULL;
+ if (bio && bio->bi_private)
+ return &((struct target_io *)bio->bi_private)->info;
+ return NULL;
}
+#define MINOR_ALLOCED ((void *)-1)
+
/*
* Bits for the md->flags field.
*/
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
+#define DMF_FREEING 3
+#define DMF_DELETING 4
struct mapped_device {
struct rw_semaphore io_lock;
struct semaphore suspend_lock;
rwlock_t map_lock;
atomic_t holders;
+ atomic_t open_count;
unsigned long flags;
@@ -218,9 +226,25 @@ static int dm_blk_open(struct inode *inode, struct file *file)
{
struct mapped_device *md;
+ spin_lock(&_minor_lock);
+
md = inode->i_bdev->bd_disk->private_data;
+ if (!md)
+ goto out;
+
+ if (test_bit(DMF_FREEING, &md->flags) ||
+ test_bit(DMF_DELETING, &md->flags)) {
+ md = NULL;
+ goto out;
+ }
+
dm_get(md);
- return 0;
+ atomic_inc(&md->open_count);
+
+out:
+ spin_unlock(&_minor_lock);
+
+ return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct inode *inode, struct file *file)
@@ -228,10 +252,35 @@ static int dm_blk_close(struct inode *inode, struct file *file)
struct mapped_device *md;
md = inode->i_bdev->bd_disk->private_data;
+ atomic_dec(&md->open_count);
dm_put(md);
return 0;
}
+int dm_open_count(struct mapped_device *md)
+{
+ return atomic_read(&md->open_count);
+}
+
+/*
+ * Guarantees nothing is using the device before it's deleted.
+ */
+int dm_lock_for_deletion(struct mapped_device *md)
+{
+ int r = 0;
+
+ spin_lock(&_minor_lock);
+
+ if (dm_open_count(md))
+ r = -EBUSY;
+ else
+ set_bit(DMF_DELETING, &md->flags);
+
+ spin_unlock(&_minor_lock);
+
+ return r;
+}
+
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
@@ -456,8 +505,8 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
if (r > 0) {
/* the bio has been remapped so dispatch it */
- blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev, sector,
+ blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+ tio->io->bio->bi_bdev->bd_dev, sector,
clone->bi_sector);
generic_make_request(clone);
@@ -744,43 +793,39 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
-static DEFINE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);
-static void free_minor(unsigned int minor)
+static void free_minor(int minor)
{
- mutex_lock(&_minor_lock);
+ spin_lock(&_minor_lock);
idr_remove(&_minor_idr, minor);
- mutex_unlock(&_minor_lock);
+ spin_unlock(&_minor_lock);
}
/*
* See if the device with a specific minor # is free.
*/
-static int specific_minor(struct mapped_device *md, unsigned int minor)
+static int specific_minor(struct mapped_device *md, int minor)
{
int r, m;
if (minor >= (1 << MINORBITS))
return -EINVAL;
- mutex_lock(&_minor_lock);
+ r = idr_pre_get(&_minor_idr, GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ spin_lock(&_minor_lock);
if (idr_find(&_minor_idr, minor)) {
r = -EBUSY;
goto out;
}
- r = idr_pre_get(&_minor_idr, GFP_KERNEL);
- if (!r) {
- r = -ENOMEM;
- goto out;
- }
-
- r = idr_get_new_above(&_minor_idr, md, minor, &m);
- if (r) {
+ r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
+ if (r)
goto out;
- }
if (m != minor) {
idr_remove(&_minor_idr, m);
@@ -789,24 +834,21 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
}
out:
- mutex_unlock(&_minor_lock);
+ spin_unlock(&_minor_lock);
return r;
}
-static int next_free_minor(struct mapped_device *md, unsigned int *minor)
+static int next_free_minor(struct mapped_device *md, int *minor)
{
- int r;
- unsigned int m;
-
- mutex_lock(&_minor_lock);
+ int r, m;
r = idr_pre_get(&_minor_idr, GFP_KERNEL);
- if (!r) {
- r = -ENOMEM;
- goto out;
- }
+ if (!r)
+ return -ENOMEM;
+
+ spin_lock(&_minor_lock);
- r = idr_get_new(&_minor_idr, md, &m);
+ r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
if (r) {
goto out;
}
@@ -820,7 +862,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
*minor = m;
out:
- mutex_unlock(&_minor_lock);
+ spin_unlock(&_minor_lock);
return r;
}
@@ -829,18 +871,25 @@ static struct block_device_operations dm_blk_dops;
/*
* Allocate and initialise a blank device with a given minor.
*/
-static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
+static struct mapped_device *alloc_dev(int minor)
{
int r;
struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
+ void *old_md;
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
}
+ if (!try_module_get(THIS_MODULE))
+ goto bad0;
+
/* get a minor number for the dev */
- r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
+ if (minor == DM_ANY_MINOR)
+ r = next_free_minor(md, &minor);
+ else
+ r = specific_minor(md, minor);
if (r < 0)
goto bad1;
@@ -849,6 +898,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
init_MUTEX(&md->suspend_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
+ atomic_set(&md->open_count, 0);
atomic_set(&md->event_nr, 0);
md->queue = blk_alloc_queue(GFP_KERNEL);
@@ -875,6 +925,10 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
if (!md->disk)
goto bad4;
+ atomic_set(&md->pending, 0);
+ init_waitqueue_head(&md->wait);
+ init_waitqueue_head(&md->eventq);
+
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->fops = &dm_blk_dops;
@@ -884,9 +938,12 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
- atomic_set(&md->pending, 0);
- init_waitqueue_head(&md->wait);
- init_waitqueue_head(&md->eventq);
+ /* Populate the mapping, nobody knows we exist yet */
+ spin_lock(&_minor_lock);
+ old_md = idr_replace(&_minor_idr, md, minor);
+ spin_unlock(&_minor_lock);
+
+ BUG_ON(old_md != MINOR_ALLOCED);
return md;
@@ -898,13 +955,15 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
blk_cleanup_queue(md->queue);
free_minor(minor);
bad1:
+ module_put(THIS_MODULE);
+ bad0:
kfree(md);
return NULL;
}
static void free_dev(struct mapped_device *md)
{
- unsigned int minor = md->disk->first_minor;
+ int minor = md->disk->first_minor;
if (md->suspended_bdev) {
thaw_bdev(md->suspended_bdev, NULL);
@@ -914,8 +973,14 @@ static void free_dev(struct mapped_device *md)
mempool_destroy(md->io_pool);
del_gendisk(md->disk);
free_minor(minor);
+
+ spin_lock(&_minor_lock);
+ md->disk->private_data = NULL;
+ spin_unlock(&_minor_lock);
+
put_disk(md->disk);
blk_cleanup_queue(md->queue);
+ module_put(THIS_MODULE);
kfree(md);
}
@@ -984,12 +1049,11 @@ static void __unbind(struct mapped_device *md)
/*
* Constructor for a new device.
*/
-static int create_aux(unsigned int minor, int persistent,
- struct mapped_device **result)
+int dm_create(int minor, struct mapped_device **result)
{
struct mapped_device *md;
- md = alloc_dev(minor, persistent);
+ md = alloc_dev(minor);
if (!md)
return -ENXIO;
@@ -997,16 +1061,6 @@ static int create_aux(unsigned int minor, int persistent,
return 0;
}
-int dm_create(struct mapped_device **result)
-{
- return create_aux(0, 0, result);
-}
-
-int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
-{
- return create_aux(minor, 1, result);
-}
-
static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
@@ -1015,13 +1069,18 @@ static struct mapped_device *dm_find_md(dev_t dev)
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
- mutex_lock(&_minor_lock);
+ spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
- if (!md || (dm_disk(md)->first_minor != minor))
+ if (md && (md == MINOR_ALLOCED ||
+ (dm_disk(md)->first_minor != minor) ||
+ test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
+ goto out;
+ }
- mutex_unlock(&_minor_lock);
+out:
+ spin_unlock(&_minor_lock);
return md;
}
@@ -1051,12 +1110,23 @@ void dm_get(struct mapped_device *md)
atomic_inc(&md->holders);
}
+const char *dm_device_name(struct mapped_device *md)
+{
+ return md->name;
+}
+EXPORT_SYMBOL_GPL(dm_device_name);
+
void dm_put(struct mapped_device *md)
{
struct dm_table *map;
- if (atomic_dec_and_test(&md->holders)) {
+ BUG_ON(test_bit(DMF_FREEING, &md->flags));
+
+ if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
map = dm_get_table(md);
+ idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
if (!dm_suspended(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index fd90bc8f9e45..3c03c0ecab7e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -2,7 +2,7 @@
* Internal header file for device mapper
*
* Copyright (C) 2001, 2002 Sistina Software
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@@ -17,9 +17,10 @@
#include <linux/hdreg.h>
#define DM_NAME "device-mapper"
-#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
-#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
-#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
+
+#define DMERR(f, arg...) printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMWARN(f, arg...) printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMINFO(f, arg...) printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
@@ -39,83 +40,16 @@ struct dm_dev {
};
struct dm_table;
-struct mapped_device;
-
-/*-----------------------------------------------------------------
- * Functions for manipulating a struct mapped_device.
- * Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
-int dm_create(struct mapped_device **md);
-int dm_create_with_minor(unsigned int minor, struct mapped_device **md);
-void dm_set_mdptr(struct mapped_device *md, void *ptr);
-void *dm_get_mdptr(struct mapped_device *md);
-struct mapped_device *dm_get_md(dev_t dev);
-
-/*
- * Reference counting for md.
- */
-void dm_get(struct mapped_device *md);
-void dm_put(struct mapped_device *md);
-
-/*
- * A device can still be used while suspended, but I/O is deferred.
- */
-int dm_suspend(struct mapped_device *md, int with_lockfs);
-int dm_resume(struct mapped_device *md);
-
-/*
- * The device must be suspended before calling this method.
- */
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
-
-/*
- * Drop a reference on the table when you've finished with the
- * result.
- */
-struct dm_table *dm_get_table(struct mapped_device *md);
-
-/*
- * Event functions.
- */
-uint32_t dm_get_event_nr(struct mapped_device *md);
-int dm_wait_event(struct mapped_device *md, int event_nr);
-
-/*
- * Info functions.
- */
-struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
-
-/*
- * Geometry functions.
- */
-int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
-int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
/*-----------------------------------------------------------------
- * Functions for manipulating a table. Tables are also reference
- * counted.
+ * Internal table functions.
*---------------------------------------------------------------*/
-int dm_table_create(struct dm_table **result, int mode,
- unsigned num_targets, struct mapped_device *md);
-
-void dm_table_get(struct dm_table *t);
-void dm_table_put(struct dm_table *t);
-
-int dm_table_add_target(struct dm_table *t, const char *type,
- sector_t start, sector_t len, char *params);
-int dm_table_complete(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
-void dm_table_event(struct dm_table *t);
-sector_t dm_table_get_size(struct dm_table *t);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
struct list_head *dm_table_get_devices(struct dm_table *t);
-int dm_table_get_mode(struct dm_table *t);
-struct mapped_device *dm_table_get_md(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
void dm_table_resume_targets(struct dm_table *t);
@@ -133,7 +67,6 @@ void dm_put_target_type(struct target_type *t);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
void *param), void *param);
-
/*-----------------------------------------------------------------
* Useful inlines.
*---------------------------------------------------------------*/
@@ -191,5 +124,7 @@ void dm_stripe_exit(void);
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
union map_info *dm_get_mapinfo(struct bio *bio);
+int dm_open_count(struct mapped_device *md);
+int dm_lock_for_deletion(struct mapped_device *md);
#endif
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index 72480a48d88b..73ab875fb158 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -314,7 +314,7 @@ static void complete_io(unsigned long error, void *context)
if (error) {
if (job->rw == WRITE)
- job->write_err &= error;
+ job->write_err |= error;
else
job->read_err = 1;
@@ -460,7 +460,7 @@ static void segment_complete(int read_err,
job->read_err = 1;
if (write_err)
- job->write_err &= write_err;
+ job->write_err |= write_err;
/*
* Only dispatch more work if there hasn't been an error.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 777585458c85..ff83c9b5979e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -111,7 +111,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
return ret;
}
-static int linear_run (mddev_t *mddev)
+static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
linear_conf_t *conf;
dev_info_t **table;
@@ -121,20 +121,21 @@ static int linear_run (mddev_t *mddev)
sector_t curr_offset;
struct list_head *tmp;
- conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t),
+ conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
GFP_KERNEL);
if (!conf)
- goto out;
+ return NULL;
+
mddev->private = conf;
cnt = 0;
- mddev->array_size = 0;
+ conf->array_size = 0;
ITERATE_RDEV(mddev,rdev,tmp) {
int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j;
- if (j < 0 || j > mddev->raid_disks || disk->rdev) {
+ if (j < 0 || j > raid_disks || disk->rdev) {
printk("linear: disk numbering problem. Aborting!\n");
goto out;
}
@@ -152,11 +153,11 @@ static int linear_run (mddev_t *mddev)
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->size = rdev->size;
- mddev->array_size += rdev->size;
+ conf->array_size += rdev->size;
cnt++;
}
- if (cnt != mddev->raid_disks) {
+ if (cnt != raid_disks) {
printk("linear: not enough drives present. Aborting!\n");
goto out;
}
@@ -200,7 +201,7 @@ static int linear_run (mddev_t *mddev)
unsigned round;
unsigned long base;
- sz = mddev->array_size >> conf->preshift;
+ sz = conf->array_size >> conf->preshift;
sz += 1; /* force round-up */
base = conf->hash_spacing >> conf->preshift;
round = sector_div(sz, base);
@@ -247,14 +248,56 @@ static int linear_run (mddev_t *mddev)
BUG_ON(table - conf->hash_table > nb_zone);
+ return conf;
+
+out:
+ kfree(conf);
+ return NULL;
+}
+
+static int linear_run (mddev_t *mddev)
+{
+ linear_conf_t *conf;
+
+ conf = linear_conf(mddev, mddev->raid_disks);
+
+ if (!conf)
+ return 1;
+ mddev->private = conf;
+ mddev->array_size = conf->array_size;
+
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
mddev->queue->issue_flush_fn = linear_issue_flush;
return 0;
+}
-out:
- kfree(conf);
- return 1;
+static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ /* Adding a drive to a linear array allows the array to grow.
+ * It is permitted if the new drive has a matching superblock
+ * already on it, with raid_disk equal to raid_disks.
+ * It is achieved by creating a new linear_private_data structure
+ * and swapping it in, in place of the current one.
+ * The current one is never freed until the array is stopped.
+ * This avoids races.
+ */
+ linear_conf_t *newconf;
+
+ if (rdev->raid_disk != mddev->raid_disks)
+ return -EINVAL;
+
+ newconf = linear_conf(mddev,mddev->raid_disks+1);
+
+ if (!newconf)
+ return -ENOMEM;
+
+ newconf->prev = mddev_to_conf(mddev);
+ mddev->private = newconf;
+ mddev->raid_disks++;
+ mddev->array_size = newconf->array_size;
+ set_capacity(mddev->gendisk, mddev->array_size << 1);
+ return 0;
}
static int linear_stop (mddev_t *mddev)
@@ -262,8 +305,12 @@ static int linear_stop (mddev_t *mddev)
linear_conf_t *conf = mddev_to_conf(mddev);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- kfree(conf->hash_table);
- kfree(conf);
+ do {
+ linear_conf_t *t = conf->prev;
+ kfree(conf->hash_table);
+ kfree(conf);
+ conf = t;
+ } while (conf);
return 0;
}
@@ -360,6 +407,7 @@ static struct mdk_personality linear_personality =
.run = linear_run,
.stop = linear_stop,
.status = linear_status,
+ .hot_add_disk = linear_add,
};
static int __init linear_init (void)
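The linear_conf()/linear_add() split above grows a linear array by building a complete new configuration for raid_disks+1 devices, publishing it through mddev->private, and chaining the previous configuration on newconf->prev; old configurations are only freed when linear_stop() walks the chain, so requests still holding the old pointer stay valid. A minimal sketch of that publish-now, free-at-stop pattern (struct and function names here are illustrative, not md symbols):

/* Sketch of "swap in a new config, free the old ones only at stop".
 * 'cfg', 'grow' and 'teardown' are illustrative names. */
#include <stdlib.h>

struct cfg {
	int disks;
	struct cfg *prev;	/* older configs kept alive until teardown */
};

static struct cfg *active;	/* readers always see a complete cfg */

static int grow(void)
{
	struct cfg *n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->disks = active ? active->disks + 1 : 1;
	n->prev = active;	/* never free the old one here */
	active = n;		/* single pointer store publishes the new cfg */
	return 0;
}

static void teardown(void)
{
	struct cfg *c = active;
	while (c) {		/* mirrors linear_stop() walking conf->prev */
		struct cfg *t = c->prev;
		free(c);
		c = t;
	}
	active = NULL;
}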
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f19b874753a9..306268ec99ff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -44,6 +44,7 @@
#include <linux/suspend.h>
#include <linux/poll.h>
#include <linux/mutex.h>
+#include <linux/ctype.h>
#include <linux/init.h>
@@ -72,6 +73,10 @@ static void autostart_arrays (int part);
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
+static void md_print_devices(void);
+
+#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
+
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
@@ -170,7 +175,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
* when calling sysfs_notify isn't needed.
*/
-void md_new_event_inintr(mddev_t *mddev)
+static void md_new_event_inintr(mddev_t *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
@@ -732,6 +737,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
+ __u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
rdev->flags = 0;
@@ -748,7 +754,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->size = sb->size;
- mddev->events = md_event(sb);
+ mddev->events = ev1;
mddev->bitmap_offset = 0;
mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
@@ -797,7 +803,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling */
- __u64 ev1 = md_event(sb);
++ev1;
if (ev1 < mddev->events)
return -EINVAL;
@@ -805,19 +810,21 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
- __u64 ev1 = md_event(sb);
if (ev1 < mddev->bitmap->events_cleared)
return 0;
- } else /* just a hot-add of a new device, leave raid_disk at -1 */
- return 0;
+ } else {
+ if (ev1 < mddev->events)
+ /* just a hot-add of a new device, leave raid_disk at -1 */
+ return 0;
+ }
if (mddev->level != LEVEL_MULTIPATH) {
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
- else if (desc->state & (1<<MD_DISK_SYNC) &&
- desc->raid_disk < mddev->raid_disks) {
+ else if (desc->state & (1<<MD_DISK_SYNC) /* &&
+ desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
}
@@ -1100,6 +1107,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ __u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
rdev->flags = 0;
@@ -1115,7 +1123,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->size = le64_to_cpu(sb->size)/2;
- mddev->events = le64_to_cpu(sb->events);
+ mddev->events = ev1;
mddev->bitmap_offset = 0;
mddev->default_bitmap_offset = 1024 >> 9;
@@ -1149,7 +1157,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling */
- __u64 ev1 = le64_to_cpu(sb->events);
++ev1;
if (ev1 < mddev->events)
return -EINVAL;
@@ -1157,12 +1164,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
- __u64 ev1 = le64_to_cpu(sb->events);
if (ev1 < mddev->bitmap->events_cleared)
return 0;
- } else /* just a hot-add of a new device, leave raid_disk at -1 */
- return 0;
-
+ } else {
+ if (ev1 < mddev->events)
+ /* just a hot-add of a new device, leave raid_disk at -1 */
+ return 0;
+ }
if (mddev->level != LEVEL_MULTIPATH) {
int role;
rdev->desc_nr = le32_to_cpu(sb->dev_number);
@@ -1174,7 +1182,11 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(Faulty, &rdev->flags);
break;
default:
- set_bit(In_sync, &rdev->flags);
+ if ((le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_OFFSET))
+ rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
+ else
+ set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
@@ -1198,6 +1210,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->feature_map = 0;
sb->pad0 = 0;
+ sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad1, 0, sizeof(sb->pad1));
memset(sb->pad2, 0, sizeof(sb->pad2));
memset(sb->pad3, 0, sizeof(sb->pad3));
@@ -1218,6 +1231,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
+
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset > 0) {
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+ sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
+ }
+
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
@@ -1242,11 +1263,12 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
+ else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
+ sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
- sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
sb->sb_csum = calc_sb_1_csum(sb);
}
@@ -1507,7 +1529,7 @@ static void print_rdev(mdk_rdev_t *rdev)
printk(KERN_INFO "md: no rdev superblock!\n");
}
-void md_print_devices(void)
+static void md_print_devices(void)
{
struct list_head *tmp, *tmp2;
mdk_rdev_t *rdev;
@@ -1536,15 +1558,30 @@ void md_print_devices(void)
}
-static void sync_sbs(mddev_t * mddev)
+static void sync_sbs(mddev_t * mddev, int nospares)
{
+ /* Update each superblock (in-memory image), but
+ * if we are allowed to, skip spares which already
+ * have the right event counter, or have one earlier
+ * (which would mean they aren't being marked as dirty
+ * with the rest of the array)
+ */
mdk_rdev_t *rdev;
struct list_head *tmp;
ITERATE_RDEV(mddev,rdev,tmp) {
- super_types[mddev->major_version].
- sync_super(mddev, rdev);
- rdev->sb_loaded = 1;
+ if (rdev->sb_events == mddev->events ||
+ (nospares &&
+ rdev->raid_disk < 0 &&
+ (rdev->sb_events&1)==0 &&
+ rdev->sb_events+1 == mddev->events)) {
+ /* Don't update this superblock */
+ rdev->sb_loaded = 2;
+ } else {
+ super_types[mddev->major_version].
+ sync_super(mddev, rdev);
+ rdev->sb_loaded = 1;
+ }
}
}
@@ -1554,12 +1591,42 @@ void md_update_sb(mddev_t * mddev)
struct list_head *tmp;
mdk_rdev_t *rdev;
int sync_req;
+ int nospares = 0;
repeat:
spin_lock_irq(&mddev->write_lock);
sync_req = mddev->in_sync;
mddev->utime = get_seconds();
- mddev->events ++;
+ if (mddev->sb_dirty == 3)
+ /* just a clean <-> dirty transition, possibly leave spares alone,
+ * though if 'events' doesn't have the right even/odd parity, we will have to do
+ * spares after all
+ */
+ nospares = 1;
+
+ /* If this is just a dirty<->clean transition, and the array is clean
+ * and 'events' is odd, we can roll back to the previous clean state */
+ if (mddev->sb_dirty == 3
+ && (mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && (mddev->events & 1))
+ mddev->events--;
+ else {
+ /* otherwise we have to go forward and ... */
+ mddev->events ++;
+ if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
+ /* .. if the array isn't clean, insist on an odd 'events' */
+ if ((mddev->events&1)==0) {
+ mddev->events++;
+ nospares = 0;
+ }
+ } else {
+ /* otherwise insist on an even 'events' (for clean states) */
+ if ((mddev->events&1)) {
+ mddev->events++;
+ nospares = 0;
+ }
+ }
+ }
if (!mddev->events) {
/*
@@ -1571,7 +1638,7 @@ repeat:
mddev->events --;
}
mddev->sb_dirty = 2;
- sync_sbs(mddev);
+ sync_sbs(mddev, nospares);
/*
* do not write anything to disk if using
@@ -1593,6 +1660,8 @@ repeat:
ITERATE_RDEV(mddev,rdev,tmp) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
+ if (rdev->sb_loaded != 1)
+ continue; /* no noise on spare devices */
if (test_bit(Faulty, &rdev->flags))
dprintk("(skipping faulty ");
@@ -1604,6 +1673,7 @@ repeat:
dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
bdevname(rdev->bdev,b),
(unsigned long long)rdev->sb_offset);
+ rdev->sb_events = mddev->events;
} else
dprintk(")\n");
@@ -1667,6 +1737,10 @@ state_show(mdk_rdev_t *rdev, char *page)
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ len += sprintf(page+len, "%swrite_mostly",sep);
+ sep = ",";
+ }
if (!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sspare", sep);
@@ -1675,8 +1749,40 @@ state_show(mdk_rdev_t *rdev, char *page)
return len+sprintf(page+len, "\n");
}
+static ssize_t
+state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ /* can write
+ * faulty - simulates an error
+ * remove - disconnects the device
+ * writemostly - sets write_mostly
+ * -writemostly - clears write_mostly
+ */
+ int err = -EINVAL;
+ if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
+ md_error(rdev->mddev, rdev);
+ err = 0;
+ } else if (cmd_match(buf, "remove")) {
+ if (rdev->raid_disk >= 0)
+ err = -EBUSY;
+ else {
+ mddev_t *mddev = rdev->mddev;
+ kick_rdev_from_array(rdev);
+ md_update_sb(mddev);
+ md_new_event(mddev);
+ err = 0;
+ }
+ } else if (cmd_match(buf, "writemostly")) {
+ set_bit(WriteMostly, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-writemostly")) {
+ clear_bit(WriteMostly, &rdev->flags);
+ err = 0;
+ }
+ return err ? err : len;
+}
static struct rdev_sysfs_entry
-rdev_state = __ATTR_RO(state);
+rdev_state = __ATTR(state, 0644, state_show, state_store);
static ssize_t
super_show(mdk_rdev_t *rdev, char *page)
@@ -1873,6 +1979,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
rdev->desc_nr = -1;
rdev->flags = 0;
rdev->data_offset = 0;
+ rdev->sb_events = 0;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
@@ -1978,6 +2085,54 @@ static void analyze_sbs(mddev_t * mddev)
}
static ssize_t
+safe_delay_show(mddev_t *mddev, char *page)
+{
+ int msec = (mddev->safemode_delay*1000)/HZ;
+ return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
+}
+static ssize_t
+safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
+{
+ int scale=1;
+ int dot=0;
+ int i;
+ unsigned long msec;
+ char buf[30];
+ char *e;
+ /* remove a period, and count digits after it */
+ if (len >= sizeof(buf))
+ return -EINVAL;
+ strlcpy(buf, cbuf, len);
+ buf[len] = 0;
+ for (i=0; i<len; i++) {
+ if (dot) {
+ if (isdigit(buf[i])) {
+ buf[i-1] = buf[i];
+ scale *= 10;
+ }
+ buf[i] = 0;
+ } else if (buf[i] == '.') {
+ dot=1;
+ buf[i] = 0;
+ }
+ }
+ msec = simple_strtoul(buf, &e, 10);
+ if (e == buf || (*e && *e != '\n'))
+ return -EINVAL;
+ msec = (msec * 1000) / scale;
+ if (msec == 0)
+ mddev->safemode_delay = 0;
+ else {
+ mddev->safemode_delay = (msec*HZ)/1000;
+ if (mddev->safemode_delay == 0)
+ mddev->safemode_delay = 1;
+ }
+ return len;
+}
+static struct md_sysfs_entry md_safe_delay =
+__ATTR(safe_mode_delay, 0644,safe_delay_show, safe_delay_store);
+
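safe_delay_store() above parses a decimal "seconds.milliseconds" string written through sysfs, scales it to milliseconds and then to jiffies, and rounds any non-zero delay up to at least one jiffy. A userspace sketch that mirrors the same parsing rules (the /sys path in the comment is an assumption, not taken from this patch):

/* Userspace sketch of the "s.mmm" parse done by safe_delay_store(); no
 * jiffies conversion here.  A value like "0.350" might be written to
 * /sys/block/md0/md/safe_mode_delay (path assumed, not shown in this patch). */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static long parse_safe_delay_msec(const char *s)
{
	long scale = 1, msec;
	char buf[30], *e;
	int i, dot = 0, len = 0;

	for (i = 0; s[i] && s[i] != '\n' && len < (int)sizeof(buf) - 1; i++) {
		if (s[i] == '.') {
			dot = 1;
			continue;
		}
		if (!isdigit((unsigned char)s[i]))
			return -1;
		buf[len++] = s[i];
		if (dot)
			scale *= 10;	/* count digits after the period */
	}
	buf[len] = '\0';
	msec = strtol(buf, &e, 10);
	if (e == buf)
		return -1;
	return (msec * 1000) / scale;	/* "0.350" -> 350, "2" -> 2000 */
}

int main(void)
{
	printf("%ld\n", parse_safe_delay_msec("0.350"));	/* 350 */
	printf("%ld\n", parse_safe_delay_msec("2\n"));		/* 2000 */
	return 0;
}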
+static ssize_t
level_show(mddev_t *mddev, char *page)
{
struct mdk_personality *p = mddev->pers;
@@ -2012,6 +2167,32 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_level =
__ATTR(level, 0644, level_show, level_store);
+
+static ssize_t
+layout_show(mddev_t *mddev, char *page)
+{
+ /* just a number, not meaningful for all levels */
+ return sprintf(page, "%d\n", mddev->layout);
+}
+
+static ssize_t
+layout_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+ if (mddev->pers)
+ return -EBUSY;
+
+ if (!*buf || (*e && *e != '\n'))
+ return -EINVAL;
+
+ mddev->layout = n;
+ return len;
+}
+static struct md_sysfs_entry md_layout =
+__ATTR(layout, 0644, layout_show, layout_store);
+
+
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
@@ -2067,6 +2248,200 @@ static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
static ssize_t
+resync_start_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
+}
+
+static ssize_t
+resync_start_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* can only set resync_start if array is not yet active */
+ char *e;
+ unsigned long long n = simple_strtoull(buf, &e, 10);
+
+ if (mddev->pers)
+ return -EBUSY;
+ if (!*buf || (*e && *e != '\n'))
+ return -EINVAL;
+
+ mddev->recovery_cp = n;
+ return len;
+}
+static struct md_sysfs_entry md_resync_start =
+__ATTR(resync_start, 0644, resync_start_show, resync_start_store);
+
+/*
+ * The array state can be:
+ *
+ * clear
+ * No devices, no size, no level
+ * Equivalent to STOP_ARRAY ioctl
+ * inactive
+ * May have some settings, but array is not active
+ * all IO results in error
+ * When written, doesn't tear down array, but just stops it
+ * suspended (not supported yet)
+ * All IO requests will block. The array can be reconfigured.
+ * Writing this, if accepted, will block until array is quiescent
+ * readonly
+ * no resync can happen. no superblocks get written.
+ * write requests fail
+ * read-auto
+ * like readonly, but behaves like 'clean' on a write request.
+ *
+ * clean - no pending writes, but otherwise active.
+ * When written to inactive array, starts without resync
+ * If a write request arrives then
+ * if metadata is known, mark 'dirty' and switch to 'active'.
+ * if not known, block and switch to write-pending
+ * If written to an active array that has pending writes, then fails.
+ * active
+ * fully active: IO and resync can be happening.
+ * When written to inactive array, starts with resync
+ *
+ * write-pending
+ * clean, but writes are blocked waiting for 'active' to be written.
+ *
+ * active-idle
+ * like active, but no writes have been seen for a while (safe_mode_delay).
+ *
+ */
+enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
+ write_pending, active_idle, bad_word};
+static char *array_states[] = {
+ "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
+ "write-pending", "active-idle", NULL };
+
+static int match_word(const char *word, char **list)
+{
+ int n;
+ for (n=0; list[n]; n++)
+ if (cmd_match(word, list[n]))
+ break;
+ return n;
+}
+
+static ssize_t
+array_state_show(mddev_t *mddev, char *page)
+{
+ enum array_state st = inactive;
+
+ if (mddev->pers)
+ switch(mddev->ro) {
+ case 1:
+ st = readonly;
+ break;
+ case 2:
+ st = read_auto;
+ break;
+ case 0:
+ if (mddev->in_sync)
+ st = clean;
+ else if (mddev->safemode)
+ st = active_idle;
+ else
+ st = active;
+ }
+ else {
+ if (list_empty(&mddev->disks) &&
+ mddev->raid_disks == 0 &&
+ mddev->size == 0)
+ st = clear;
+ else
+ st = inactive;
+ }
+ return sprintf(page, "%s\n", array_states[st]);
+}
+
+static int do_md_stop(mddev_t * mddev, int ro);
+static int do_md_run(mddev_t * mddev);
+static int restart_array(mddev_t *mddev);
+
+static ssize_t
+array_state_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int err = -EINVAL;
+ enum array_state st = match_word(buf, array_states);
+ switch(st) {
+ case bad_word:
+ break;
+ case clear:
+ /* stopping an active array */
+ if (mddev->pers) {
+ if (atomic_read(&mddev->active) > 1)
+ return -EBUSY;
+ err = do_md_stop(mddev, 0);
+ }
+ break;
+ case inactive:
+ /* stopping an active array */
+ if (mddev->pers) {
+ if (atomic_read(&mddev->active) > 1)
+ return -EBUSY;
+ err = do_md_stop(mddev, 2);
+ }
+ break;
+ case suspended:
+ break; /* not supported yet */
+ case readonly:
+ if (mddev->pers)
+ err = do_md_stop(mddev, 1);
+ else {
+ mddev->ro = 1;
+ err = do_md_run(mddev);
+ }
+ break;
+ case read_auto:
+ /* stopping an active array */
+ if (mddev->pers) {
+ err = do_md_stop(mddev, 1);
+ if (err == 0)
+ mddev->ro = 2; /* FIXME mark devices writable */
+ } else {
+ mddev->ro = 2;
+ err = do_md_run(mddev);
+ }
+ break;
+ case clean:
+ if (mddev->pers) {
+ restart_array(mddev);
+ spin_lock_irq(&mddev->write_lock);
+ if (atomic_read(&mddev->writes_pending) == 0) {
+ mddev->in_sync = 1;
+ mddev->sb_dirty = 1;
+ }
+ spin_unlock_irq(&mddev->write_lock);
+ } else {
+ mddev->ro = 0;
+ mddev->recovery_cp = MaxSector;
+ err = do_md_run(mddev);
+ }
+ break;
+ case active:
+ if (mddev->pers) {
+ restart_array(mddev);
+ mddev->sb_dirty = 0;
+ wake_up(&mddev->sb_wait);
+ err = 0;
+ } else {
+ mddev->ro = 0;
+ err = do_md_run(mddev);
+ }
+ break;
+ case write_pending:
+ case active_idle:
+ /* these cannot be set */
+ break;
+ }
+ if (err)
+ return err;
+ else
+ return len;
+}
+static struct md_sysfs_entry md_array_state = __ATTR(array_state, 0644, array_state_show, array_state_store);
+
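array_state_show()/array_state_store() above expose the state machine described in the comment block as a single word in sysfs; match_word() maps a written word to its index in array_states[], and anything unrecognized falls through to bad_word and -EINVAL. A standalone sketch of that lookup (cmd_match() is approximated here by a newline-tolerant compare):

/* Standalone sketch of the array_state word lookup.  The real code uses
 * cmd_match(); a newline-tolerant strncmp stands in for it here. */
#include <stdio.h>
#include <string.h>

static const char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto",
	"clean", "active", "write-pending", "active-idle", NULL
};

static int match_word(const char *word, const char **list)
{
	size_t len = strcspn(word, "\n");
	int n;

	for (n = 0; list[n]; n++)
		if (strlen(list[n]) == len && !strncmp(word, list[n], len))
			break;
	return n;		/* index, or the NULL slot == bad_word */
}

int main(void)
{
	printf("%d\n", match_word("read-auto\n", array_states));	/* 4 */
	printf("%d\n", match_word("bogus\n", array_states));		/* 9 -> bad_word */
	return 0;
}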
+static ssize_t
null_show(mddev_t *mddev, char *page)
{
return -EINVAL;
@@ -2428,11 +2803,15 @@ __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
+ &md_layout.attr,
&md_raid_disks.attr,
&md_chunk_size.attr,
&md_size.attr,
+ &md_resync_start.attr,
&md_metadata.attr,
&md_new_device.attr,
+ &md_safe_delay.attr,
+ &md_array_state.attr,
NULL,
};
@@ -2553,8 +2932,6 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
return NULL;
}
-void md_wakeup_thread(mdk_thread_t *thread);
-
static void md_safemode_timeout(unsigned long data)
{
mddev_t *mddev = (mddev_t *) data;
@@ -2708,7 +3085,7 @@ static int do_md_run(mddev_t * mddev)
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
- mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
+ mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
ITERATE_RDEV(mddev,rdev,tmp)
@@ -2736,6 +3113,36 @@ static int do_md_run(mddev_t * mddev)
mddev->queue->queuedata = mddev;
mddev->queue->make_request_fn = mddev->pers->make_request;
+ /* If there is a partially-recovered drive we need to
+ * start recovery here. If we leave it to md_check_recovery,
+ * it will remove the drives and not do the right thing
+ */
+ if (mddev->degraded) {
+ struct list_head *rtmp;
+ int spares = 0;
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
+ /* complete an interrupted recovery */
+ spares++;
+ if (spares && mddev->pers->sync_request) {
+ mddev->recovery = 0;
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ mddev->sync_thread = md_register_thread(md_do_sync,
+ mddev,
+ "%s_resync");
+ if (!mddev->sync_thread) {
+ printk(KERN_ERR "%s: could not start resync"
+ " thread...\n",
+ mdname(mddev));
+ /* leave the spares where they are, it shouldn't hurt */
+ mddev->recovery = 0;
+ } else
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ }
+
mddev->changed = 1;
md_new_event(mddev);
return 0;
@@ -2769,18 +3176,47 @@ static int restart_array(mddev_t *mddev)
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread);
err = 0;
- } else {
- printk(KERN_ERR "md: %s has no personality assigned.\n",
- mdname(mddev));
+ } else
err = -EINVAL;
- }
out:
return err;
}
-static int do_md_stop(mddev_t * mddev, int ro)
+/* similar to deny_write_access, but accounts for our holding a reference
+ * to the file ourselves */
+static int deny_bitmap_write_access(struct file * file)
+{
+ struct inode *inode = file->f_mapping->host;
+
+ spin_lock(&inode->i_lock);
+ if (atomic_read(&inode->i_writecount) > 1) {
+ spin_unlock(&inode->i_lock);
+ return -ETXTBSY;
+ }
+ atomic_set(&inode->i_writecount, -1);
+ spin_unlock(&inode->i_lock);
+
+ return 0;
+}
+
+static void restore_bitmap_write_access(struct file *file)
+{
+ struct inode *inode = file->f_mapping->host;
+
+ spin_lock(&inode->i_lock);
+ atomic_set(&inode->i_writecount, 1);
+ spin_unlock(&inode->i_lock);
+}
+
+/* mode:
+ * 0 - completely stop and dis-assemble array
+ * 1 - switch to readonly
+ * 2 - stop but do not disassemble array
+ */
+static int do_md_stop(mddev_t * mddev, int mode)
{
int err = 0;
struct gendisk *disk = mddev->gendisk;
@@ -2792,6 +3228,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
}
if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
@@ -2801,12 +3238,15 @@ static int do_md_stop(mddev_t * mddev, int ro)
invalidate_partition(disk, 0);
- if (ro) {
+ switch(mode) {
+ case 1: /* readonly */
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
- } else {
+ break;
+ case 0: /* disassemble */
+ case 2: /* stop */
bitmap_flush(mddev);
md_super_wait(mddev);
if (mddev->ro)
@@ -2821,19 +3261,20 @@ static int do_md_stop(mddev_t * mddev, int ro)
if (mddev->ro)
mddev->ro = 0;
}
- if (!mddev->in_sync) {
+ if (!mddev->in_sync || mddev->sb_dirty) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev);
}
- if (ro)
+ if (mode == 1)
set_disk_ro(disk, 1);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
/*
* Free resources if final stop
*/
- if (!ro) {
+ if (mode == 0) {
mdk_rdev_t *rdev;
struct list_head *tmp;
struct gendisk *disk;
@@ -2841,7 +3282,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
bitmap_destroy(mddev);
if (mddev->bitmap_file) {
- atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
+ restore_bitmap_write_access(mddev->bitmap_file);
fput(mddev->bitmap_file);
mddev->bitmap_file = NULL;
}
@@ -2857,11 +3298,15 @@ static int do_md_stop(mddev_t * mddev, int ro)
export_array(mddev);
mddev->array_size = 0;
+ mddev->size = 0;
+ mddev->raid_disks = 0;
+ mddev->recovery_cp = 0;
+
disk = mddev->gendisk;
if (disk)
set_capacity(disk, 0);
mddev->changed = 1;
- } else
+ } else if (mddev->pers)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
mdname(mddev));
err = 0;
@@ -3264,6 +3709,17 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
+ if (!err && !mddev->pers->hot_remove_disk) {
+ /* If there is hot_add_disk but no hot_remove_disk
+ * then added disks for geometry changes,
+ * and should be added immediately.
+ */
+ super_types[mddev->major_version].
+ validate_super(mddev, rdev);
+ err = mddev->pers->hot_add_disk(mddev, rdev);
+ if (err)
+ unbind_rdev_from_array(rdev);
+ }
if (err)
export_rdev(rdev);
@@ -3434,23 +3890,6 @@ abort_export:
return err;
}
-/* similar to deny_write_access, but accounts for our holding a reference
- * to the file ourselves */
-static int deny_bitmap_write_access(struct file * file)
-{
- struct inode *inode = file->f_mapping->host;
-
- spin_lock(&inode->i_lock);
- if (atomic_read(&inode->i_writecount) > 1) {
- spin_unlock(&inode->i_lock);
- return -ETXTBSY;
- }
- atomic_set(&inode->i_writecount, -1);
- spin_unlock(&inode->i_lock);
-
- return 0;
-}
-
static int set_bitmap_file(mddev_t *mddev, int fd)
{
int err;
@@ -3491,12 +3930,17 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
mddev->pers->quiesce(mddev, 1);
if (fd >= 0)
err = bitmap_create(mddev);
- if (fd < 0 || err)
+ if (fd < 0 || err) {
bitmap_destroy(mddev);
+ fd = -1; /* make sure to put the file */
+ }
mddev->pers->quiesce(mddev, 0);
- } else if (fd < 0) {
- if (mddev->bitmap_file)
+ }
+ if (fd < 0) {
+ if (mddev->bitmap_file) {
+ restore_bitmap_write_access(mddev->bitmap_file);
fput(mddev->bitmap_file);
+ }
mddev->bitmap_file = NULL;
}
@@ -3977,11 +4421,6 @@ static int md_ioctl(struct inode *inode, struct file *file,
goto done_unlock;
default:
- if (_IOC_TYPE(cmd) == MD_MAJOR)
- printk(KERN_WARNING "md: %s(pid %d) used"
- " obsolete MD ioctl, upgrade your"
- " software to use new ictls.\n",
- current->comm, current->pid);
err = -EINVAL;
goto abort_unlock;
}
@@ -4586,7 +5025,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- mddev->sb_dirty = 1;
+ mddev->sb_dirty = 3;
md_wakeup_thread(mddev->thread);
}
spin_unlock_irq(&mddev->write_lock);
@@ -4599,7 +5038,7 @@ void md_write_end(mddev_t *mddev)
if (atomic_dec_and_test(&mddev->writes_pending)) {
if (mddev->safemode == 2)
md_wakeup_thread(mddev->thread);
- else
+ else if (mddev->safemode_delay)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
@@ -4620,10 +5059,14 @@ void md_do_sync(mddev_t *mddev)
struct list_head *tmp;
sector_t last_check;
int skipped = 0;
+ struct list_head *rtmp;
+ mdk_rdev_t *rdev;
/* just incase thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
+ if (mddev->ro) /* never try to sync a read-only array */
+ return;
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
@@ -4682,17 +5125,30 @@ void md_do_sync(mddev_t *mddev)
}
} while (mddev->curr_resync < 2);
+ j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
mddev->resync_mismatches = 0;
+ /* we don't use the checkpoint if there's a bitmap */
+ if (!mddev->bitmap &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->size << 1;
- else
+ else {
/* recovery follows the physical size of devices */
max_sectors = mddev->size << 1;
+ j = MaxSector;
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < j)
+ j = rdev->recovery_offset;
+ }
printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
@@ -4702,12 +5158,7 @@ void md_do_sync(mddev_t *mddev)
speed_max(mddev));
is_mddev_idle(mddev); /* this also initializes IO event counters */
- /* we don't use the checkpoint if there's a bitmap */
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
- && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
- j = mddev->recovery_cp;
- else
- j = 0;
+
io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
@@ -4828,15 +5279,28 @@ void md_do_sync(mddev_t *mddev)
if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 2 &&
- mddev->curr_resync >= mddev->recovery_cp) {
- if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
- printk(KERN_INFO
- "md: checkpointing recovery of %s.\n",
- mdname(mddev));
- mddev->recovery_cp = mddev->curr_resync;
- } else
- mddev->recovery_cp = MaxSector;
+ mddev->curr_resync > 2) {
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (mddev->curr_resync >= mddev->recovery_cp) {
+ printk(KERN_INFO
+ "md: checkpointing recovery of %s.\n",
+ mdname(mddev));
+ mddev->recovery_cp = mddev->curr_resync;
+ }
+ } else
+ mddev->recovery_cp = MaxSector;
+ } else {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ mddev->curr_resync = MaxSector;
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < mddev->curr_resync)
+ rdev->recovery_offset = mddev->curr_resync;
+ mddev->sb_dirty = 1;
+ }
}
skip:
@@ -4908,7 +5372,7 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
!mddev->in_sync && mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
- mddev->sb_dirty = 1;
+ mddev->sb_dirty = 3;
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -4957,6 +5421,8 @@ void md_check_recovery(mddev_t *mddev)
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
+ if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ goto unlock;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
@@ -4979,6 +5445,7 @@ void md_check_recovery(mddev_t *mddev)
ITERATE_RDEV(mddev,rdev,rtmp)
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
if (mddev->pers->hot_add_disk(mddev,rdev)) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
@@ -5216,7 +5683,6 @@ EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
-EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4070eff6f0f8..cead918578a7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -374,26 +374,26 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
* already.
*/
if (atomic_dec_and_test(&r1_bio->remaining)) {
- if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
+ if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
reschedule_retry(r1_bio);
- goto out;
- }
- /* it really is the end of this request */
- if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- /* free extra copy of the data pages */
- int i = bio->bi_vcnt;
- while (i--)
- safe_put_page(bio->bi_io_vec[i].bv_page);
+ else {
+ /* it really is the end of this request */
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ /* free extra copy of the data pages */
+ int i = bio->bi_vcnt;
+ while (i--)
+ safe_put_page(bio->bi_io_vec[i].bv_page);
+ }
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ behind);
+ md_write_end(r1_bio->mddev);
+ raid_end_bio_io(r1_bio);
}
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- behind);
- md_write_end(r1_bio->mddev);
- raid_end_bio_io(r1_bio);
}
- out:
+
if (to_put)
bio_put(to_put);
@@ -1625,6 +1625,12 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
/* before building a request, check if we can skip these blocks.
* This call to bitmap_start_sync doesn't actually record anything
*/
+ if (mddev->bitmap == NULL &&
+ mddev->recovery_cp == MaxSector &&
+ conf->fullsync == 0) {
+ *skipped = 1;
+ return max_sector - sector_nr;
+ }
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
@@ -1888,7 +1894,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + i;
- if (!disk->rdev) {
+ if (!disk->rdev ||
+ !test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
mddev->degraded++;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1440935414e6..7f636283a1ba 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -29,6 +29,7 @@
* raid_disks
* near_copies (stored in low byte of layout)
* far_copies (stored in second byte of layout)
+ * far_offset (stored in bit 16 of layout )
*
* The data to be stored is divided into chunks using chunksize.
* Each device is divided into far_copies sections.
@@ -36,10 +37,14 @@
* near_copies copies of each chunk is stored (each on a different drive).
* The starting device for each section is offset near_copies from the starting
* device of the previous section.
- * Thus there are (near_copies*far_copies) of each chunk, and each is on a different
+ * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
* drive.
* near_copies and far_copies must be at least one, and their product is at most
* raid_disks.
+ *
+ * If far_offset is true, then the far_copies are handled a bit differently.
+ * The copies are still in different stripes, but instead of being very far apart
+ * on disk, they are in adjacent stripes.
*/
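With this change the raid10 'layout' word carries near_copies in bits 0-7, far_copies in bits 8-15, and the new far_offset flag in bit 16, matching how run() below unpacks it into nc, fc and fo. A tiny sketch of composing and decoding such a value (make_layout() is an illustrative helper, not an md interface):

/* Sketch of packing/unpacking the raid10 'layout' word as run() does:
 * nc = layout & 255, fc = (layout >> 8) & 255, fo = layout & (1 << 16). */
#include <stdio.h>

static unsigned int make_layout(unsigned int nc, unsigned int fc, int fo)
{
	return (fo ? (1u << 16) : 0) | ((fc & 255) << 8) | (nc & 255);
}

int main(void)
{
	unsigned int layout = make_layout(1, 2, 1);	/* 1 near, 2 offset copies */

	printf("layout=0x%x nc=%u fc=%u fo=%d\n",
	       layout, layout & 255, (layout >> 8) & 255,
	       !!(layout & (1u << 16)));		/* layout=0x10201 */
	return 0;
}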
/*
@@ -357,8 +362,7 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
* With this layout, a block is never stored twice on the same device.
*
* raid10_find_phys finds the sector offset of a given virtual sector
- * on each device that it is on. If a block isn't on a device,
- * that entry in the array is set to MaxSector.
+ * on each device that it is on.
*
* raid10_find_virt does the reverse mapping, from a device and a
* sector offset to a virtual address
@@ -381,6 +385,8 @@ static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
chunk *= conf->near_copies;
stripe = chunk;
dev = sector_div(stripe, conf->raid_disks);
+ if (conf->far_offset)
+ stripe *= conf->far_copies;
sector += stripe << conf->chunk_shift;
@@ -414,16 +420,24 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
{
sector_t offset, chunk, vchunk;
- while (sector > conf->stride) {
- sector -= conf->stride;
- if (dev < conf->near_copies)
- dev += conf->raid_disks - conf->near_copies;
- else
- dev -= conf->near_copies;
- }
-
offset = sector & conf->chunk_mask;
- chunk = sector >> conf->chunk_shift;
+ if (conf->far_offset) {
+ int fc;
+ chunk = sector >> conf->chunk_shift;
+ fc = sector_div(chunk, conf->far_copies);
+ dev -= fc * conf->near_copies;
+ if (dev < 0)
+ dev += conf->raid_disks;
+ } else {
+ while (sector > conf->stride) {
+ sector -= conf->stride;
+ if (dev < conf->near_copies)
+ dev += conf->raid_disks - conf->near_copies;
+ else
+ dev -= conf->near_copies;
+ }
+ chunk = sector >> conf->chunk_shift;
+ }
vchunk = chunk * conf->raid_disks + dev;
sector_div(vchunk, conf->near_copies);
return (vchunk << conf->chunk_shift) + offset;
@@ -900,9 +914,12 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
if (conf->near_copies > 1)
seq_printf(seq, " %d near-copies", conf->near_copies);
- if (conf->far_copies > 1)
- seq_printf(seq, " %d far-copies", conf->far_copies);
-
+ if (conf->far_copies > 1) {
+ if (conf->far_offset)
+ seq_printf(seq, " %d offset-copies", conf->far_copies);
+ else
+ seq_printf(seq, " %d far-copies", conf->far_copies);
+ }
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
conf->working_disks);
for (i = 0; i < conf->raid_disks; i++)
@@ -1915,7 +1932,7 @@ static int run(mddev_t *mddev)
mirror_info_t *disk;
mdk_rdev_t *rdev;
struct list_head *tmp;
- int nc, fc;
+ int nc, fc, fo;
sector_t stride, size;
if (mddev->chunk_size == 0) {
@@ -1925,8 +1942,9 @@ static int run(mddev_t *mddev)
nc = mddev->layout & 255;
fc = (mddev->layout >> 8) & 255;
+ fo = mddev->layout & (1<<16);
if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
- (mddev->layout >> 16)) {
+ (mddev->layout >> 17)) {
printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->layout);
goto out;
@@ -1958,12 +1976,16 @@ static int run(mddev_t *mddev)
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
+ conf->far_offset = fo;
conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
- stride = mddev->size >> (conf->chunk_shift-1);
- sector_div(stride, fc);
- conf->stride = stride << conf->chunk_shift;
-
+ if (fo)
+ conf->stride = 1 << conf->chunk_shift;
+ else {
+ stride = mddev->size >> (conf->chunk_shift-1);
+ sector_div(stride, fc);
+ conf->stride = stride << conf->chunk_shift;
+ }
conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
r10bio_pool_free, conf);
if (!conf->r10bio_pool) {
@@ -2015,7 +2037,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + i;
- if (!disk->rdev) {
+ if (!disk->rdev ||
+ !test_bit(In_sync, &rdev->flags)) {
disk->head_position = 0;
mddev->degraded++;
}
@@ -2037,7 +2060,13 @@ static int run(mddev_t *mddev)
/*
* Ok, everything is just fine now
*/
- size = conf->stride * conf->raid_disks;
+ if (conf->far_offset) {
+ size = mddev->size >> (conf->chunk_shift-1);
+ size *= conf->raid_disks;
+ size <<= conf->chunk_shift;
+ sector_div(size, conf->far_copies);
+ } else
+ size = conf->stride * conf->raid_disks;
sector_div(size, conf->near_copies);
mddev->array_size = size/2;
mddev->resync_max_sectors = size;
@@ -2050,7 +2079,7 @@ static int run(mddev_t *mddev)
* maybe...
*/
{
- int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
+ int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
stripe /= conf->near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31843604049c..837ec4eb3d60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2,8 +2,11 @@
* raid5.c : Multiple Devices driver for Linux
* Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
* Copyright (C) 1999, 2000 Ingo Molnar
+ * Copyright (C) 2002, 2003 H. Peter Anvin
*
- * RAID-5 management functions.
+ * RAID-4/5/6 management functions.
+ * Thanks to Penguin Computing for making the RAID-6 development possible
+ * by donating a test server!
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,11 +22,11 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <asm/atomic.h>
+#include "raid6.h"
#include <linux/raid/bitmap.h>
@@ -68,6 +71,16 @@
#define __inline__
#endif
+#if !RAID6_USE_EMPTY_ZERO_PAGE
+/* In .bss so it's zeroed */
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+#endif
+
+static inline int raid6_next_disk(int disk, int raid_disks)
+{
+ disk++;
+ return (disk < raid_disks) ? disk : 0;
+}
static void print_raid5_conf (raid5_conf_t *conf);
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
@@ -104,7 +117,7 @@ static void release_stripe(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
unsigned long flags;
-
+
spin_lock_irqsave(&conf->device_lock, flags);
__release_stripe(conf, sh);
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -117,7 +130,7 @@ static inline void remove_hash(struct stripe_head *sh)
hlist_del_init(&sh->hash);
}
-static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
struct hlist_head *hp = stripe_hash(conf, sh->sector);
@@ -190,7 +203,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
(unsigned long long)sh->sector);
remove_hash(sh);
-
+
sh->sector = sector;
sh->pd_idx = pd_idx;
sh->state = 0;
@@ -269,8 +282,9 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
- if (!list_empty(&sh->lru))
- list_del_init(&sh->lru);
+ if (list_empty(&sh->lru))
+ BUG();
+ list_del_init(&sh->lru);
}
}
} while (sh == NULL);
@@ -321,10 +335,9 @@ static int grow_stripes(raid5_conf_t *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- while (num--) {
+ while (num--)
if (!grow_one_stripe(conf))
return 1;
- }
return 0;
}
@@ -631,8 +644,7 @@ static void raid5_build_block (struct stripe_head *sh, int i)
dev->req.bi_private = sh;
dev->flags = 0;
- if (i != sh->pd_idx)
- dev->sector = compute_blocknr(sh, i);
+ dev->sector = compute_blocknr(sh, i);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -659,7 +671,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
" Operation continuing on %d devices\n",
bdevname(rdev->bdev,b), conf->working_disks);
}
-}
+}
/*
* Input: a 'big' sector number,
@@ -697,9 +709,12 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
/*
* Select the parity disk based on the user selected algorithm.
*/
- if (conf->level == 4)
+ switch(conf->level) {
+ case 4:
*pd_idx = data_disks;
- else switch (conf->algorithm) {
+ break;
+ case 5:
+ switch (conf->algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
*pd_idx = data_disks - stripe % raid_disks;
if (*dd_idx >= *pd_idx)
@@ -721,6 +736,39 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
default:
printk(KERN_ERR "raid5: unsupported algorithm %d\n",
conf->algorithm);
+ }
+ break;
+ case 6:
+
+ /**** FIX THIS ****/
+ switch (conf->algorithm) {
+ case ALGORITHM_LEFT_ASYMMETRIC:
+ *pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ if (*pd_idx == raid_disks-1)
+ (*dd_idx)++; /* Q D D D P */
+ else if (*dd_idx >= *pd_idx)
+ (*dd_idx) += 2; /* D D P Q D */
+ break;
+ case ALGORITHM_RIGHT_ASYMMETRIC:
+ *pd_idx = stripe % raid_disks;
+ if (*pd_idx == raid_disks-1)
+ (*dd_idx)++; /* Q D D D P */
+ else if (*dd_idx >= *pd_idx)
+ (*dd_idx) += 2; /* D D P Q D */
+ break;
+ case ALGORITHM_LEFT_SYMMETRIC:
+ *pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+ break;
+ case ALGORITHM_RIGHT_SYMMETRIC:
+ *pd_idx = stripe % raid_disks;
+ *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+ break;
+ default:
+ printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
+ conf->algorithm);
+ }
+ break;
}
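For level 6 the P/Q pair rotates per stripe: with LEFT_ASYMMETRIC and, say, 5 devices, stripe 0 places Q on disk 0 and P on disk 4 ("Q D D D P"), while stripes whose parity lands mid-row shift data indices at or above pd_idx by two to step over P and Q ("D D P Q D"). A standalone sketch of that placement, mirroring the LEFT_ASYMMETRIC branch above for illustration only:

/* Standalone sketch of RAID-6 LEFT_ASYMMETRIC placement per stripe.
 * Mirrors the switch() above for illustration; not an md interface. */
#include <stdio.h>

int main(void)
{
	int raid_disks = 5, stripe, d;

	for (stripe = 0; stripe < 3; stripe++) {
		int pd_idx = raid_disks - 1 - (stripe % raid_disks);
		int qd_idx = (pd_idx + 1) % raid_disks;	/* raid6_next_disk() */

		printf("stripe %d:", stripe);
		for (d = 0; d < raid_disks - 2; d++) {	/* logical data blocks */
			int dd_idx = d;
			if (pd_idx == raid_disks - 1)
				dd_idx++;		/* Q D D D P */
			else if (dd_idx >= pd_idx)
				dd_idx += 2;		/* D D P Q D */
			printf(" D%d->disk%d", d, dd_idx);
		}
		printf(" P->disk%d Q->disk%d\n", pd_idx, qd_idx);
	}
	return 0;
}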
/*
@@ -742,12 +790,17 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
int chunk_number, dummy1, dummy2, dd_idx = i;
sector_t r_sector;
+
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
BUG_ON(new_sector != stripe);
-
- switch (conf->algorithm) {
+ if (i == sh->pd_idx)
+ return 0;
+ switch(conf->level) {
+ case 4: break;
+ case 5:
+ switch (conf->algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
case ALGORITHM_RIGHT_ASYMMETRIC:
if (i > sh->pd_idx)
@@ -761,7 +814,37 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
break;
default:
printk(KERN_ERR "raid5: unsupported algorithm %d\n",
+ conf->algorithm);
+ }
+ break;
+ case 6:
+ data_disks = raid_disks - 2;
+ if (i == raid6_next_disk(sh->pd_idx, raid_disks))
+ return 0; /* It is the Q disk */
+ switch (conf->algorithm) {
+ case ALGORITHM_LEFT_ASYMMETRIC:
+ case ALGORITHM_RIGHT_ASYMMETRIC:
+ if (sh->pd_idx == raid_disks-1)
+ i--; /* Q D D D P */
+ else if (i > sh->pd_idx)
+ i -= 2; /* D D P Q D */
+ break;
+ case ALGORITHM_LEFT_SYMMETRIC:
+ case ALGORITHM_RIGHT_SYMMETRIC:
+ if (sh->pd_idx == raid_disks-1)
+ i--; /* Q D D D P */
+ else {
+ /* D D P Q D */
+ if (i < sh->pd_idx)
+ i += raid_disks;
+ i -= (sh->pd_idx + 2);
+ }
+ break;
+ default:
+ printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
conf->algorithm);
+ }
+ break;
}
chunk_number = stripe * data_disks + i;
@@ -778,10 +861,11 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
/*
- * Copy data between a page in the stripe cache, and a bio.
- * There are no alignment or size guarantees between the page or the
- * bio except that there is some overlap.
- * All iovecs in the bio must be considered.
+ * Copy data between a page in the stripe cache, and one or more bion
+ * The page could align with the middle of the bio, or there could be
+ * several bion, each with several bio_vecs, which cover part of the page
+ * Multiple bion are linked together on bi_next. There may be extras
+ * at the end of this list. We ignore them.
*/
static void copy_data(int frombio, struct bio *bio,
struct page *page,
@@ -810,7 +894,7 @@ static void copy_data(int frombio, struct bio *bio,
if (len > 0 && page_offset + len > STRIPE_SIZE)
clen = STRIPE_SIZE - page_offset;
else clen = len;
-
+
if (clen > 0) {
char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
if (frombio)
@@ -862,14 +946,14 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
-static void compute_parity(struct stripe_head *sh, int method)
+static void compute_parity5(struct stripe_head *sh, int method)
{
raid5_conf_t *conf = sh->raid_conf;
int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
void *ptr[MAX_XOR_BLOCKS];
struct bio *chosen;
- PRINTK("compute_parity, stripe %llu, method %d\n",
+ PRINTK("compute_parity5, stripe %llu, method %d\n",
(unsigned long long)sh->sector, method);
count = 1;
@@ -956,9 +1040,195 @@ static void compute_parity(struct stripe_head *sh, int method)
clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}
+static void compute_parity6(struct stripe_head *sh, int method)
+{
+ raid6_conf_t *conf = sh->raid_conf;
+ int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
+ struct bio *chosen;
+ /**** FIX THIS: This could be very bad if disks is close to 256 ****/
+ void *ptrs[disks];
+
+ qd_idx = raid6_next_disk(pd_idx, disks);
+ d0_idx = raid6_next_disk(qd_idx, disks);
+
+ PRINTK("compute_parity, stripe %llu, method %d\n",
+ (unsigned long long)sh->sector, method);
+
+ switch(method) {
+ case READ_MODIFY_WRITE:
+ BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
+ case RECONSTRUCT_WRITE:
+ for (i= disks; i-- ;)
+ if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
+ chosen = sh->dev[i].towrite;
+ sh->dev[i].towrite = NULL;
+
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wake_up(&conf->wait_for_overlap);
+
+ if (sh->dev[i].written) BUG();
+ sh->dev[i].written = chosen;
+ }
+ break;
+ case CHECK_PARITY:
+ BUG(); /* Not implemented yet */
+ }
+
+ for (i = disks; i--;)
+ if (sh->dev[i].written) {
+ sector_t sector = sh->dev[i].sector;
+ struct bio *wbi = sh->dev[i].written;
+ while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
+ copy_data(1, wbi, sh->dev[i].page, sector);
+ wbi = r5_next_bio(wbi, sector);
+ }
+
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ }
+
+// switch(method) {
+// case RECONSTRUCT_WRITE:
+// case CHECK_PARITY:
+// case UPDATE_PARITY:
+ /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
+ /* FIX: Is this ordering of drives even remotely optimal? */
+ count = 0;
+ i = d0_idx;
+ do {
+ ptrs[count++] = page_address(sh->dev[i].page);
+ if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
+ printk("block %d/%d not uptodate on parity calc\n", i,count);
+ i = raid6_next_disk(i, disks);
+ } while ( i != d0_idx );
+// break;
+// }
+
+ raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
+
+ switch(method) {
+ case RECONSTRUCT_WRITE:
+ set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+ set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
+ set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
+ set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
+ break;
+ case UPDATE_PARITY:
+ set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+ set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
+ break;
+ }
+}
+
+
+/* Compute one missing block */
+static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
+{
+ raid6_conf_t *conf = sh->raid_conf;
+ int i, count, disks = conf->raid_disks;
+ void *ptr[MAX_XOR_BLOCKS], *p;
+ int pd_idx = sh->pd_idx;
+ int qd_idx = raid6_next_disk(pd_idx, disks);
+
+ PRINTK("compute_block_1, stripe %llu, idx %d\n",
+ (unsigned long long)sh->sector, dd_idx);
+
+ if ( dd_idx == qd_idx ) {
+ /* We're actually computing the Q drive */
+ compute_parity6(sh, UPDATE_PARITY);
+ } else {
+ ptr[0] = page_address(sh->dev[dd_idx].page);
+ if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
+ count = 1;
+ for (i = disks ; i--; ) {
+ if (i == dd_idx || i == qd_idx)
+ continue;
+ p = page_address(sh->dev[i].page);
+ if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
+ ptr[count++] = p;
+ else
+ printk("compute_block() %d, stripe %llu, %d"
+ " not present\n", dd_idx,
+ (unsigned long long)sh->sector, i);
+
+ check_xor();
+ }
+ if (count != 1)
+ xor_block(count, STRIPE_SIZE, ptr);
+ if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
+ else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
+ }
+}
+
+/* Compute two missing blocks */
+static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
+{
+ raid6_conf_t *conf = sh->raid_conf;
+ int i, count, disks = conf->raid_disks;
+ int pd_idx = sh->pd_idx;
+ int qd_idx = raid6_next_disk(pd_idx, disks);
+ int d0_idx = raid6_next_disk(qd_idx, disks);
+ int faila, failb;
+
+ /* faila and failb are disk numbers relative to d0_idx */
+ /* pd_idx become disks-2 and qd_idx become disks-1 */
+ faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
+ failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
+
+ BUG_ON(faila == failb);
+ if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
+
+ PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
+ (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
+
+ if ( failb == disks-1 ) {
+ /* Q disk is one of the missing disks */
+ if ( faila == disks-2 ) {
+ /* Missing P+Q, just recompute */
+ compute_parity6(sh, UPDATE_PARITY);
+ return;
+ } else {
+ /* We're missing D+Q; recompute D from P */
+ compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
+ compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
+ return;
+ }
+ }
+
+ /* We're missing D+P or D+D; build pointer table */
+ {
+ /**** FIX THIS: This could be very bad if disks is close to 256 ****/
+ void *ptrs[disks];
+
+ count = 0;
+ i = d0_idx;
+ do {
+ ptrs[count++] = page_address(sh->dev[i].page);
+ i = raid6_next_disk(i, disks);
+ if (i != dd_idx1 && i != dd_idx2 &&
+ !test_bit(R5_UPTODATE, &sh->dev[i].flags))
+ printk("compute_2 with missing block %d/%d\n", count, i);
+ } while ( i != d0_idx );
+
+ if ( failb == disks-2 ) {
+ /* We're missing D+P. */
+ raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
+ } else {
+ /* We're missing D+D. */
+ raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
+ }
+
+ /* Both the above update both missing blocks */
+ set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
+ set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
+ }
+}
+
+
+
/*
* Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
+ * toread/towrite point to the first in a chain.
* The bi_next chain must be in order.
*/
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
@@ -1031,6 +1301,13 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
static void end_reshape(raid5_conf_t *conf);
+static int page_is_zero(struct page *p)
+{
+ char *a = page_address(p);
+ return ((*(u32*)a) == 0 &&
+ memcmp(a, a+4, STRIPE_SIZE-4)==0);
+}
+
static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
{
int sectors_per_chunk = conf->chunk_size >> 9;
@@ -1062,7 +1339,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
*
*/
-static void handle_stripe(struct stripe_head *sh)
+static void handle_stripe5(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks;
@@ -1394,7 +1671,7 @@ static void handle_stripe(struct stripe_head *sh)
if (locked == 0 && (rcw == 0 ||rmw == 0) &&
!test_bit(STRIPE_BIT_DELAY, &sh->state)) {
PRINTK("Computing parity...\n");
- compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
+ compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
/* now every locked buffer is ready to be written */
for (i=disks; i--;)
if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
@@ -1421,13 +1698,10 @@ static void handle_stripe(struct stripe_head *sh)
!test_bit(STRIPE_INSYNC, &sh->state)) {
set_bit(STRIPE_HANDLE, &sh->state);
if (failed == 0) {
- char *pagea;
BUG_ON(uptodate != disks);
- compute_parity(sh, CHECK_PARITY);
+ compute_parity5(sh, CHECK_PARITY);
uptodate--;
- pagea = page_address(sh->dev[sh->pd_idx].page);
- if ((*(u32*)pagea) == 0 &&
- !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
+ if (page_is_zero(sh->dev[sh->pd_idx].page)) {
/* parity is correct (on disc, not in buffer any more) */
set_bit(STRIPE_INSYNC, &sh->state);
} else {
@@ -1487,7 +1761,7 @@ static void handle_stripe(struct stripe_head *sh)
/* Need to write out all blocks after computing parity */
sh->disks = conf->raid_disks;
sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
- compute_parity(sh, RECONSTRUCT_WRITE);
+ compute_parity5(sh, RECONSTRUCT_WRITE);
for (i= conf->raid_disks; i--;) {
set_bit(R5_LOCKED, &sh->dev[i].flags);
locked++;
@@ -1615,6 +1889,569 @@ static void handle_stripe(struct stripe_head *sh)
}
}
+static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+{
+ raid6_conf_t *conf = sh->raid_conf;
+ int disks = conf->raid_disks;
+ struct bio *return_bi= NULL;
+ struct bio *bi;
+ int i;
+ int syncing;
+ int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
+ int non_overwrite = 0;
+ int failed_num[2] = {0, 0};
+ struct r5dev *dev, *pdev, *qdev;
+ int pd_idx = sh->pd_idx;
+ int qd_idx = raid6_next_disk(pd_idx, disks);
+ int p_failed, q_failed;
+
+ PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
+ (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
+ pd_idx, qd_idx);
+
+ spin_lock(&sh->lock);
+ clear_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
+
+ syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ /* Now to look around and see what can be done */
+
+ rcu_read_lock();
+ for (i=disks; i--; ) {
+ mdk_rdev_t *rdev;
+ dev = &sh->dev[i];
+ clear_bit(R5_Insync, &dev->flags);
+
+ PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
+ i, dev->flags, dev->toread, dev->towrite, dev->written);
+ /* maybe we can reply to a read */
+ if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
+ struct bio *rbi, *rbi2;
+ PRINTK("Return read for disc %d\n", i);
+ spin_lock_irq(&conf->device_lock);
+ rbi = dev->toread;
+ dev->toread = NULL;
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&conf->wait_for_overlap);
+ spin_unlock_irq(&conf->device_lock);
+ while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
+ copy_data(0, rbi, dev->page, dev->sector);
+ rbi2 = r5_next_bio(rbi, dev->sector);
+ spin_lock_irq(&conf->device_lock);
+ if (--rbi->bi_phys_segments == 0) {
+ rbi->bi_next = return_bi;
+ return_bi = rbi;
+ }
+ spin_unlock_irq(&conf->device_lock);
+ rbi = rbi2;
+ }
+ }
+
+ /* now count some things */
+ if (test_bit(R5_LOCKED, &dev->flags)) locked++;
+ if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
+
+
+ if (dev->toread) to_read++;
+ if (dev->towrite) {
+ to_write++;
+ if (!test_bit(R5_OVERWRITE, &dev->flags))
+ non_overwrite++;
+ }
+ if (dev->written) written++;
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (!rdev || !test_bit(In_sync, &rdev->flags)) {
+ /* The ReadError flag will just be confusing now */
+ clear_bit(R5_ReadError, &dev->flags);
+ clear_bit(R5_ReWrite, &dev->flags);
+ }
+ if (!rdev || !test_bit(In_sync, &rdev->flags)
+ || test_bit(R5_ReadError, &dev->flags)) {
+ if ( failed < 2 )
+ failed_num[failed] = i;
+ failed++;
+ } else
+ set_bit(R5_Insync, &dev->flags);
+ }
+ rcu_read_unlock();
+ PRINTK("locked=%d uptodate=%d to_read=%d"
+ " to_write=%d failed=%d failed_num=%d,%d\n",
+ locked, uptodate, to_read, to_write, failed,
+ failed_num[0], failed_num[1]);
+ /* check if the array has lost >2 devices and, if so, some requests might
+ * need to be failed
+ */
+ if (failed > 2 && to_read+to_write+written) {
+ for (i=disks; i--; ) {
+ int bitmap_end = 0;
+
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ mdk_rdev_t *rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(In_sync, &rdev->flags))
+ /* multiple read failures in one stripe */
+ md_error(conf->mddev, rdev);
+ rcu_read_unlock();
+ }
+
+ spin_lock_irq(&conf->device_lock);
+ /* fail all writes first */
+ bi = sh->dev[i].towrite;
+ sh->dev[i].towrite = NULL;
+ if (bi) { to_write--; bitmap_end = 1; }
+
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wake_up(&conf->wait_for_overlap);
+
+ while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
+ struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
+ clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ if (--bi->bi_phys_segments == 0) {
+ md_write_end(conf->mddev);
+ bi->bi_next = return_bi;
+ return_bi = bi;
+ }
+ bi = nextbi;
+ }
+ /* and fail all 'written' */
+ bi = sh->dev[i].written;
+ sh->dev[i].written = NULL;
+ if (bi) bitmap_end = 1;
+ while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
+ struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
+ clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ if (--bi->bi_phys_segments == 0) {
+ md_write_end(conf->mddev);
+ bi->bi_next = return_bi;
+ return_bi = bi;
+ }
+ bi = bi2;
+ }
+
+ /* fail any reads if this device is non-operational */
+ if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+ test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ bi = sh->dev[i].toread;
+ sh->dev[i].toread = NULL;
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wake_up(&conf->wait_for_overlap);
+ if (bi) to_read--;
+ while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
+ struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
+ clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ if (--bi->bi_phys_segments == 0) {
+ bi->bi_next = return_bi;
+ return_bi = bi;
+ }
+ bi = nextbi;
+ }
+ }
+ spin_unlock_irq(&conf->device_lock);
+ if (bitmap_end)
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0, 0);
+ }
+ }
+ if (failed > 2 && syncing) {
+ md_done_sync(conf->mddev, STRIPE_SECTORS,0);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+ syncing = 0;
+ }
+
+ /*
+ * might be able to return some write requests if the parity blocks
+ * are safe, or on a failed drive
+ */
+ pdev = &sh->dev[pd_idx];
+ p_failed = (failed >= 1 && failed_num[0] == pd_idx)
+ || (failed >= 2 && failed_num[1] == pd_idx);
+ qdev = &sh->dev[qd_idx];
+ q_failed = (failed >= 1 && failed_num[0] == qd_idx)
+ || (failed >= 2 && failed_num[1] == qd_idx);
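+ /* p_failed/q_failed: is the P or Q device among the (at most two)
+ * failures recorded for this stripe? */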
+
+ if ( written &&
+ ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ && !test_bit(R5_LOCKED, &pdev->flags)
+ && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
+ ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ && !test_bit(R5_LOCKED, &qdev->flags)
+ && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
+ /* any written block on an uptodate or failed drive can be
+ * returned. Note that if we 'wrote' to a failed drive,
+ * it will be UPTODATE, but never LOCKED, so we don't need
+ * to test 'failed' directly.
+ */
+ for (i=disks; i--; )
+ if (sh->dev[i].written) {
+ dev = &sh->dev[i];
+ if (!test_bit(R5_LOCKED, &dev->flags) &&
+ test_bit(R5_UPTODATE, &dev->flags) ) {
+ /* We can return any write requests */
+ int bitmap_end = 0;
+ struct bio *wbi, *wbi2;
+ PRINTK("Return write for stripe %llu disc %d\n",
+ (unsigned long long)sh->sector, i);
+ spin_lock_irq(&conf->device_lock);
+ wbi = dev->written;
+ dev->written = NULL;
+ while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
+ wbi2 = r5_next_bio(wbi, dev->sector);
+ if (--wbi->bi_phys_segments == 0) {
+ md_write_end(conf->mddev);
+ wbi->bi_next = return_bi;
+ return_bi = wbi;
+ }
+ wbi = wbi2;
+ }
+ if (dev->towrite == NULL)
+ bitmap_end = 1;
+ spin_unlock_irq(&conf->device_lock);
+ if (bitmap_end)
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state), 0);
+ }
+ }
+ }
+
+ /* Now we might consider reading some blocks, either to check/generate
+ * parity, or to satisfy requests
+ * or to load a block that is being partially written.
+ */
+ if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
+ for (i=disks; i--;) {
+ dev = &sh->dev[i];
+ if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
+ (dev->toread ||
+ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+ syncing ||
+ (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
+ (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
+ )
+ ) {
+ /* we would like to get this block, possibly
+ * by computing it, but we might not be able to
+ */
+ if (uptodate == disks-1) {
+ PRINTK("Computing stripe %llu block %d\n",
+ (unsigned long long)sh->sector, i);
+ compute_block_1(sh, i, 0);
+ uptodate++;
+ } else if ( uptodate == disks-2 && failed >= 2 ) {
+ /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
+ int other;
+ for (other=disks; other--;) {
+ if ( other == i )
+ continue;
+ if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
+ break;
+ }
+ BUG_ON(other < 0);
+ PRINTK("Computing stripe %llu blocks %d,%d\n",
+ (unsigned long long)sh->sector, i, other);
+ compute_block_2(sh, i, other);
+ uptodate += 2;
+ } else if (test_bit(R5_Insync, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+#if 0
+ /* if I am just reading this block and we don't have
+ a failed drive, or any pending writes then sidestep the cache */
+ if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
+ ! syncing && !failed && !to_write) {
+ sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
+ sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
+ }
+#endif
+ locked++;
+ PRINTK("Reading block %d (sync=%d)\n",
+ i, syncing);
+ }
+ }
+ }
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+
+ /* now to consider writing and what else, if anything should be read */
+ if (to_write) {
+ int rcw=0, must_compute=0;
+ for (i=disks ; i--;) {
+ dev = &sh->dev[i];
+ /* Would I have to read this buffer for reconstruct_write */
+ if (!test_bit(R5_OVERWRITE, &dev->flags)
+ && i != pd_idx && i != qd_idx
+ && (!test_bit(R5_LOCKED, &dev->flags)
+#if 0
+ || sh->bh_page[i] != bh->b_page
+#endif
+ ) &&
+ !test_bit(R5_UPTODATE, &dev->flags)) {
+ if (test_bit(R5_Insync, &dev->flags)) rcw++;
+ else {
+ PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
+ must_compute++;
+ }
+ }
+ }
+ PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
+ (unsigned long long)sh->sector, rcw, must_compute);
+ set_bit(STRIPE_HANDLE, &sh->state);
+
+ if (rcw > 0)
+ /* want reconstruct write, but need to get some data */
+ for (i=disks; i--;) {
+ dev = &sh->dev[i];
+ if (!test_bit(R5_OVERWRITE, &dev->flags)
+ && !(failed == 0 && (i == pd_idx || i == qd_idx))
+ && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Insync, &dev->flags)) {
+ if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ {
+ PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
+ (unsigned long long)sh->sector, i);
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ locked++;
+ } else {
+ PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
+ (unsigned long long)sh->sector, i);
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ }
+ }
+ /* now if nothing is locked, and if we have enough data, we can start a write request */
+ if (locked == 0 && rcw == 0 &&
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
+ if ( must_compute > 0 ) {
+ /* We have failed blocks and need to compute them */
+ switch ( failed ) {
+ case 0: BUG();
+ case 1: compute_block_1(sh, failed_num[0], 0); break;
+ case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
+ default: BUG(); /* This request should have been failed? */
+ }
+ }
+
+ PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
+ compute_parity6(sh, RECONSTRUCT_WRITE);
+ /* now every locked buffer is ready to be written */
+ for (i=disks; i--;)
+ if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
+ PRINTK("Writing stripe %llu block %d\n",
+ (unsigned long long)sh->sector, i);
+ locked++;
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ }
+ /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
+ set_bit(STRIPE_INSYNC, &sh->state);
+
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+ }
+ }
+
+ /* maybe we need to check and possibly fix the parity for this stripe
+ * Any reads will already have been scheduled, so we just see if enough data
+ * is available
+ */
+ if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ int update_p = 0, update_q = 0;
+ struct r5dev *dev;
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+
+ BUG_ON(failed>2);
+ BUG_ON(uptodate < disks);
+ /* Want to check and possibly repair P and Q.
+ * However there could be one 'failed' device, in which
+ * case we can only check one of them, possibly using the
+ * other to generate missing data
+ */
+
+ /* If !tmp_page, we cannot do the calculations,
+ * but as we have set STRIPE_HANDLE, we will soon be called
+ * by handle_stripe with a tmp_page - just wait until then.
+ */
+ if (tmp_page) {
+ if (failed == q_failed) {
+ /* The only possible failed device holds 'Q', so it makes
+ * sense to check P (If anything else were failed, we would
+ * have used P to recreate it).
+ */
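+ /* compute_block_1() with nozero==1 XORs the data blocks into the
+ * existing P page without clearing it first, so the result is
+ * all-zero exactly when the stored P was correct; otherwise a
+ * second call regenerates P from scratch.
+ */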
+ compute_block_1(sh, pd_idx, 1);
+ if (!page_is_zero(sh->dev[pd_idx].page)) {
+ compute_block_1(sh,pd_idx,0);
+ update_p = 1;
+ }
+ }
+ if (!q_failed && failed < 2) {
+ /* q is not failed, and we didn't use it to generate
+ * anything, so it makes sense to check it
+ */
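+ /* Save the current Q in tmp_page, regenerate P and Q in place
+ * from the data blocks, then compare the regenerated Q against
+ * the saved copy.
+ */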
+ memcpy(page_address(tmp_page),
+ page_address(sh->dev[qd_idx].page),
+ STRIPE_SIZE);
+ compute_parity6(sh, UPDATE_PARITY);
+ if (memcmp(page_address(tmp_page),
+ page_address(sh->dev[qd_idx].page),
+ STRIPE_SIZE)!= 0) {
+ clear_bit(STRIPE_INSYNC, &sh->state);
+ update_q = 1;
+ }
+ }
+ if (update_p || update_q) {
+ conf->mddev->resync_mismatches += STRIPE_SECTORS;
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ update_p = update_q = 0;
+ }
+
+ /* now write out any block on a failed drive,
+ * or P or Q if they need it
+ */
+
+ if (failed == 2) {
+ dev = &sh->dev[failed_num[1]];
+ locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ if (failed >= 1) {
+ dev = &sh->dev[failed_num[0]];
+ locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+
+ if (update_p) {
+ dev = &sh->dev[pd_idx];
+ locked ++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ if (update_q) {
+ dev = &sh->dev[qd_idx];
+ locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ clear_bit(STRIPE_DEGRADED, &sh->state);
+
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+ }
+
+ if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+ md_done_sync(conf->mddev, STRIPE_SECTORS,1);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+ }
+
+ /* If the failed drives have only suffered a ReadError, then we might need
+ * to progress the repair/check process
+ */
+ if (failed <= 2 && ! conf->mddev->ro)
+ for (i=0; i<failed;i++) {
+ dev = &sh->dev[failed_num[i]];
+ if (test_bit(R5_ReadError, &dev->flags)
+ && !test_bit(R5_LOCKED, &dev->flags)
+ && test_bit(R5_UPTODATE, &dev->flags)
+ ) {
+ if (!test_bit(R5_ReWrite, &dev->flags)) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_ReWrite, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ } else {
+ /* let's read it back */
+ set_bit(R5_Wantread, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ }
+ }
+ }
+ spin_unlock(&sh->lock);
+
+ while ((bi=return_bi)) {
+ int bytes = bi->bi_size;
+
+ return_bi = bi->bi_next;
+ bi->bi_next = NULL;
+ bi->bi_size = 0;
+ bi->bi_end_io(bi, bytes, 0);
+ }
+ for (i=disks; i-- ;) {
+ int rw;
+ struct bio *bi;
+ mdk_rdev_t *rdev;
+ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
+ rw = 1;
+ else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
+ rw = 0;
+ else
+ continue;
+
+ bi = &sh->dev[i].req;
+
+ bi->bi_rw = rw;
+ if (rw)
+ bi->bi_end_io = raid5_end_write_request;
+ else
+ bi->bi_end_io = raid5_end_read_request;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
+ if (rdev)
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+
+ if (rdev) {
+ if (syncing)
+ md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+
+ bi->bi_bdev = rdev->bdev;
+ PRINTK("for %llu schedule op %ld on disc %d\n",
+ (unsigned long long)sh->sector, bi->bi_rw, i);
+ atomic_inc(&sh->count);
+ bi->bi_sector = sh->sector + rdev->data_offset;
+ bi->bi_flags = 1 << BIO_UPTODATE;
+ bi->bi_vcnt = 1;
+ bi->bi_max_vecs = 1;
+ bi->bi_idx = 0;
+ bi->bi_io_vec = &sh->dev[i].vec;
+ bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ bi->bi_io_vec[0].bv_offset = 0;
+ bi->bi_size = STRIPE_SIZE;
+ bi->bi_next = NULL;
+ if (rw == WRITE &&
+ test_bit(R5_ReWrite, &sh->dev[i].flags))
+ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
+ generic_make_request(bi);
+ } else {
+ if (rw == 1)
+ set_bit(STRIPE_DEGRADED, &sh->state);
+ PRINTK("skip op %ld on disc %d for sector %llu\n",
+ bi->bi_rw, i, (unsigned long long)sh->sector);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ }
+}
+
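+/* tmp_page is scratch space for the RAID6 parity check.  make_request and
+ * sync_request pass NULL; if the check is needed, handle_stripe6() defers
+ * it until raid5d() calls back with conf->spare_page.
+ */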
+static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
+{
+ if (sh->raid_conf->level == 6)
+ handle_stripe6(sh, tmp_page);
+ else
+ handle_stripe5(sh);
+}
+
+
+
static void raid5_activate_delayed(raid5_conf_t *conf)
{
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
@@ -1753,7 +2590,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
- int disks;
+ int disks, data_disks;
retry:
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
@@ -1781,7 +2618,9 @@ static int make_request(request_queue_t *q, struct bio * bi)
}
spin_unlock_irq(&conf->device_lock);
}
- new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
+ data_disks = disks - conf->max_degraded;
+
+ new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
&dd_idx, &pd_idx, conf);
PRINTK("raid5: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
@@ -1833,7 +2672,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
}
finish_wait(&conf->wait_for_overlap, &w);
raid5_plug_device(conf);
- handle_stripe(sh);
+ handle_stripe(sh, NULL);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -1849,7 +2688,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
if (remaining == 0) {
int bytes = bi->bi_size;
- if ( bio_data_dir(bi) == WRITE )
+ if ( rw == WRITE )
md_write_end(mddev);
bi->bi_size = 0;
bi->bi_end_io(bi, bytes, 0);
@@ -1857,17 +2696,141 @@ static int make_request(request_queue_t *q, struct bio * bi)
return 0;
}
-/* FIXME go_faster isn't used */
-static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
+ /* reshaping is quite different to recovery/resync so it is
+ * handled quite separately ... here.
+ *
+ * On each call to sync_request, we gather one chunk worth of
+ * destination stripes and flag them as expanding.
+ * Then we find all the source stripes and request reads.
+ * As the reads complete, handle_stripe will copy the data
+ * into the destination stripe and release that stripe.
+ */
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
struct stripe_head *sh;
int pd_idx;
sector_t first_sector, last_sector;
+ int raid_disks;
+ int data_disks;
+ int i;
+ int dd_idx;
+ sector_t writepos, safepos, gap;
+
+ if (sector_nr == 0 &&
+ conf->expand_progress != 0) {
+ /* restarting in the middle, skip the initial sectors */
+ sector_nr = conf->expand_progress;
+ sector_div(sector_nr, conf->raid_disks-1);
+ *skipped = 1;
+ return sector_nr;
+ }
+
+ /* we update the metadata when there is more than 3Meg
+ * in the block range (that is rather arbitrary, should
+ * probably be time based) or when the data about to be
+ * copied would over-write the source of the data at
+ * the front of the range.
+ * i.e. one new_stripe forward from expand_progress new_maps
+ * to after where expand_lo old_maps to
+ */
+ writepos = conf->expand_progress +
+ conf->chunk_size/512*(conf->raid_disks-1);
+ sector_div(writepos, conf->raid_disks-1);
+ safepos = conf->expand_lo;
+ sector_div(safepos, conf->previous_raid_disks-1);
+ gap = conf->expand_progress - conf->expand_lo;
+
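+ /* 3000*2 sectors is ~3MB; gap is in array sectors, so the threshold
+ * below amounts to roughly 3MB per data disk.
+ */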
+ if (writepos >= safepos ||
+ gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+ /* Cannot proceed until we've updated the superblock... */
+ wait_event(conf->wait_for_overlap,
+ atomic_read(&conf->reshape_stripes)==0);
+ mddev->reshape_position = conf->expand_progress;
+ mddev->sb_dirty = 1;
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
+ kthread_should_stop());
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_lo = mddev->reshape_position;
+ spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_for_overlap);
+ }
+
+ for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
+ int j;
+ int skipped = 0;
+ pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
+ sh = get_active_stripe(conf, sector_nr+i,
+ conf->raid_disks, pd_idx, 0);
+ set_bit(STRIPE_EXPANDING, &sh->state);
+ atomic_inc(&conf->reshape_stripes);
+ /* If any of this stripe is beyond the end of the old
+ * array, then we need to zero those blocks
+ */
+ for (j=sh->disks; j--;) {
+ sector_t s;
+ if (j == sh->pd_idx)
+ continue;
+ s = compute_blocknr(sh, j);
+ if (s < (mddev->array_size<<1)) {
+ skipped = 1;
+ continue;
+ }
+ memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
+ set_bit(R5_Expanded, &sh->dev[j].flags);
+ set_bit(R5_UPTODATE, &sh->dev[j].flags);
+ }
+ if (!skipped) {
+ set_bit(STRIPE_EXPAND_READY, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ release_stripe(sh);
+ }
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+ spin_unlock_irq(&conf->device_lock);
+ /* Ok, those stripes are ready. We can start scheduling
+ * reads on the source stripes.
+ * The source stripes are determined by mapping the first and last
+ * block on the destination stripes.
+ */
+ raid_disks = conf->previous_raid_disks;
+ data_disks = raid_disks - 1;
+ first_sector =
+ raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+ raid_disks, data_disks,
+ &dd_idx, &pd_idx, conf);
+ last_sector =
+ raid5_compute_sector((sector_nr+conf->chunk_size/512)
+ *(conf->raid_disks-1) -1,
+ raid_disks, data_disks,
+ &dd_idx, &pd_idx, conf);
+ if (last_sector >= (mddev->size<<1))
+ last_sector = (mddev->size<<1)-1;
+ while (first_sector <= last_sector) {
+ pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+ sh = get_active_stripe(conf, first_sector,
+ conf->previous_raid_disks, pd_idx, 0);
+ set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+ first_sector += STRIPE_SECTORS;
+ }
+ return conf->chunk_size>>9;
+}
+
+/* FIXME go_faster isn't used */
+static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+{
+ raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ struct stripe_head *sh;
+ int pd_idx;
int raid_disks = conf->raid_disks;
- int data_disks = raid_disks-1;
sector_t max_sector = mddev->size << 1;
int sync_blocks;
+ int still_degraded = 0;
+ int i;
if (sector_nr >= max_sector) {
/* just being told to finish up .. nothing much to do */
@@ -1880,134 +2843,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (mddev->curr_resync < max_sector) /* aborted */
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
- else /* compelted sync */
+ else /* completed sync */
conf->fullsync = 0;
bitmap_close_sync(mddev->bitmap);
return 0;
}
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
- /* reshaping is quite different to recovery/resync so it is
- * handled quite separately ... here.
- *
- * On each call to sync_request, we gather one chunk worth of
- * destination stripes and flag them as expanding.
- * Then we find all the source stripes and request reads.
- * As the reads complete, handle_stripe will copy the data
- * into the destination stripe and release that stripe.
- */
- int i;
- int dd_idx;
- sector_t writepos, safepos, gap;
-
- if (sector_nr == 0 &&
- conf->expand_progress != 0) {
- /* restarting in the middle, skip the initial sectors */
- sector_nr = conf->expand_progress;
- sector_div(sector_nr, conf->raid_disks-1);
- *skipped = 1;
- return sector_nr;
- }
-
- /* we update the metadata when there is more than 3Meg
- * in the block range (that is rather arbitrary, should
- * probably be time based) or when the data about to be
- * copied would over-write the source of the data at
- * the front of the range.
- * i.e. one new_stripe forward from expand_progress new_maps
- * to after where expand_lo old_maps to
- */
- writepos = conf->expand_progress +
- conf->chunk_size/512*(conf->raid_disks-1);
- sector_div(writepos, conf->raid_disks-1);
- safepos = conf->expand_lo;
- sector_div(safepos, conf->previous_raid_disks-1);
- gap = conf->expand_progress - conf->expand_lo;
-
- if (writepos >= safepos ||
- gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
- /* Cannot proceed until we've updated the superblock... */
- wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes)==0);
- mddev->reshape_position = conf->expand_progress;
- mddev->sb_dirty = 1;
- md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
- kthread_should_stop());
- spin_lock_irq(&conf->device_lock);
- conf->expand_lo = mddev->reshape_position;
- spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_for_overlap);
- }
-
- for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
- int j;
- int skipped = 0;
- pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
- sh = get_active_stripe(conf, sector_nr+i,
- conf->raid_disks, pd_idx, 0);
- set_bit(STRIPE_EXPANDING, &sh->state);
- atomic_inc(&conf->reshape_stripes);
- /* If any of this stripe is beyond the end of the old
- * array, then we need to zero those blocks
- */
- for (j=sh->disks; j--;) {
- sector_t s;
- if (j == sh->pd_idx)
- continue;
- s = compute_blocknr(sh, j);
- if (s < (mddev->array_size<<1)) {
- skipped = 1;
- continue;
- }
- memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
- set_bit(R5_Expanded, &sh->dev[j].flags);
- set_bit(R5_UPTODATE, &sh->dev[j].flags);
- }
- if (!skipped) {
- set_bit(STRIPE_EXPAND_READY, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- release_stripe(sh);
- }
- spin_lock_irq(&conf->device_lock);
- conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
- spin_unlock_irq(&conf->device_lock);
- /* Ok, those stripe are ready. We can start scheduling
- * reads on the source stripes.
- * The source stripes are determined by mapping the first and last
- * block on the destination stripes.
- */
- raid_disks = conf->previous_raid_disks;
- data_disks = raid_disks - 1;
- first_sector =
- raid5_compute_sector(sector_nr*(conf->raid_disks-1),
- raid_disks, data_disks,
- &dd_idx, &pd_idx, conf);
- last_sector =
- raid5_compute_sector((sector_nr+conf->chunk_size/512)
- *(conf->raid_disks-1) -1,
- raid_disks, data_disks,
- &dd_idx, &pd_idx, conf);
- if (last_sector >= (mddev->size<<1))
- last_sector = (mddev->size<<1)-1;
- while (first_sector <= last_sector) {
- pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
- sh = get_active_stripe(conf, first_sector,
- conf->previous_raid_disks, pd_idx, 0);
- set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
- first_sector += STRIPE_SECTORS;
- }
- return conf->chunk_size>>9;
- }
- /* if there is 1 or more failed drives and we are trying
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ return reshape_request(mddev, sector_nr, skipped);
+
+ /* if there are too many failed drives and we are trying
* to resync, then assert that we are finished, because there is
* nothing we can do.
*/
- if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ if (mddev->degraded >= conf->max_degraded &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
sector_t rv = (mddev->size << 1) - sector_nr;
*skipped = 1;
return rv;
@@ -2026,17 +2877,26 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (sh == NULL) {
sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
/* make sure we don't swamp the stripe cache if someone else
- * is trying to get access
+ * is trying to get access
*/
schedule_timeout_uninterruptible(1);
}
- bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
- spin_lock(&sh->lock);
+ /* Need to check if the array will still be degraded after recovery/resync
+ * We don't need to check the 'failed' flag as when that gets set,
+ * recovery aborts.
+ */
+ for (i=0; i<mddev->raid_disks; i++)
+ if (conf->disks[i].rdev == NULL)
+ still_degraded = 1;
+
+ bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+
+ spin_lock(&sh->lock);
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
spin_unlock(&sh->lock);
- handle_stripe(sh);
+ handle_stripe(sh, NULL);
release_stripe(sh);
return STRIPE_SECTORS;
@@ -2091,7 +2951,7 @@ static void raid5d (mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
handled++;
- handle_stripe(sh);
+ handle_stripe(sh, conf->spare_page);
release_stripe(sh);
spin_lock_irq(&conf->device_lock);
@@ -2181,8 +3041,8 @@ static int run(mddev_t *mddev)
struct disk_info *disk;
struct list_head *tmp;
- if (mddev->level != 5 && mddev->level != 4) {
- printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
+ if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
+ printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
mdname(mddev), mddev->level);
return -EIO;
}
@@ -2251,6 +3111,11 @@ static int run(mddev_t *mddev)
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
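+ /* handle_stripe6() needs one scratch page to verify Q during a
+ * resync/check */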
+ if (mddev->level == 6) {
+ conf->spare_page = alloc_page(GFP_KERNEL);
+ if (!conf->spare_page)
+ goto abort;
+ }
spin_lock_init(&conf->device_lock);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
@@ -2282,12 +3147,16 @@ static int run(mddev_t *mddev)
}
/*
- * 0 for a fully functional array, 1 for a degraded array.
+ * 0 for a fully functional array, 1 or 2 for a degraded array.
*/
mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
conf->mddev = mddev;
conf->chunk_size = mddev->chunk_size;
conf->level = mddev->level;
+ if (conf->level == 6)
+ conf->max_degraded = 2;
+ else
+ conf->max_degraded = 1;
conf->algorithm = mddev->layout;
conf->max_nr_stripes = NR_STRIPES;
conf->expand_progress = mddev->reshape_position;
@@ -2296,6 +3165,11 @@ static int run(mddev_t *mddev)
mddev->size &= ~(mddev->chunk_size/1024 -1);
mddev->resync_max_sectors = mddev->size << 1;
+ if (conf->level == 6 && conf->raid_disks < 4) {
+ printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+ mdname(mddev), conf->raid_disks);
+ goto abort;
+ }
if (!conf->chunk_size || conf->chunk_size % 4) {
printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
conf->chunk_size, mdname(mddev));
@@ -2307,14 +3181,14 @@ static int run(mddev_t *mddev)
conf->algorithm, mdname(mddev));
goto abort;
}
- if (mddev->degraded > 1) {
+ if (mddev->degraded > conf->max_degraded) {
printk(KERN_ERR "raid5: not enough operational devices for %s"
" (%d/%d failed)\n",
mdname(mddev), conf->failed_disks, conf->raid_disks);
goto abort;
}
- if (mddev->degraded == 1 &&
+ if (mddev->degraded > 0 &&
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
printk(KERN_WARNING
@@ -2379,11 +3253,12 @@ static int run(mddev_t *mddev)
}
/* read-ahead size must cover two whole stripes, which is
- * 2 * (n-1) * chunksize where 'n' is the number of raid devices
+ * 2 * (datadisks) * chunksize, where 'datadisks' is the number of raid devices minus max_degraded
*/
{
- int stripe = (mddev->raid_disks-1) * mddev->chunk_size
- / PAGE_SIZE;
+ int data_disks = conf->previous_raid_disks - conf->max_degraded;
+ int stripe = data_disks *
+ (mddev->chunk_size / PAGE_SIZE);
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
}
@@ -2393,12 +3268,14 @@ static int run(mddev_t *mddev)
mddev->queue->unplug_fn = raid5_unplug_device;
mddev->queue->issue_flush_fn = raid5_issue_flush;
- mddev->array_size = mddev->size * (conf->previous_raid_disks - 1);
+ mddev->array_size = mddev->size * (conf->previous_raid_disks -
+ conf->max_degraded);
return 0;
abort:
if (conf) {
print_raid5_conf(conf);
+ safe_put_page(conf->spare_page);
kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
@@ -2427,23 +3304,23 @@ static int stop(mddev_t *mddev)
}
#if RAID5_DEBUG
-static void print_sh (struct stripe_head *sh)
+static void print_sh (struct seq_file *seq, struct stripe_head *sh)
{
int i;
- printk("sh %llu, pd_idx %d, state %ld.\n",
- (unsigned long long)sh->sector, sh->pd_idx, sh->state);
- printk("sh %llu, count %d.\n",
- (unsigned long long)sh->sector, atomic_read(&sh->count));
- printk("sh %llu, ", (unsigned long long)sh->sector);
+ seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
+ (unsigned long long)sh->sector, sh->pd_idx, sh->state);
+ seq_printf(seq, "sh %llu, count %d.\n",
+ (unsigned long long)sh->sector, atomic_read(&sh->count));
+ seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
for (i = 0; i < sh->disks; i++) {
- printk("(cache%d: %p %ld) ",
- i, sh->dev[i].page, sh->dev[i].flags);
+ seq_printf(seq, "(cache%d: %p %ld) ",
+ i, sh->dev[i].page, sh->dev[i].flags);
}
- printk("\n");
+ seq_printf(seq, "\n");
}
-static void printall (raid5_conf_t *conf)
+static void printall (struct seq_file *seq, raid5_conf_t *conf)
{
struct stripe_head *sh;
struct hlist_node *hn;
@@ -2454,7 +3331,7 @@ static void printall (raid5_conf_t *conf)
hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
if (sh->raid_conf != conf)
continue;
- print_sh(sh);
+ print_sh(seq, sh);
}
}
spin_unlock_irq(&conf->device_lock);
@@ -2474,9 +3351,8 @@ static void status (struct seq_file *seq, mddev_t *mddev)
test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
#if RAID5_DEBUG
-#define D(x) \
- seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
- printall(conf);
+ seq_printf (seq, "\n");
+ printall(seq, conf);
#endif
}
@@ -2560,14 +3436,20 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int disk;
struct disk_info *p;
- if (mddev->degraded > 1)
+ if (mddev->degraded > conf->max_degraded)
/* no point adding a device */
return 0;
/*
- * find the disk ...
+ * find the disk ... but prefer rdev->saved_raid_disk
+ * if possible.
*/
- for (disk=0; disk < conf->raid_disks; disk++)
+ if (rdev->saved_raid_disk >= 0 &&
+ conf->disks[rdev->saved_raid_disk].rdev == NULL)
+ disk = rdev->saved_raid_disk;
+ else
+ disk = 0;
+ for ( ; disk < conf->raid_disks; disk++)
if ((p=conf->disks + disk)->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
@@ -2590,8 +3472,10 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems
* worth it.
*/
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+
sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
- mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
+ mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
set_capacity(mddev->gendisk, mddev->array_size << 1);
mddev->changed = 1;
if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
@@ -2680,6 +3564,7 @@ static int raid5_start_reshape(mddev_t *mddev)
set_bit(In_sync, &rdev->flags);
conf->working_disks++;
added_devices++;
+ rdev->recovery_offset = 0;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
} else
@@ -2731,6 +3616,17 @@ static void end_reshape(raid5_conf_t *conf)
conf->expand_progress = MaxSector;
spin_unlock_irq(&conf->device_lock);
conf->mddev->reshape_position = MaxSector;
+
+ /* read-ahead size must cover two whole stripes, which is
+ * 2 * (datadisks) * chunksize, where 'datadisks' is the number of raid devices minus max_degraded
+ */
+ {
+ int data_disks = conf->previous_raid_disks - conf->max_degraded;
+ int stripe = data_disks *
+ (conf->mddev->chunk_size / PAGE_SIZE);
+ if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ }
}
}
@@ -2762,6 +3658,23 @@ static void raid5_quiesce(mddev_t *mddev, int state)
}
}
+static struct mdk_personality raid6_personality =
+{
+ .name = "raid6",
+ .level = 6,
+ .owner = THIS_MODULE,
+ .make_request = make_request,
+ .run = run,
+ .stop = stop,
+ .status = status,
+ .error_handler = error,
+ .hot_add_disk = raid5_add_disk,
+ .hot_remove_disk= raid5_remove_disk,
+ .spare_active = raid5_spare_active,
+ .sync_request = sync_request,
+ .resize = raid5_resize,
+ .quiesce = raid5_quiesce,
+};
static struct mdk_personality raid5_personality =
{
.name = "raid5",
@@ -2804,6 +3717,12 @@ static struct mdk_personality raid4_personality =
static int __init raid5_init(void)
{
+ int e;
+
+ e = raid6_select_algo();
+ if ( e )
+ return e;
+ register_md_personality(&raid6_personality);
register_md_personality(&raid5_personality);
register_md_personality(&raid4_personality);
return 0;
@@ -2811,6 +3730,7 @@ static int __init raid5_init(void)
static void raid5_exit(void)
{
+ unregister_md_personality(&raid6_personality);
unregister_md_personality(&raid5_personality);
unregister_md_personality(&raid4_personality);
}
@@ -2823,3 +3743,10 @@ MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
+MODULE_ALIAS("md-personality-8"); /* RAID6 */
+MODULE_ALIAS("md-raid6");
+MODULE_ALIAS("md-level-6");
+
+/* This used to be two separate modules; they were: */
+MODULE_ALIAS("raid5");
+MODULE_ALIAS("raid6");
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
deleted file mode 100644
index bc69355e0100..000000000000
--- a/drivers/md/raid6main.c
+++ /dev/null
@@ -1,2427 +0,0 @@
-/*
- * raid6main.c : Multiple Devices driver for Linux
- * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
- * Copyright (C) 1999, 2000 Ingo Molnar
- * Copyright (C) 2002, 2003 H. Peter Anvin
- *
- * RAID-6 management functions. This code is derived from raid5.c.
- * Last merge from raid5.c bkcvs version 1.79 (kernel 2.6.1).
- *
- * Thanks to Penguin Computing for making the RAID-6 development possible
- * by donating a test server!
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/bitops.h>
-#include <asm/atomic.h>
-#include "raid6.h"
-
-#include <linux/raid/bitmap.h>
-
-/*
- * Stripe cache
- */
-
-#define NR_STRIPES 256
-#define STRIPE_SIZE PAGE_SIZE
-#define STRIPE_SHIFT (PAGE_SHIFT - 9)
-#define STRIPE_SECTORS (STRIPE_SIZE>>9)
-#define IO_THRESHOLD 1
-#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
-#define HASH_MASK (NR_HASH - 1)
-
-#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
-
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This macro is used to determine the 'next' bio in the list, given the sector
- * of the current stripe+device
- */
-#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
-/*
- * The following can be used to debug the driver
- */
-#define RAID6_DEBUG 0 /* Extremely verbose printk */
-#define RAID6_PARANOIA 1 /* Check spinlocks */
-#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */
-#if RAID6_PARANOIA && defined(CONFIG_SMP)
-# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
-#else
-# define CHECK_DEVLOCK()
-#endif
-
-#define PRINTK(x...) ((void)(RAID6_DEBUG && printk(KERN_DEBUG x)))
-#if RAID6_DEBUG
-#undef inline
-#undef __inline__
-#define inline
-#define __inline__
-#endif
-
-#if !RAID6_USE_EMPTY_ZERO_PAGE
-/* In .bss so it's zeroed */
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-#endif
-
-static inline int raid6_next_disk(int disk, int raid_disks)
-{
- disk++;
- return (disk < raid_disks) ? disk : 0;
-}
-
-static void print_raid6_conf (raid6_conf_t *conf);
-
-static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
-{
- if (atomic_dec_and_test(&sh->count)) {
- BUG_ON(!list_empty(&sh->lru));
- BUG_ON(atomic_read(&conf->active_stripes)==0);
- if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state))
- list_add_tail(&sh->lru, &conf->delayed_list);
- else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
- conf->seq_write == sh->bm_seq)
- list_add_tail(&sh->lru, &conf->bitmap_list);
- else {
- clear_bit(STRIPE_BIT_DELAY, &sh->state);
- list_add_tail(&sh->lru, &conf->handle_list);
- }
- md_wakeup_thread(conf->mddev->thread);
- } else {
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
- list_add_tail(&sh->lru, &conf->inactive_list);
- atomic_dec(&conf->active_stripes);
- if (!conf->inactive_blocked ||
- atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
- wake_up(&conf->wait_for_stripe);
- }
- }
-}
-static void release_stripe(struct stripe_head *sh)
-{
- raid6_conf_t *conf = sh->raid_conf;
- unsigned long flags;
-
- spin_lock_irqsave(&conf->device_lock, flags);
- __release_stripe(conf, sh);
- spin_unlock_irqrestore(&conf->device_lock, flags);
-}
-
-static inline void remove_hash(struct stripe_head *sh)
-{
- PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
-
- hlist_del_init(&sh->hash);
-}
-
-static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
-{
- struct hlist_head *hp = stripe_hash(conf, sh->sector);
-
- PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
-
- CHECK_DEVLOCK();
- hlist_add_head(&sh->hash, hp);
-}
-
-
-/* find an idle stripe, make sure it is unhashed, and return it. */
-static struct stripe_head *get_free_stripe(raid6_conf_t *conf)
-{
- struct stripe_head *sh = NULL;
- struct list_head *first;
-
- CHECK_DEVLOCK();
- if (list_empty(&conf->inactive_list))
- goto out;
- first = conf->inactive_list.next;
- sh = list_entry(first, struct stripe_head, lru);
- list_del_init(first);
- remove_hash(sh);
- atomic_inc(&conf->active_stripes);
-out:
- return sh;
-}
-
-static void shrink_buffers(struct stripe_head *sh, int num)
-{
- struct page *p;
- int i;
-
- for (i=0; i<num ; i++) {
- p = sh->dev[i].page;
- if (!p)
- continue;
- sh->dev[i].page = NULL;
- put_page(p);
- }
-}
-
-static int grow_buffers(struct stripe_head *sh, int num)
-{
- int i;
-
- for (i=0; i<num; i++) {
- struct page *page;
-
- if (!(page = alloc_page(GFP_KERNEL))) {
- return 1;
- }
- sh->dev[i].page = page;
- }
- return 0;
-}
-
-static void raid6_build_block (struct stripe_head *sh, int i);
-
-static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
-{
- raid6_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks, i;
-
- BUG_ON(atomic_read(&sh->count) != 0);
- BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
-
- CHECK_DEVLOCK();
- PRINTK("init_stripe called, stripe %llu\n",
- (unsigned long long)sh->sector);
-
- remove_hash(sh);
-
- sh->sector = sector;
- sh->pd_idx = pd_idx;
- sh->state = 0;
-
- for (i=disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
-
- if (dev->toread || dev->towrite || dev->written ||
- test_bit(R5_LOCKED, &dev->flags)) {
- PRINTK("sector=%llx i=%d %p %p %p %d\n",
- (unsigned long long)sh->sector, i, dev->toread,
- dev->towrite, dev->written,
- test_bit(R5_LOCKED, &dev->flags));
- BUG();
- }
- dev->flags = 0;
- raid6_build_block(sh, i);
- }
- insert_hash(conf, sh);
-}
-
-static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector)
-{
- struct stripe_head *sh;
- struct hlist_node *hn;
-
- CHECK_DEVLOCK();
- PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
- hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash)
- if (sh->sector == sector)
- return sh;
- PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
- return NULL;
-}
-
-static void unplug_slaves(mddev_t *mddev);
-
-static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector,
- int pd_idx, int noblock)
-{
- struct stripe_head *sh;
-
- PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
-
- spin_lock_irq(&conf->device_lock);
-
- do {
- wait_event_lock_irq(conf->wait_for_stripe,
- conf->quiesce == 0,
- conf->device_lock, /* nothing */);
- sh = __find_stripe(conf, sector);
- if (!sh) {
- if (!conf->inactive_blocked)
- sh = get_free_stripe(conf);
- if (noblock && sh == NULL)
- break;
- if (!sh) {
- conf->inactive_blocked = 1;
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes *3/4)
- || !conf->inactive_blocked),
- conf->device_lock,
- unplug_slaves(conf->mddev);
- );
- conf->inactive_blocked = 0;
- } else
- init_stripe(sh, sector, pd_idx);
- } else {
- if (atomic_read(&sh->count)) {
- BUG_ON(!list_empty(&sh->lru));
- } else {
- if (!test_bit(STRIPE_HANDLE, &sh->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru));
- list_del_init(&sh->lru);
- }
- }
- } while (sh == NULL);
-
- if (sh)
- atomic_inc(&sh->count);
-
- spin_unlock_irq(&conf->device_lock);
- return sh;
-}
-
-static int grow_one_stripe(raid6_conf_t *conf)
-{
- struct stripe_head *sh;
- sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
- if (!sh)
- return 0;
- memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
- sh->raid_conf = conf;
- spin_lock_init(&sh->lock);
-
- if (grow_buffers(sh, conf->raid_disks)) {
- shrink_buffers(sh, conf->raid_disks);
- kmem_cache_free(conf->slab_cache, sh);
- return 0;
- }
- /* we just created an active stripe so... */
- atomic_set(&sh->count, 1);
- atomic_inc(&conf->active_stripes);
- INIT_LIST_HEAD(&sh->lru);
- release_stripe(sh);
- return 1;
-}
-
-static int grow_stripes(raid6_conf_t *conf, int num)
-{
- kmem_cache_t *sc;
- int devs = conf->raid_disks;
-
- sprintf(conf->cache_name[0], "raid6/%s", mdname(conf->mddev));
-
- sc = kmem_cache_create(conf->cache_name[0],
- sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
- if (!sc)
- return 1;
- conf->slab_cache = sc;
- while (num--)
- if (!grow_one_stripe(conf))
- return 1;
- return 0;
-}
-
-static int drop_one_stripe(raid6_conf_t *conf)
-{
- struct stripe_head *sh;
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
- if (!sh)
- return 0;
- BUG_ON(atomic_read(&sh->count));
- shrink_buffers(sh, conf->raid_disks);
- kmem_cache_free(conf->slab_cache, sh);
- atomic_dec(&conf->active_stripes);
- return 1;
-}
-
-static void shrink_stripes(raid6_conf_t *conf)
-{
- while (drop_one_stripe(conf))
- ;
-
- if (conf->slab_cache)
- kmem_cache_destroy(conf->slab_cache);
- conf->slab_cache = NULL;
-}
-
-static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done,
- int error)
-{
- struct stripe_head *sh = bi->bi_private;
- raid6_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks, i;
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
-
- if (bi->bi_size)
- return 1;
-
- for (i=0 ; i<disks; i++)
- if (bi == &sh->dev[i].req)
- break;
-
- PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
- (unsigned long long)sh->sector, i, atomic_read(&sh->count),
- uptodate);
- if (i == disks) {
- BUG();
- return 0;
- }
-
- if (uptodate) {
-#if 0
- struct bio *bio;
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- /* we can return a buffer if we bypassed the cache or
- * if the top buffer is not in highmem. If there are
- * multiple buffers, leave the extra work to
- * handle_stripe
- */
- buffer = sh->bh_read[i];
- if (buffer &&
- (!PageHighMem(buffer->b_page)
- || buffer->b_page == bh->b_page )
- ) {
- sh->bh_read[i] = buffer->b_reqnext;
- buffer->b_reqnext = NULL;
- } else
- buffer = NULL;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (sh->bh_page[i]==bh->b_page)
- set_buffer_uptodate(bh);
- if (buffer) {
- if (buffer->b_page != bh->b_page)
- memcpy(buffer->b_data, bh->b_data, bh->b_size);
- buffer->b_end_io(buffer, 1);
- }
-#else
- set_bit(R5_UPTODATE, &sh->dev[i].flags);
-#endif
- if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- printk(KERN_INFO "raid6: read error corrected!!\n");
- clear_bit(R5_ReadError, &sh->dev[i].flags);
- clear_bit(R5_ReWrite, &sh->dev[i].flags);
- }
- if (atomic_read(&conf->disks[i].rdev->read_errors))
- atomic_set(&conf->disks[i].rdev->read_errors, 0);
- } else {
- int retry = 0;
- clear_bit(R5_UPTODATE, &sh->dev[i].flags);
- atomic_inc(&conf->disks[i].rdev->read_errors);
- if (conf->mddev->degraded)
- printk(KERN_WARNING "raid6: read error not correctable.\n");
- else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
- /* Oh, no!!! */
- printk(KERN_WARNING "raid6: read error NOT corrected!!\n");
- else if (atomic_read(&conf->disks[i].rdev->read_errors)
- > conf->max_nr_stripes)
- printk(KERN_WARNING
- "raid6: Too many read errors, failing device.\n");
- else
- retry = 1;
- if (retry)
- set_bit(R5_ReadError, &sh->dev[i].flags);
- else {
- clear_bit(R5_ReadError, &sh->dev[i].flags);
- clear_bit(R5_ReWrite, &sh->dev[i].flags);
- md_error(conf->mddev, conf->disks[i].rdev);
- }
- }
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
-#if 0
- /* must restore b_page before unlocking buffer... */
- if (sh->bh_page[i] != bh->b_page) {
- bh->b_page = sh->bh_page[i];
- bh->b_data = page_address(bh->b_page);
- clear_buffer_uptodate(bh);
- }
-#endif
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
- return 0;
-}
-
-static int raid6_end_write_request (struct bio *bi, unsigned int bytes_done,
- int error)
-{
- struct stripe_head *sh = bi->bi_private;
- raid6_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks, i;
- unsigned long flags;
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
-
- if (bi->bi_size)
- return 1;
-
- for (i=0 ; i<disks; i++)
- if (bi == &sh->dev[i].req)
- break;
-
- PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
- (unsigned long long)sh->sector, i, atomic_read(&sh->count),
- uptodate);
- if (i == disks) {
- BUG();
- return 0;
- }
-
- spin_lock_irqsave(&conf->device_lock, flags);
- if (!uptodate)
- md_error(conf->mddev, conf->disks[i].rdev);
-
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
-
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(STRIPE_HANDLE, &sh->state);
- __release_stripe(conf, sh);
- spin_unlock_irqrestore(&conf->device_lock, flags);
- return 0;
-}
-
-
-static sector_t compute_blocknr(struct stripe_head *sh, int i);
-
-static void raid6_build_block (struct stripe_head *sh, int i)
-{
- struct r5dev *dev = &sh->dev[i];
- int pd_idx = sh->pd_idx;
- int qd_idx = raid6_next_disk(pd_idx, sh->raid_conf->raid_disks);
-
- bio_init(&dev->req);
- dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_vcnt++;
- dev->req.bi_max_vecs++;
- dev->vec.bv_page = dev->page;
- dev->vec.bv_len = STRIPE_SIZE;
- dev->vec.bv_offset = 0;
-
- dev->req.bi_sector = sh->sector;
- dev->req.bi_private = sh;
-
- dev->flags = 0;
- if (i != pd_idx && i != qd_idx)
- dev->sector = compute_blocknr(sh, i);
-}
-
-static void error(mddev_t *mddev, mdk_rdev_t *rdev)
-{
- char b[BDEVNAME_SIZE];
- raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
- PRINTK("raid6: error called\n");
-
- if (!test_bit(Faulty, &rdev->flags)) {
- mddev->sb_dirty = 1;
- if (test_bit(In_sync, &rdev->flags)) {
- conf->working_disks--;
- mddev->degraded++;
- conf->failed_disks++;
- clear_bit(In_sync, &rdev->flags);
- /*
- * if recovery was running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_ERR, &mddev->recovery);
- }
- set_bit(Faulty, &rdev->flags);
- printk (KERN_ALERT
- "raid6: Disk failure on %s, disabling device."
- " Operation continuing on %d devices\n",
- bdevname(rdev->bdev,b), conf->working_disks);
- }
-}
-
-/*
- * Input: a 'big' sector number,
- * Output: index of the data and parity disk, and the sector # in them.
- */
-static sector_t raid6_compute_sector(sector_t r_sector, unsigned int raid_disks,
- unsigned int data_disks, unsigned int * dd_idx,
- unsigned int * pd_idx, raid6_conf_t *conf)
-{
- long stripe;
- unsigned long chunk_number;
- unsigned int chunk_offset;
- sector_t new_sector;
- int sectors_per_chunk = conf->chunk_size >> 9;
-
- /* First compute the information on this sector */
-
- /*
- * Compute the chunk number and the sector offset inside the chunk
- */
- chunk_offset = sector_div(r_sector, sectors_per_chunk);
- chunk_number = r_sector;
- if ( r_sector != chunk_number ) {
- printk(KERN_CRIT "raid6: ERROR: r_sector = %llu, chunk_number = %lu\n",
- (unsigned long long)r_sector, (unsigned long)chunk_number);
- BUG();
- }
-
- /*
- * Compute the stripe number
- */
- stripe = chunk_number / data_disks;
-
- /*
- * Compute the data disk and parity disk indexes inside the stripe
- */
- *dd_idx = chunk_number % data_disks;
-
- /*
- * Select the parity disk based on the user selected algorithm.
- */
-
- /**** FIX THIS ****/
- switch (conf->algorithm) {
- case ALGORITHM_LEFT_ASYMMETRIC:
- *pd_idx = raid_disks - 1 - (stripe % raid_disks);
- if (*pd_idx == raid_disks-1)
- (*dd_idx)++; /* Q D D D P */
- else if (*dd_idx >= *pd_idx)
- (*dd_idx) += 2; /* D D P Q D */
- break;
- case ALGORITHM_RIGHT_ASYMMETRIC:
- *pd_idx = stripe % raid_disks;
- if (*pd_idx == raid_disks-1)
- (*dd_idx)++; /* Q D D D P */
- else if (*dd_idx >= *pd_idx)
- (*dd_idx) += 2; /* D D P Q D */
- break;
- case ALGORITHM_LEFT_SYMMETRIC:
- *pd_idx = raid_disks - 1 - (stripe % raid_disks);
- *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
- break;
- case ALGORITHM_RIGHT_SYMMETRIC:
- *pd_idx = stripe % raid_disks;
- *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
- break;
- default:
- printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
- conf->algorithm);
- }
-
- PRINTK("raid6: chunk_number = %lu, pd_idx = %u, dd_idx = %u\n",
- chunk_number, *pd_idx, *dd_idx);
-
- /*
- * Finally, compute the new sector number
- */
- new_sector = (sector_t) stripe * sectors_per_chunk + chunk_offset;
- return new_sector;
-}
-
-
-static sector_t compute_blocknr(struct stripe_head *sh, int i)
-{
- raid6_conf_t *conf = sh->raid_conf;
- int raid_disks = conf->raid_disks, data_disks = raid_disks - 2;
- sector_t new_sector = sh->sector, check;
- int sectors_per_chunk = conf->chunk_size >> 9;
- sector_t stripe;
- int chunk_offset;
- int chunk_number, dummy1, dummy2, dd_idx = i;
- sector_t r_sector;
- int i0 = i;
-
- chunk_offset = sector_div(new_sector, sectors_per_chunk);
- stripe = new_sector;
- if ( new_sector != stripe ) {
- printk(KERN_CRIT "raid6: ERROR: new_sector = %llu, stripe = %lu\n",
- (unsigned long long)new_sector, (unsigned long)stripe);
- BUG();
- }
-
- switch (conf->algorithm) {
- case ALGORITHM_LEFT_ASYMMETRIC:
- case ALGORITHM_RIGHT_ASYMMETRIC:
- if (sh->pd_idx == raid_disks-1)
- i--; /* Q D D D P */
- else if (i > sh->pd_idx)
- i -= 2; /* D D P Q D */
- break;
- case ALGORITHM_LEFT_SYMMETRIC:
- case ALGORITHM_RIGHT_SYMMETRIC:
- if (sh->pd_idx == raid_disks-1)
- i--; /* Q D D D P */
- else {
- /* D D P Q D */
- if (i < sh->pd_idx)
- i += raid_disks;
- i -= (sh->pd_idx + 2);
- }
- break;
- default:
- printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
- conf->algorithm);
- }
-
- PRINTK("raid6: compute_blocknr: pd_idx = %u, i0 = %u, i = %u\n", sh->pd_idx, i0, i);
-
- chunk_number = stripe * data_disks + i;
- r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
-
- check = raid6_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
- if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
- printk(KERN_CRIT "raid6: compute_blocknr: map not correct\n");
- return 0;
- }
- return r_sector;
-}
-
-
-
-/*
- * Copy data between a page in the stripe cache, and one or more bion
- * The page could align with the middle of the bio, or there could be
- * several bion, each with several bio_vecs, which cover part of the page
- * Multiple bion are linked together on bi_next. There may be extras
- * at the end of this list. We ignore them.
- */
-static void copy_data(int frombio, struct bio *bio,
- struct page *page,
- sector_t sector)
-{
- char *pa = page_address(page);
- struct bio_vec *bvl;
- int i;
- int page_offset;
-
- if (bio->bi_sector >= sector)
- page_offset = (signed)(bio->bi_sector - sector) * 512;
- else
- page_offset = (signed)(sector - bio->bi_sector) * -512;
- bio_for_each_segment(bvl, bio, i) {
- int len = bio_iovec_idx(bio,i)->bv_len;
- int clen;
- int b_offset = 0;
-
- if (page_offset < 0) {
- b_offset = -page_offset;
- page_offset += b_offset;
- len -= b_offset;
- }
-
- if (len > 0 && page_offset + len > STRIPE_SIZE)
- clen = STRIPE_SIZE - page_offset;
- else clen = len;
-
- if (clen > 0) {
- char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
- if (frombio)
- memcpy(pa+page_offset, ba+b_offset, clen);
- else
- memcpy(ba+b_offset, pa+page_offset, clen);
- __bio_kunmap_atomic(ba, KM_USER0);
- }
- if (clen < len) /* hit end of page */
- break;
- page_offset += len;
- }
-}
-
-#define check_xor() do { \
- if (count == MAX_XOR_BLOCKS) { \
- xor_block(count, STRIPE_SIZE, ptr); \
- count = 1; \
- } \
- } while(0)
-
-/* Compute P and Q syndromes */
-static void compute_parity(struct stripe_head *sh, int method)
-{
- raid6_conf_t *conf = sh->raid_conf;
- int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
- struct bio *chosen;
- /**** FIX THIS: This could be very bad if disks is close to 256 ****/
- void *ptrs[disks];
-
- qd_idx = raid6_next_disk(pd_idx, disks);
- d0_idx = raid6_next_disk(qd_idx, disks);
-
- PRINTK("compute_parity, stripe %llu, method %d\n",
- (unsigned long long)sh->sector, method);
-
- switch(method) {
- case READ_MODIFY_WRITE:
- BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
- case RECONSTRUCT_WRITE:
- for (i= disks; i-- ;)
- if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
- chosen = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- BUG_ON(sh->dev[i].written);
- sh->dev[i].written = chosen;
- }
- break;
- case CHECK_PARITY:
- BUG(); /* Not implemented yet */
- }
-
- for (i = disks; i--;)
- if (sh->dev[i].written) {
- sector_t sector = sh->dev[i].sector;
- struct bio *wbi = sh->dev[i].written;
- while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
- copy_data(1, wbi, sh->dev[i].page, sector);
- wbi = r5_next_bio(wbi, sector);
- }
-
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(R5_UPTODATE, &sh->dev[i].flags);
- }
-
-// switch(method) {
-// case RECONSTRUCT_WRITE:
-// case CHECK_PARITY:
-// case UPDATE_PARITY:
- /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
- /* FIX: Is this ordering of drives even remotely optimal? */
- count = 0;
- i = d0_idx;
- do {
- ptrs[count++] = page_address(sh->dev[i].page);
- if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
- printk("block %d/%d not uptodate on parity calc\n", i,count);
- i = raid6_next_disk(i, disks);
- } while ( i != d0_idx );
-// break;
-// }
-
- raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
-
- switch(method) {
- case RECONSTRUCT_WRITE:
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
- set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
- set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
- break;
- case UPDATE_PARITY:
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
- break;
- }
-}
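compute_parity() defers the actual P/Q arithmetic to raid6_call.gen_syndrome(), an implementation selected at module init by raid6_select_algo(). As a minimal illustration only (a userspace sketch, not the kernel API; gf_mul2() and toy_gen_syndrome() are invented names), the syndrome over GF(2^8) with the conventional RAID-6 polynomial can be computed roughly like this:

#include <stdint.h>
#include <stddef.h>

/* multiply by the generator {02} in GF(2^8), modulo x^8 + x^4 + x^3 + x^2 + 1 */
static uint8_t gf_mul2(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/* ptrs[0..ndata-1] are data blocks, ptrs[ndata] receives P, ptrs[ndata+1] receives Q */
static void toy_gen_syndrome(int ndata, size_t len, uint8_t **ptrs)
{
	uint8_t *p = ptrs[ndata], *q = ptrs[ndata + 1];
	size_t i;
	int z;

	for (i = 0; i < len; i++) {
		uint8_t wp = 0, wq = 0;

		/* Horner's rule, highest data index first: Q = sum over z of g^z * D_z */
		for (z = ndata - 1; z >= 0; z--) {
			wp ^= ptrs[z][i];
			wq = (uint8_t)(gf_mul2(wq) ^ ptrs[z][i]);
		}
		p[i] = wp;	/* plain XOR parity */
		q[i] = wq;	/* GF(2^8) syndrome */
	}
}

Because each data disk contributes a factor g^z determined by its position z, the order in which the page pointers are gathered matters, which is why the loop above walks the devices starting at d0_idx rather than from index 0 as RAID-5 does.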
-
-/* Compute one missing block */
-static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
-{
- raid6_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
- void *ptr[MAX_XOR_BLOCKS], *p;
- int pd_idx = sh->pd_idx;
- int qd_idx = raid6_next_disk(pd_idx, disks);
-
- PRINTK("compute_block_1, stripe %llu, idx %d\n",
- (unsigned long long)sh->sector, dd_idx);
-
- if ( dd_idx == qd_idx ) {
- /* We're actually computing the Q drive */
- compute_parity(sh, UPDATE_PARITY);
- } else {
- ptr[0] = page_address(sh->dev[dd_idx].page);
- if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
- count = 1;
- for (i = disks ; i--; ) {
- if (i == dd_idx || i == qd_idx)
- continue;
- p = page_address(sh->dev[i].page);
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
- ptr[count++] = p;
- else
- printk("compute_block() %d, stripe %llu, %d"
- " not present\n", dd_idx,
- (unsigned long long)sh->sector, i);
-
- check_xor();
- }
- if (count != 1)
- xor_block(count, STRIPE_SIZE, ptr);
- if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
- else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
- }
-}
-
-/* Compute two missing blocks */
-static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
-{
- raid6_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
- int pd_idx = sh->pd_idx;
- int qd_idx = raid6_next_disk(pd_idx, disks);
- int d0_idx = raid6_next_disk(qd_idx, disks);
- int faila, failb;
-
- /* faila and failb are disk numbers relative to d0_idx */
- /* pd_idx become disks-2 and qd_idx become disks-1 */
- faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
- failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
-
- BUG_ON(faila == failb);
- if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
-
- PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
- (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
-
- if ( failb == disks-1 ) {
- /* Q disk is one of the missing disks */
- if ( faila == disks-2 ) {
- /* Missing P+Q, just recompute */
- compute_parity(sh, UPDATE_PARITY);
- return;
- } else {
- /* We're missing D+Q; recompute D from P */
- compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
- compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */
- return;
- }
- }
-
- /* We're missing D+P or D+D; build pointer table */
- {
- /**** FIX THIS: This could be very bad if disks is close to 256 ****/
- void *ptrs[disks];
-
- count = 0;
- i = d0_idx;
- do {
- ptrs[count++] = page_address(sh->dev[i].page);
- i = raid6_next_disk(i, disks);
- if (i != dd_idx1 && i != dd_idx2 &&
- !test_bit(R5_UPTODATE, &sh->dev[i].flags))
- printk("compute_2 with missing block %d/%d\n", count, i);
- } while ( i != d0_idx );
-
- if ( failb == disks-2 ) {
- /* We're missing D+P. */
- raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
- } else {
- /* We're missing D+D. */
- raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
- }
-
- /* Both the above update both missing blocks */
- set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
- set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
- }
-}
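/*
 * Worked example for the relative-index mapping above (illustrative
 * numbers, not taken from the code): with disks = 6, pd_idx = 2,
 * qd_idx = 3 and hence d0_idx = 4, the rotated order starting at d0_idx
 * is 4,5,0,1,2,3, so disk 4 -> 0, disk 5 -> 1, disk 0 -> 2, disk 1 -> 3,
 * disk 2 (P) -> 4 = disks-2 and disk 3 (Q) -> 5 = disks-1.  Losing disks
 * 0 and 3 maps to faila = 2, failb = 5 (after the swap if they arrive in
 * the other order), i.e. the D+Q branch: the data block is rebuilt from P
 * by XOR and Q is then regenerated.  Losing disks 0 and 2 maps to
 * faila = 2, failb = 4 = disks-2, the D+P case handled by
 * raid6_datap_recov(); two lost data disks fall through to
 * raid6_2data_recov().
 */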
-
-
-/*
- * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
- * The bi_next chain must be in order.
- */
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
-{
- struct bio **bip;
- raid6_conf_t *conf = sh->raid_conf;
- int firstwrite=0;
-
- PRINTK("adding bh b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_sector,
- (unsigned long long)sh->sector);
-
-
- spin_lock(&sh->lock);
- spin_lock_irq(&conf->device_lock);
- if (forwrite) {
- bip = &sh->dev[dd_idx].towrite;
- if (*bip == NULL && sh->dev[dd_idx].written == NULL)
- firstwrite = 1;
- } else
- bip = &sh->dev[dd_idx].toread;
- while (*bip && (*bip)->bi_sector < bi->bi_sector) {
- if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
- goto overlap;
- bip = &(*bip)->bi_next;
- }
- if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
- goto overlap;
-
- BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
- if (*bip)
- bi->bi_next = *bip;
- *bip = bi;
- bi->bi_phys_segments ++;
- spin_unlock_irq(&conf->device_lock);
- spin_unlock(&sh->lock);
-
- PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)bi->bi_sector,
- (unsigned long long)sh->sector, dd_idx);
-
- if (conf->mddev->bitmap && firstwrite) {
- sh->bm_seq = conf->seq_write;
- bitmap_startwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0);
- set_bit(STRIPE_BIT_DELAY, &sh->state);
- }
-
- if (forwrite) {
- /* check if page is covered */
- sector_t sector = sh->dev[dd_idx].sector;
- for (bi=sh->dev[dd_idx].towrite;
- sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
- bi && bi->bi_sector <= sector;
- bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
- if (bi->bi_sector + (bi->bi_size>>9) >= sector)
- sector = bi->bi_sector + (bi->bi_size>>9);
- }
- if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
- set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
- }
- return 1;
-
- overlap:
- set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
- spin_unlock_irq(&conf->device_lock);
- spin_unlock(&sh->lock);
- return 0;
-}
-
-
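/*
 * Note on page_is_zero() below: the test works by induction.  If the
 * first four bytes are zero and memcmp(a, a+4, STRIPE_SIZE-4) == 0, then
 * a[i] == a[i-4] for every i >= 4, so each byte equals one of a[0..3]
 * and the whole page must be zero.
 */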
-static int page_is_zero(struct page *p)
-{
- char *a = page_address(p);
- return ((*(u32*)a) == 0 &&
- memcmp(a, a+4, STRIPE_SIZE-4)==0);
-}
-/*
- * handle_stripe - do things to a stripe.
- *
- * We lock the stripe and then examine the state of various bits
- * to see what needs to be done.
- * Possible results:
- * return some read request which now have data
- * return some write requests which are safely on disc
- * schedule a read on some buffers
- * schedule a write of some buffers
- * return confirmation of parity correctness
- *
- * Parity calculations are done inside the stripe lock
- * buffers are taken off read_list or write_list, and bh_cache buffers
- * get BH_Lock set before the stripe lock is released.
- *
- */
-
-static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
-{
- raid6_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks;
- struct bio *return_bi= NULL;
- struct bio *bi;
- int i;
- int syncing;
- int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
- int non_overwrite = 0;
- int failed_num[2] = {0, 0};
- struct r5dev *dev, *pdev, *qdev;
- int pd_idx = sh->pd_idx;
- int qd_idx = raid6_next_disk(pd_idx, disks);
- int p_failed, q_failed;
-
- PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
- (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
- pd_idx, qd_idx);
-
- spin_lock(&sh->lock);
- clear_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
-
- syncing = test_bit(STRIPE_SYNCING, &sh->state);
- /* Now to look around and see what can be done */
-
- rcu_read_lock();
- for (i=disks; i--; ) {
- mdk_rdev_t *rdev;
- dev = &sh->dev[i];
- clear_bit(R5_Insync, &dev->flags);
-
- PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
- /* maybe we can reply to a read */
- if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
- struct bio *rbi, *rbi2;
- PRINTK("Return read for disc %d\n", i);
- spin_lock_irq(&conf->device_lock);
- rbi = dev->toread;
- dev->toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
- while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- copy_data(0, rbi, dev->page, dev->sector);
- rbi2 = r5_next_bio(rbi, dev->sector);
- spin_lock_irq(&conf->device_lock);
- if (--rbi->bi_phys_segments == 0) {
- rbi->bi_next = return_bi;
- return_bi = rbi;
- }
- spin_unlock_irq(&conf->device_lock);
- rbi = rbi2;
- }
- }
-
- /* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
-
-
- if (dev->toread) to_read++;
- if (dev->towrite) {
- to_write++;
- if (!test_bit(R5_OVERWRITE, &dev->flags))
- non_overwrite++;
- }
- if (dev->written) written++;
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (!rdev || !test_bit(In_sync, &rdev->flags)) {
- /* The ReadError flag will just be confusing now */
- clear_bit(R5_ReadError, &dev->flags);
- clear_bit(R5_ReWrite, &dev->flags);
- }
- if (!rdev || !test_bit(In_sync, &rdev->flags)
- || test_bit(R5_ReadError, &dev->flags)) {
- if ( failed < 2 )
- failed_num[failed] = i;
- failed++;
- } else
- set_bit(R5_Insync, &dev->flags);
- }
- rcu_read_unlock();
- PRINTK("locked=%d uptodate=%d to_read=%d"
- " to_write=%d failed=%d failed_num=%d,%d\n",
- locked, uptodate, to_read, to_write, failed,
- failed_num[0], failed_num[1]);
- /* check if the array has lost >2 devices and, if so, some requests might
- * need to be failed
- */
- if (failed > 2 && to_read+to_write+written) {
- for (i=disks; i--; ) {
- int bitmap_end = 0;
-
- if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- mdk_rdev_t *rdev;
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags))
- /* multiple read failures in one stripe */
- md_error(conf->mddev, rdev);
- rcu_read_unlock();
- }
-
- spin_lock_irq(&conf->device_lock);
- /* fail all writes first */
- bi = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
- if (bi) { to_write--; bitmap_end = 1; }
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = nextbi;
- }
- /* and fail all 'written' */
- bi = sh->dev[i].written;
- sh->dev[i].written = NULL;
- if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
- struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = bi2;
- }
-
- /* fail any reads if this device is non-operational */
- if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
- test_bit(R5_ReadError, &sh->dev[i].flags)) {
- bi = sh->dev[i].toread;
- sh->dev[i].toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
- if (bi) to_read--;
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = nextbi;
- }
- }
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
- }
- }
- if (failed > 2 && syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- syncing = 0;
- }
-
- /*
- * might be able to return some write requests if the parity blocks
- * are safe, or on a failed drive
- */
- pdev = &sh->dev[pd_idx];
- p_failed = (failed >= 1 && failed_num[0] == pd_idx)
- || (failed >= 2 && failed_num[1] == pd_idx);
- qdev = &sh->dev[qd_idx];
- q_failed = (failed >= 1 && failed_num[0] == qd_idx)
- || (failed >= 2 && failed_num[1] == qd_idx);
-
- if ( written &&
- ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
- && !test_bit(R5_LOCKED, &pdev->flags)
- && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
- ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
- && !test_bit(R5_LOCKED, &qdev->flags)
- && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
- /* any written block on an uptodate or failed drive can be
- * returned. Note that if we 'wrote' to a failed drive,
- * it will be UPTODATE, but never LOCKED, so we don't need
- * to test 'failed' directly.
- */
- for (i=disks; i--; )
- if (sh->dev[i].written) {
- dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags) ) {
- /* We can return any write requests */
- int bitmap_end = 0;
- struct bio *wbi, *wbi2;
- PRINTK("Return write for stripe %llu disc %d\n",
- (unsigned long long)sh->sector, i);
- spin_lock_irq(&conf->device_lock);
- wbi = dev->written;
- dev->written = NULL;
- while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- wbi2 = r5_next_bio(wbi, dev->sector);
- if (--wbi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- wbi->bi_next = return_bi;
- return_bi = wbi;
- }
- wbi = wbi2;
- }
- if (dev->towrite == NULL)
- bitmap_end = 1;
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
- !test_bit(STRIPE_DEGRADED, &sh->state), 0);
- }
- }
- }
-
- /* Now we might consider reading some blocks, either to check/generate
- * parity, or to satisfy requests
- * or to load a block that is being partially written.
- */
- if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- syncing ||
- (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
- (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
- )
- ) {
- /* we would like to get this block, possibly
- * by computing it, but we might not be able to
- */
- if (uptodate == disks-1) {
- PRINTK("Computing stripe %llu block %d\n",
- (unsigned long long)sh->sector, i);
- compute_block_1(sh, i, 0);
- uptodate++;
- } else if ( uptodate == disks-2 && failed >= 2 ) {
- /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
- int other;
- for (other=disks; other--;) {
- if ( other == i )
- continue;
- if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
- break;
- }
- BUG_ON(other < 0);
- PRINTK("Computing stripe %llu blocks %d,%d\n",
- (unsigned long long)sh->sector, i, other);
- compute_block_2(sh, i, other);
- uptodate += 2;
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
-#if 0
- /* if I am just reading this block and we don't have
- a failed drive, or any pending writes then sidestep the cache */
- if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
- ! syncing && !failed && !to_write) {
- sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
- sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
- }
-#endif
- locked++;
- PRINTK("Reading block %d (sync=%d)\n",
- i, syncing);
- }
- }
- }
- set_bit(STRIPE_HANDLE, &sh->state);
- }
-
- /* now to consider writing and what else, if anything should be read */
- if (to_write) {
- int rcw=0, must_compute=0;
- for (i=disks ; i--;) {
- dev = &sh->dev[i];
- /* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags)
- && i != pd_idx && i != qd_idx
- && (!test_bit(R5_LOCKED, &dev->flags)
-#if 0
- || sh->bh_page[i] != bh->b_page
-#endif
- ) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
- if (test_bit(R5_Insync, &dev->flags)) rcw++;
- else {
- PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
- must_compute++;
- }
- }
- }
- PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
- (unsigned long long)sh->sector, rcw, must_compute);
- set_bit(STRIPE_HANDLE, &sh->state);
-
- if (rcw > 0)
- /* want reconstruct write, but need to get some data */
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if (!test_bit(R5_OVERWRITE, &dev->flags)
- && !(failed == 0 && (i == pd_idx || i == qd_idx))
- && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- test_bit(R5_Insync, &dev->flags)) {
- if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- {
- PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- locked++;
- } else {
- PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
- /* now if nothing is locked, and if we have enough data, we can start a write request */
- if (locked == 0 && rcw == 0 &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
- if ( must_compute > 0 ) {
- /* We have failed blocks and need to compute them */
- switch ( failed ) {
- case 0: BUG();
- case 1: compute_block_1(sh, failed_num[0], 0); break;
- case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
- default: BUG(); /* This request should have been failed? */
- }
- }
-
- PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
- compute_parity(sh, RECONSTRUCT_WRITE);
- /* now every locked buffer is ready to be written */
- for (i=disks; i--;)
- if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
- PRINTK("Writing stripe %llu block %d\n",
- (unsigned long long)sh->sector, i);
- locked++;
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- }
- /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
- set_bit(STRIPE_INSYNC, &sh->state);
-
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
- }
- }
-
- /* maybe we need to check and possibly fix the parity for this stripe
- * Any reads will already have been scheduled, so we just see if enough data
- * is available
- */
- if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
- int update_p = 0, update_q = 0;
- struct r5dev *dev;
-
- set_bit(STRIPE_HANDLE, &sh->state);
-
- BUG_ON(failed>2);
- BUG_ON(uptodate < disks);
- /* Want to check and possibly repair P and Q.
- * However there could be one 'failed' device, in which
- * case we can only check one of them, possibly using the
- * other to generate missing data
- */
-
- /* If !tmp_page, we cannot do the calculations,
- * but as we have set STRIPE_HANDLE, we will soon be called
- * by stripe_handle with a tmp_page - just wait until then.
- */
- if (tmp_page) {
- if (failed == q_failed) {
- /* The only possible failed device holds 'Q', so it makes
- * sense to check P (If anything else were failed, we would
- * have used P to recreate it).
- */
- compute_block_1(sh, pd_idx, 1);
- if (!page_is_zero(sh->dev[pd_idx].page)) {
- compute_block_1(sh,pd_idx,0);
- update_p = 1;
- }
- }
- if (!q_failed && failed < 2) {
- /* q is not failed, and we didn't use it to generate
- * anything, so it makes sense to check it
- */
- memcpy(page_address(tmp_page),
- page_address(sh->dev[qd_idx].page),
- STRIPE_SIZE);
- compute_parity(sh, UPDATE_PARITY);
- if (memcmp(page_address(tmp_page),
- page_address(sh->dev[qd_idx].page),
- STRIPE_SIZE)!= 0) {
- clear_bit(STRIPE_INSYNC, &sh->state);
- update_q = 1;
- }
- }
- if (update_p || update_q) {
- conf->mddev->resync_mismatches += STRIPE_SECTORS;
- if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
- /* don't try to repair!! */
- update_p = update_q = 0;
- }
-
- /* now write out any block on a failed drive,
- * or P or Q if they need it
- */
-
- if (failed == 2) {
- dev = &sh->dev[failed_num[1]];
- locked++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
- if (failed >= 1) {
- dev = &sh->dev[failed_num[0]];
- locked++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
-
- if (update_p) {
- dev = &sh->dev[pd_idx];
- locked ++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
- if (update_q) {
- dev = &sh->dev[qd_idx];
- locked++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
- clear_bit(STRIPE_DEGRADED, &sh->state);
-
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- }
-
- if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,1);
- clear_bit(STRIPE_SYNCING, &sh->state);
- }
-
- /* If the failed drives are just a ReadError, then we might need
- * to progress the repair/check process
- */
- if (failed <= 2 && ! conf->mddev->ro)
- for (i=0; i<failed;i++) {
- dev = &sh->dev[failed_num[i]];
- if (test_bit(R5_ReadError, &dev->flags)
- && !test_bit(R5_LOCKED, &dev->flags)
- && test_bit(R5_UPTODATE, &dev->flags)
- ) {
- if (!test_bit(R5_ReWrite, &dev->flags)) {
- set_bit(R5_Wantwrite, &dev->flags);
- set_bit(R5_ReWrite, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- } else {
- /* let's read it back */
- set_bit(R5_Wantread, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- }
- }
- }
- spin_unlock(&sh->lock);
-
- while ((bi=return_bi)) {
- int bytes = bi->bi_size;
-
- return_bi = bi->bi_next;
- bi->bi_next = NULL;
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes, 0);
- }
- for (i=disks; i-- ;) {
- int rw;
- struct bio *bi;
- mdk_rdev_t *rdev;
- if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
- rw = 1;
- else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = 0;
- else
- continue;
-
- bi = &sh->dev[i].req;
-
- bi->bi_rw = rw;
- if (rw)
- bi->bi_end_io = raid6_end_write_request;
- else
- bi->bi_end_io = raid6_end_read_request;
-
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(Faulty, &rdev->flags))
- rdev = NULL;
- if (rdev)
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- if (rdev) {
- if (syncing)
- md_sync_acct(rdev->bdev, STRIPE_SECTORS);
-
- bi->bi_bdev = rdev->bdev;
- PRINTK("for %llu schedule op %ld on disc %d\n",
- (unsigned long long)sh->sector, bi->bi_rw, i);
- atomic_inc(&sh->count);
- bi->bi_sector = sh->sector + rdev->data_offset;
- bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
- bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
- bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
- bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
- bi->bi_next = NULL;
- if (rw == WRITE &&
- test_bit(R5_ReWrite, &sh->dev[i].flags))
- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
- generic_make_request(bi);
- } else {
- if (rw == 1)
- set_bit(STRIPE_DEGRADED, &sh->state);
- PRINTK("skip op %ld on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
-}
-
-static void raid6_activate_delayed(raid6_conf_t *conf)
-{
- if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
- while (!list_empty(&conf->delayed_list)) {
- struct list_head *l = conf->delayed_list.next;
- struct stripe_head *sh;
- sh = list_entry(l, struct stripe_head, lru);
- list_del_init(l);
- clear_bit(STRIPE_DELAYED, &sh->state);
- if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- atomic_inc(&conf->preread_active_stripes);
- list_add_tail(&sh->lru, &conf->handle_list);
- }
- }
-}
-
-static void activate_bit_delay(raid6_conf_t *conf)
-{
- /* device_lock is held */
- struct list_head head;
- list_add(&head, &conf->bitmap_list);
- list_del_init(&conf->bitmap_list);
- while (!list_empty(&head)) {
- struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
- list_del_init(&sh->lru);
- atomic_inc(&sh->count);
- __release_stripe(conf, sh);
- }
-}
-
-static void unplug_slaves(mddev_t *mddev)
-{
- raid6_conf_t *conf = mddev_to_conf(mddev);
- int i;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void raid6_unplug_device(request_queue_t *q)
-{
- mddev_t *mddev = q->queuedata;
- raid6_conf_t *conf = mddev_to_conf(mddev);
- unsigned long flags;
-
- spin_lock_irqsave(&conf->device_lock, flags);
-
- if (blk_remove_plug(q)) {
- conf->seq_flush++;
- raid6_activate_delayed(conf);
- }
- md_wakeup_thread(mddev->thread);
-
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- unplug_slaves(mddev);
-}
-
-static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- raid6_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
-static inline void raid6_plug_device(raid6_conf_t *conf)
-{
- spin_lock_irq(&conf->device_lock);
- blk_plug_device(conf->mddev->queue);
- spin_unlock_irq(&conf->device_lock);
-}
-
-static int make_request (request_queue_t *q, struct bio * bi)
-{
- mddev_t *mddev = q->queuedata;
- raid6_conf_t *conf = mddev_to_conf(mddev);
- const unsigned int raid_disks = conf->raid_disks;
- const unsigned int data_disks = raid_disks - 2;
- unsigned int dd_idx, pd_idx;
- sector_t new_sector;
- sector_t logical_sector, last_sector;
- struct stripe_head *sh;
- const int rw = bio_data_dir(bi);
-
- if (unlikely(bio_barrier(bi))) {
- bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
- return 0;
- }
-
- md_write_start(mddev, bi);
-
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
-
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_sector + (bi->bi_size>>9);
-
- bi->bi_next = NULL;
- bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
-
- for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
- DEFINE_WAIT(w);
-
- new_sector = raid6_compute_sector(logical_sector,
- raid_disks, data_disks, &dd_idx, &pd_idx, conf);
-
- PRINTK("raid6: make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
- (unsigned long long)logical_sector);
-
- retry:
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
- if (sh) {
- if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
- /* Add failed due to overlap. Flush everything
- * and wait a while
- */
- raid6_unplug_device(mddev->queue);
- release_stripe(sh);
- schedule();
- goto retry;
- }
- finish_wait(&conf->wait_for_overlap, &w);
- raid6_plug_device(conf);
- handle_stripe(sh, NULL);
- release_stripe(sh);
- } else {
- /* cannot get stripe for read-ahead, just give-up */
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- finish_wait(&conf->wait_for_overlap, &w);
- break;
- }
-
- }
- spin_lock_irq(&conf->device_lock);
- if (--bi->bi_phys_segments == 0) {
- int bytes = bi->bi_size;
-
- if (rw == WRITE )
- md_write_end(mddev);
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes, 0);
- }
- spin_unlock_irq(&conf->device_lock);
- return 0;
-}
-
-/* FIXME go_faster isn't used */
-static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
-{
- raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
- struct stripe_head *sh;
- int sectors_per_chunk = conf->chunk_size >> 9;
- sector_t x;
- unsigned long stripe;
- int chunk_offset;
- int dd_idx, pd_idx;
- sector_t first_sector;
- int raid_disks = conf->raid_disks;
- int data_disks = raid_disks - 2;
- sector_t max_sector = mddev->size << 1;
- int sync_blocks;
- int still_degraded = 0;
- int i;
-
- if (sector_nr >= max_sector) {
- /* just being told to finish up .. nothing much to do */
- unplug_slaves(mddev);
-
- if (mddev->curr_resync < max_sector) /* aborted */
- bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
- else /* completed sync */
- conf->fullsync = 0;
- bitmap_close_sync(mddev->bitmap);
-
- return 0;
- }
- /* if there are 2 or more failed drives and we are trying
- * to resync, then assert that we are finished, because there is
- * nothing we can do.
- */
- if (mddev->degraded >= 2 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- sector_t rv = (mddev->size << 1) - sector_nr;
- *skipped = 1;
- return rv;
- }
- if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
- !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
- /* we can skip this block, and probably more */
- sync_blocks /= STRIPE_SECTORS;
- *skipped = 1;
- return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
- }
-
- x = sector_nr;
- chunk_offset = sector_div(x, sectors_per_chunk);
- stripe = x;
- BUG_ON(x != stripe);
-
- first_sector = raid6_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
- + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
- sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
- if (sh == NULL) {
- sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
- /* make sure we don't swamp the stripe cache if someone else
- * is trying to get access
- */
- schedule_timeout_uninterruptible(1);
- }
- /* Need to check if array will still be degraded after recovery/resync
- * We don't need to check the 'failed' flag as when that gets set,
- * recovery aborts.
- */
- for (i=0; i<mddev->raid_disks; i++)
- if (conf->disks[i].rdev == NULL)
- still_degraded = 1;
-
- bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
-
- spin_lock(&sh->lock);
- set_bit(STRIPE_SYNCING, &sh->state);
- clear_bit(STRIPE_INSYNC, &sh->state);
- spin_unlock(&sh->lock);
-
- handle_stripe(sh, NULL);
- release_stripe(sh);
-
- return STRIPE_SECTORS;
-}
-
-/*
- * This is our raid6 kernel thread.
- *
- * We scan the hash table for stripes which can be handled now.
- * During the scan, completed stripes are saved for us by the interrupt
- * handler, so that they will not have to wait for our next wakeup.
- */
-static void raid6d (mddev_t *mddev)
-{
- struct stripe_head *sh;
- raid6_conf_t *conf = mddev_to_conf(mddev);
- int handled;
-
- PRINTK("+++ raid6d active\n");
-
- md_check_recovery(mddev);
-
- handled = 0;
- spin_lock_irq(&conf->device_lock);
- while (1) {
- struct list_head *first;
-
- if (conf->seq_flush - conf->seq_write > 0) {
- int seq = conf->seq_flush;
- spin_unlock_irq(&conf->device_lock);
- bitmap_unplug(mddev->bitmap);
- spin_lock_irq(&conf->device_lock);
- conf->seq_write = seq;
- activate_bit_delay(conf);
- }
-
- if (list_empty(&conf->handle_list) &&
- atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
- !blk_queue_plugged(mddev->queue) &&
- !list_empty(&conf->delayed_list))
- raid6_activate_delayed(conf);
-
- if (list_empty(&conf->handle_list))
- break;
-
- first = conf->handle_list.next;
- sh = list_entry(first, struct stripe_head, lru);
-
- list_del_init(first);
- atomic_inc(&sh->count);
- BUG_ON(atomic_read(&sh->count)!= 1);
- spin_unlock_irq(&conf->device_lock);
-
- handled++;
- handle_stripe(sh, conf->spare_page);
- release_stripe(sh);
-
- spin_lock_irq(&conf->device_lock);
- }
- PRINTK("%d stripes handled\n", handled);
-
- spin_unlock_irq(&conf->device_lock);
-
- unplug_slaves(mddev);
-
- PRINTK("--- raid6d inactive\n");
-}
-
-static ssize_t
-raid6_show_stripe_cache_size(mddev_t *mddev, char *page)
-{
- raid6_conf_t *conf = mddev_to_conf(mddev);
- if (conf)
- return sprintf(page, "%d\n", conf->max_nr_stripes);
- else
- return 0;
-}
-
-static ssize_t
-raid6_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
-{
- raid6_conf_t *conf = mddev_to_conf(mddev);
- char *end;
- int new;
- if (len >= PAGE_SIZE)
- return -EINVAL;
- if (!conf)
- return -ENODEV;
-
- new = simple_strtoul(page, &end, 10);
- if (!*page || (*end && *end != '\n') )
- return -EINVAL;
- if (new <= 16 || new > 32768)
- return -EINVAL;
- while (new < conf->max_nr_stripes) {
- if (drop_one_stripe(conf))
- conf->max_nr_stripes--;
- else
- break;
- }
- while (new > conf->max_nr_stripes) {
- if (grow_one_stripe(conf))
- conf->max_nr_stripes++;
- else break;
- }
- return len;
-}
-
-static struct md_sysfs_entry
-raid6_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
- raid6_show_stripe_cache_size,
- raid6_store_stripe_cache_size);
-
-static ssize_t
-stripe_cache_active_show(mddev_t *mddev, char *page)
-{
- raid6_conf_t *conf = mddev_to_conf(mddev);
- if (conf)
- return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
- else
- return 0;
-}
-
-static struct md_sysfs_entry
-raid6_stripecache_active = __ATTR_RO(stripe_cache_active);
-
-static struct attribute *raid6_attrs[] = {
- &raid6_stripecache_size.attr,
- &raid6_stripecache_active.attr,
- NULL,
-};
-static struct attribute_group raid6_attrs_group = {
- .name = NULL,
- .attrs = raid6_attrs,
-};
-
-static int run(mddev_t *mddev)
-{
- raid6_conf_t *conf;
- int raid_disk, memory;
- mdk_rdev_t *rdev;
- struct disk_info *disk;
- struct list_head *tmp;
-
- if (mddev->level != 6) {
- PRINTK("raid6: %s: raid level not set to 6 (%d)\n", mdname(mddev), mddev->level);
- return -EIO;
- }
-
- mddev->private = kzalloc(sizeof (raid6_conf_t), GFP_KERNEL);
- if ((conf = mddev->private) == NULL)
- goto abort;
- conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
- GFP_KERNEL);
- if (!conf->disks)
- goto abort;
-
- conf->mddev = mddev;
-
- if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
- goto abort;
-
- conf->spare_page = alloc_page(GFP_KERNEL);
- if (!conf->spare_page)
- goto abort;
-
- spin_lock_init(&conf->device_lock);
- init_waitqueue_head(&conf->wait_for_stripe);
- init_waitqueue_head(&conf->wait_for_overlap);
- INIT_LIST_HEAD(&conf->handle_list);
- INIT_LIST_HEAD(&conf->delayed_list);
- INIT_LIST_HEAD(&conf->bitmap_list);
- INIT_LIST_HEAD(&conf->inactive_list);
- atomic_set(&conf->active_stripes, 0);
- atomic_set(&conf->preread_active_stripes, 0);
-
- PRINTK("raid6: run(%s) called.\n", mdname(mddev));
-
- ITERATE_RDEV(mddev,rdev,tmp) {
- raid_disk = rdev->raid_disk;
- if (raid_disk >= mddev->raid_disks
- || raid_disk < 0)
- continue;
- disk = conf->disks + raid_disk;
-
- disk->rdev = rdev;
-
- if (test_bit(In_sync, &rdev->flags)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_INFO "raid6: device %s operational as raid"
- " disk %d\n", bdevname(rdev->bdev,b),
- raid_disk);
- conf->working_disks++;
- }
- }
-
- conf->raid_disks = mddev->raid_disks;
-
- /*
- * 0 for a fully functional array, 1 or 2 for a degraded array.
- */
- mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
- conf->mddev = mddev;
- conf->chunk_size = mddev->chunk_size;
- conf->level = mddev->level;
- conf->algorithm = mddev->layout;
- conf->max_nr_stripes = NR_STRIPES;
-
- /* device size must be a multiple of chunk size */
- mddev->size &= ~(mddev->chunk_size/1024 -1);
- mddev->resync_max_sectors = mddev->size << 1;
-
- if (conf->raid_disks < 4) {
- printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
- mdname(mddev), conf->raid_disks);
- goto abort;
- }
- if (!conf->chunk_size || conf->chunk_size % 4) {
- printk(KERN_ERR "raid6: invalid chunk size %d for %s\n",
- conf->chunk_size, mdname(mddev));
- goto abort;
- }
- if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
- printk(KERN_ERR
- "raid6: unsupported parity algorithm %d for %s\n",
- conf->algorithm, mdname(mddev));
- goto abort;
- }
- if (mddev->degraded > 2) {
- printk(KERN_ERR "raid6: not enough operational devices for %s"
- " (%d/%d failed)\n",
- mdname(mddev), conf->failed_disks, conf->raid_disks);
- goto abort;
- }
-
- if (mddev->degraded > 0 &&
- mddev->recovery_cp != MaxSector) {
- if (mddev->ok_start_degraded)
- printk(KERN_WARNING "raid6: starting dirty degraded array:%s"
- "- data corruption possible.\n",
- mdname(mddev));
- else {
- printk(KERN_ERR "raid6: cannot start dirty degraded array"
- " for %s\n", mdname(mddev));
- goto abort;
- }
- }
-
- {
- mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6");
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid6: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto abort;
- }
- }
-
- memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
- conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
- if (grow_stripes(conf, conf->max_nr_stripes)) {
- printk(KERN_ERR
- "raid6: couldn't allocate %dkB for buffers\n", memory);
- shrink_stripes(conf);
- md_unregister_thread(mddev->thread);
- goto abort;
- } else
- printk(KERN_INFO "raid6: allocated %dkB for %s\n",
- memory, mdname(mddev));
-
- if (mddev->degraded == 0)
- printk(KERN_INFO "raid6: raid level %d set %s active with %d out of %d"
- " devices, algorithm %d\n", conf->level, mdname(mddev),
- mddev->raid_disks-mddev->degraded, mddev->raid_disks,
- conf->algorithm);
- else
- printk(KERN_ALERT "raid6: raid level %d set %s active with %d"
- " out of %d devices, algorithm %d\n", conf->level,
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, conf->algorithm);
-
- print_raid6_conf(conf);
-
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (n-2) * chunksize where 'n' is the number of raid devices
- */
- {
- int stripe = (mddev->raid_disks-2) * mddev->chunk_size
- / PAGE_SIZE;
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- }
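/*
 * Worked example for the read-ahead sizing above (illustrative numbers,
 * not taken from the code): a 6-device array with 64 KiB chunks and
 * 4 KiB pages gives stripe = (6-2) * 65536 / 4096 = 64 pages, so
 * ra_pages is raised to at least 128 pages, i.e. 512 KiB -- two full
 * (n-2) * chunk_size stripes.
 */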
-
- /* Ok, everything is just fine now */
- sysfs_create_group(&mddev->kobj, &raid6_attrs_group);
-
- mddev->array_size = mddev->size * (mddev->raid_disks - 2);
-
- mddev->queue->unplug_fn = raid6_unplug_device;
- mddev->queue->issue_flush_fn = raid6_issue_flush;
- return 0;
-abort:
- if (conf) {
- print_raid6_conf(conf);
- safe_put_page(conf->spare_page);
- kfree(conf->stripe_hashtbl);
- kfree(conf->disks);
- kfree(conf);
- }
- mddev->private = NULL;
- printk(KERN_ALERT "raid6: failed to run raid set %s\n", mdname(mddev));
- return -EIO;
-}
-
-
-
-static int stop (mddev_t *mddev)
-{
- raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
-
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
- shrink_stripes(conf);
- kfree(conf->stripe_hashtbl);
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- sysfs_remove_group(&mddev->kobj, &raid6_attrs_group);
- kfree(conf);
- mddev->private = NULL;
- return 0;
-}
-
-#if RAID6_DUMPSTATE
-static void print_sh (struct seq_file *seq, struct stripe_head *sh)
-{
- int i;
-
- seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
- (unsigned long long)sh->sector, sh->pd_idx, sh->state);
- seq_printf(seq, "sh %llu, count %d.\n",
- (unsigned long long)sh->sector, atomic_read(&sh->count));
- seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
- for (i = 0; i < sh->raid_conf->raid_disks; i++) {
- seq_printf(seq, "(cache%d: %p %ld) ",
- i, sh->dev[i].page, sh->dev[i].flags);
- }
- seq_printf(seq, "\n");
-}
-
-static void printall (struct seq_file *seq, raid6_conf_t *conf)
-{
- struct stripe_head *sh;
- struct hlist_node *hn;
- int i;
-
- spin_lock_irq(&conf->device_lock);
- for (i = 0; i < NR_HASH; i++) {
- sh = conf->stripe_hashtbl[i];
- hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
- if (sh->raid_conf != conf)
- continue;
- print_sh(seq, sh);
- }
- }
- spin_unlock_irq(&conf->device_lock);
-}
-#endif
-
-static void status (struct seq_file *seq, mddev_t *mddev)
-{
- raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
- int i;
-
- seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
- seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
- for (i = 0; i < conf->raid_disks; i++)
- seq_printf (seq, "%s",
- conf->disks[i].rdev &&
- test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
- seq_printf (seq, "]");
-#if RAID6_DUMPSTATE
- seq_printf (seq, "\n");
- printall(seq, conf);
-#endif
-}
-
-static void print_raid6_conf (raid6_conf_t *conf)
-{
- int i;
- struct disk_info *tmp;
-
- printk("RAID6 conf printout:\n");
- if (!conf) {
- printk("(conf==NULL)\n");
- return;
- }
- printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
- conf->working_disks, conf->failed_disks);
-
- for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
- tmp = conf->disks + i;
- if (tmp->rdev)
- printk(" disk %d, o:%d, dev:%s\n",
- i, !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
- }
-}
-
-static int raid6_spare_active(mddev_t *mddev)
-{
- int i;
- raid6_conf_t *conf = mddev->private;
- struct disk_info *tmp;
-
- for (i = 0; i < conf->raid_disks; i++) {
- tmp = conf->disks + i;
- if (tmp->rdev
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_bit(In_sync, &tmp->rdev->flags)) {
- mddev->degraded--;
- conf->failed_disks--;
- conf->working_disks++;
- set_bit(In_sync, &tmp->rdev->flags);
- }
- }
- print_raid6_conf(conf);
- return 0;
-}
-
-static int raid6_remove_disk(mddev_t *mddev, int number)
-{
- raid6_conf_t *conf = mddev->private;
- int err = 0;
- mdk_rdev_t *rdev;
- struct disk_info *p = conf->disks + number;
-
- print_raid6_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- }
- }
-
-abort:
-
- print_raid6_conf(conf);
- return err;
-}
-
-static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
-{
- raid6_conf_t *conf = mddev->private;
- int found = 0;
- int disk;
- struct disk_info *p;
-
- if (mddev->degraded > 2)
- /* no point adding a device */
- return 0;
- /*
- * find the disk ... but prefer rdev->saved_raid_disk
- * if possible.
- */
- if (rdev->saved_raid_disk >= 0 &&
- conf->disks[rdev->saved_raid_disk].rdev == NULL)
- disk = rdev->saved_raid_disk;
- else
- disk = 0;
- for ( ; disk < mddev->raid_disks; disk++)
- if ((p=conf->disks + disk)->rdev == NULL) {
- clear_bit(In_sync, &rdev->flags);
- rdev->raid_disk = disk;
- found = 1;
- if (rdev->saved_raid_disk != disk)
- conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
- break;
- }
- print_raid6_conf(conf);
- return found;
-}
-
-static int raid6_resize(mddev_t *mddev, sector_t sectors)
-{
- /* no resync is happening, and there is enough space
- * on all devices, so we can resize.
- * We need to make sure resync covers any new space.
- * If the array is shrinking we should possibly wait until
- * any io in the removed space completes, but it hardly seems
- * worth it.
- */
- sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
- mddev->array_size = (sectors * (mddev->raid_disks-2))>>1;
- set_capacity(mddev->gendisk, mddev->array_size << 1);
- mddev->changed = 1;
- if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
- mddev->recovery_cp = mddev->size << 1;
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- }
- mddev->size = sectors /2;
- mddev->resync_max_sectors = sectors;
- return 0;
-}
-
-static void raid6_quiesce(mddev_t *mddev, int state)
-{
- raid6_conf_t *conf = mddev_to_conf(mddev);
-
- switch(state) {
- case 1: /* stop all writes */
- spin_lock_irq(&conf->device_lock);
- conf->quiesce = 1;
- wait_event_lock_irq(conf->wait_for_stripe,
- atomic_read(&conf->active_stripes) == 0,
- conf->device_lock, /* nothing */);
- spin_unlock_irq(&conf->device_lock);
- break;
-
- case 0: /* re-enable writes */
- spin_lock_irq(&conf->device_lock);
- conf->quiesce = 0;
- wake_up(&conf->wait_for_stripe);
- spin_unlock_irq(&conf->device_lock);
- break;
- }
-}
-
-static struct mdk_personality raid6_personality =
-{
- .name = "raid6",
- .level = 6,
- .owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
- .stop = stop,
- .status = status,
- .error_handler = error,
- .hot_add_disk = raid6_add_disk,
- .hot_remove_disk= raid6_remove_disk,
- .spare_active = raid6_spare_active,
- .sync_request = sync_request,
- .resize = raid6_resize,
- .quiesce = raid6_quiesce,
-};
-
-static int __init raid6_init(void)
-{
- int e;
-
- e = raid6_select_algo();
- if ( e )
- return e;
-
- return register_md_personality(&raid6_personality);
-}
-
-static void raid6_exit (void)
-{
- unregister_md_personality(&raid6_personality);
-}
-
-module_init(raid6_init);
-module_exit(raid6_exit);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("md-personality-8"); /* RAID6 */
-MODULE_ALIAS("md-raid6");
-MODULE_ALIAS("md-level-6");
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 583d151b7486..ef52e6da01ed 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -82,9 +82,6 @@ config VIDEO_IR
config VIDEO_TVEEPROM
tristate
-config VIDEO_CX2341X
- tristate
-
config USB_DABUSB
tristate "DABUSB driver"
depends on USB
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 3152a54a2539..5e8bb41a088b 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -556,22 +556,23 @@ static int dvb_frontend_thread(void *data)
}
/* do an iteration of the tuning loop */
- if (fe->ops.get_frontend_algo(fe) == FE_ALGO_HW) {
- /* have we been asked to retune? */
- params = NULL;
- if (fepriv->state & FESTATE_RETUNE) {
- params = &fepriv->parameters;
- fepriv->state = FESTATE_TUNED;
- }
+ if (fe->ops.get_frontend_algo) {
+ if (fe->ops.get_frontend_algo(fe) == FE_ALGO_HW) {
+ /* have we been asked to retune? */
+ params = NULL;
+ if (fepriv->state & FESTATE_RETUNE) {
+ params = &fepriv->parameters;
+ fepriv->state = FESTATE_TUNED;
+ }
- fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);
- if (s != fepriv->status) {
- dvb_frontend_add_event(fe, s);
- fepriv->status = s;
+ fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);
+ if (s != fepriv->status) {
+ dvb_frontend_add_event(fe, s);
+ fepriv->status = s;
+ }
}
- } else {
+ } else
dvb_frontend_swzigzag(fe);
- }
}
if (dvb_shutdown_timeout) {
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 8832f80c05f7..7a5c99c200e8 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -152,13 +152,9 @@ static void init_av7110_av(struct av7110 *av7110)
/* remaining inits according to card and frontend type */
av7110->analog_tuner_flags = 0;
av7110->current_input = 0;
- if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a) {
- printk("dvb-ttpci: MSP3415 audio DAC @ card %d\n",
- av7110->dvb_adapter.num);
- av7110->adac_type = DVB_ADAC_MSP34x5;
+ if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a)
av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 0); // SPDIF on
- }
- else if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
+ if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
printk ("dvb-ttpci: Crystal audio DAC @ card %d detected\n",
av7110->dvb_adapter.num);
av7110->adac_type = DVB_ADAC_CRYSTAL;
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 2eff09f638d3..0f3a044aeb17 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -318,7 +318,17 @@ int av7110_set_volume(struct av7110 *av7110, int volleft, int volright)
msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
msp_writereg(av7110, MSP_WR_DSP, 0x0006, val); /* headphonesr */
return 0;
+
+ case DVB_ADAC_MSP34x5:
+ vol = (volleft > volright) ? volleft : volright;
+ val = (vol * 0x73 / 255) << 8;
+ if (vol > 0)
+ balance = ((volright - volleft) * 127) / vol;
+ msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
+ return 0;
}
+
return 0;
}
@@ -1267,23 +1277,32 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
switch(av7110->audiostate.channel_select) {
case AUDIO_STEREO:
ret = audcom(av7110, AUDIO_CMD_STEREO);
- if (!ret)
+ if (!ret) {
if (av7110->adac_type == DVB_ADAC_CRYSTAL)
i2c_writereg(av7110, 0x20, 0x02, 0x49);
+ else if (av7110->adac_type == DVB_ADAC_MSP34x5)
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220);
+ }
break;
case AUDIO_MONO_LEFT:
ret = audcom(av7110, AUDIO_CMD_MONO_L);
- if (!ret)
+ if (!ret) {
if (av7110->adac_type == DVB_ADAC_CRYSTAL)
i2c_writereg(av7110, 0x20, 0x02, 0x4a);
+ else if (av7110->adac_type == DVB_ADAC_MSP34x5)
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200);
+ }
break;
case AUDIO_MONO_RIGHT:
ret = audcom(av7110, AUDIO_CMD_MONO_R);
- if (!ret)
+ if (!ret) {
if (av7110->adac_type == DVB_ADAC_CRYSTAL)
i2c_writereg(av7110, 0x20, 0x02, 0x45);
+ else if (av7110->adac_type == DVB_ADAC_MSP34x5)
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210);
+ }
break;
default:
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index 603a22e4bfe2..64055461559d 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -42,7 +42,18 @@
int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val)
{
u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff };
- struct i2c_msg msgs = { .flags = 0, .addr = 0x40, .len = 5, .buf = msg };
+ struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg };
+
+ switch (av7110->adac_type) {
+ case DVB_ADAC_MSP34x0:
+ msgs.addr = 0x40;
+ break;
+ case DVB_ADAC_MSP34x5:
+ msgs.addr = 0x42;
+ break;
+ default:
+ return 0;
+ }
if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) {
dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n",
@@ -57,10 +68,23 @@ static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val)
u8 msg1[3] = { dev, reg >> 8, reg & 0xff };
u8 msg2[2];
struct i2c_msg msgs[2] = {
- { .flags = 0, .addr = 0x40, .len = 3, .buf = msg1 },
- { .flags = I2C_M_RD, .addr = 0x40, .len = 2, .buf = msg2 }
+ { .flags = 0 , .len = 3, .buf = msg1 },
+ { .flags = I2C_M_RD, .len = 2, .buf = msg2 }
};
+ switch (av7110->adac_type) {
+ case DVB_ADAC_MSP34x0:
+ msgs[0].addr = 0x40;
+ msgs[1].addr = 0x40;
+ break;
+ case DVB_ADAC_MSP34x5:
+ msgs[0].addr = 0x42;
+ msgs[1].addr = 0x42;
+ break;
+ default:
+ return 0;
+ }
+
if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) {
dprintk(1, "dvb-ttpci: failed @ card %d, %u\n",
av7110->dvb_adapter.num, reg);
@@ -678,17 +702,23 @@ int av7110_init_analog_module(struct av7110 *av7110)
{
u16 version1, version2;
- if (i2c_writereg(av7110, 0x80, 0x0, 0x80) != 1
- || i2c_writereg(av7110, 0x80, 0x0, 0) != 1)
+ if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 &&
+ i2c_writereg(av7110, 0x80, 0x0, 0) == 1) {
+ printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
+ av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_MSP34x0;
+ } else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 &&
+ i2c_writereg(av7110, 0x84, 0x0, 0) == 1) {
+ printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3415\n",
+ av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_MSP34x5;
+ } else
return -ENODEV;
- printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
- av7110->dvb_adapter.num);
- av7110->adac_type = DVB_ADAC_MSP34x0;
msleep(100); // the probing above resets the msp...
msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1);
msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2);
- dprintk(1, "dvb-ttpci: @ card %d MSP3400 version 0x%04x 0x%04x\n",
+ dprintk(1, "dvb-ttpci: @ card %d MSP34xx version 0x%04x 0x%04x\n",
av7110->dvb_adapter.num, version1, version2);
msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00);
msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
@@ -697,7 +727,7 @@ int av7110_init_analog_module(struct av7110 *av7110)
msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume
msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
- msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x4800); // prescale SCART
+ msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART
if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) {
INFO(("saa7113 not accessible.\n"));
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 824a63c92629..6d532f170ce5 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -381,6 +381,18 @@ config VIDEO_WM8739
To compile this driver as a module, choose M here: the
module will be called wm8739.
+config VIDEO_CX2341X
+ tristate "Conexant CX2341x MPEG encoders"
+ depends on VIDEO_V4L2 && EXPERIMENTAL
+ ---help---
+ Support for the Conexant CX23416 MPEG encoders
+ and CX23415 MPEG encoder/decoders.
+
+ This module currently supports the encoding functions only.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cx2341x.
+
source "drivers/media/video/cx25840/Kconfig"
config VIDEO_SAA711X
@@ -433,6 +445,8 @@ endmenu # encoder / decoder chips
menu "V4L USB devices"
depends on USB && VIDEO_DEV
+source "drivers/media/video/pvrusb2/Kconfig"
+
source "drivers/media/video/em28xx/Kconfig"
config USB_DSBR
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 6c401b46398a..353d61cfac1b 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o
+obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 3116345c93b1..e68a6d2fff24 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -4848,7 +4848,7 @@ static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input)
*
* The IVC120G security card has 4 i2c controlled TDA8540 matrix
* swichers to provide 16 channels to MUX0. The TDA8540's have
- * 4 indepedant outputs and as such the IVC120G also has the
+ * 4 independent outputs and as such the IVC120G also has the
* optional "Monitor Out" bus. This allows the card to be looking
* at one input while the monitor is looking at another.
*
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 423e954948be..aa3203ae670c 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4019,8 +4019,9 @@ static int __devinit bttv_probe(struct pci_dev *dev,
if (!request_mem_region(pci_resource_start(dev,0),
pci_resource_len(dev,0),
btv->c.name)) {
- printk(KERN_WARNING "bttv%d: can't request iomem (0x%lx).\n",
- btv->c.nr, pci_resource_start(dev,0));
+ printk(KERN_WARNING "bttv%d: can't request iomem (0x%llx).\n",
+ btv->c.nr,
+ (unsigned long long)pci_resource_start(dev,0));
return -EBUSY;
}
pci_set_master(dev);
@@ -4031,8 +4032,9 @@ static int __devinit bttv_probe(struct pci_dev *dev,
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
printk(KERN_INFO "bttv%d: Bt%d (rev %d) at %s, ",
bttv_num,btv->id, btv->revision, pci_name(dev));
- printk("irq: %d, latency: %d, mmio: 0x%lx\n",
- btv->c.pci->irq, lat, pci_resource_start(dev,0));
+ printk("irq: %d, latency: %d, mmio: 0x%llx\n",
+ btv->c.pci->irq, lat,
+ (unsigned long long)pci_resource_start(dev,0));
schedule();
btv->bt848_mmio=ioremap(pci_resource_start(dev,0), 0x1000);
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 554813e6f65d..65f00fc08fa9 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -43,6 +43,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
const u32 cx2341x_mpeg_ctrls[] = {
V4L2_CID_MPEG_CLASS,
V4L2_CID_MPEG_STREAM_TYPE,
+ V4L2_CID_MPEG_STREAM_VBI_FMT,
V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
V4L2_CID_MPEG_AUDIO_ENCODING,
V4L2_CID_MPEG_AUDIO_L2_BITRATE,
@@ -135,6 +136,9 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params,
case V4L2_CID_MPEG_STREAM_TYPE:
ctrl->value = params->stream_type;
break;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ ctrl->value = params->stream_vbi_fmt;
+ break;
case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
ctrl->value = params->video_spatial_filter_mode;
break;
@@ -257,6 +261,9 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params,
params->video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
}
break;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ params->stream_vbi_fmt = ctrl->value;
+ break;
case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
params->video_spatial_filter_mode = ctrl->value;
break;
@@ -418,6 +425,14 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params, struct v4l2_queryctrl
qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
return err;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ if (params->capabilities & CX2341X_CAP_HAS_SLICED_VBI)
+ return v4l2_ctrl_query_fill_std(qctrl);
+ return cx2341x_ctrl_query_fill(qctrl,
+ V4L2_MPEG_STREAM_VBI_FMT_NONE,
+ V4L2_MPEG_STREAM_VBI_FMT_NONE, 1,
+ V4L2_MPEG_STREAM_VBI_FMT_NONE);
+
/* CX23415/6 specific */
case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
return cx2341x_ctrl_query_fill(qctrl,
@@ -586,7 +601,7 @@ static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
}
int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
- struct v4l2_ext_controls *ctrls, int cmd)
+ struct v4l2_ext_controls *ctrls, unsigned int cmd)
{
int err = 0;
int i;
@@ -639,6 +654,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
{
static struct cx2341x_mpeg_params default_params = {
/* misc */
+ .capabilities = 0,
.port = CX2341X_PORT_MEMORY,
.width = 720,
.height = 480,
@@ -646,6 +662,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
/* stream */
.stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
+ .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE,
/* audio */
.audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000,
@@ -830,22 +847,22 @@ invalid:
return "<invalid>";
}
-void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
+void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix)
{
int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
/* Stream */
- printk(KERN_INFO "cx2341x-%d: Stream: %s\n",
- card_id,
+ printk(KERN_INFO "%s: Stream: %s\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
/* Video */
- printk(KERN_INFO "cx2341x-%d: Video: %dx%d, %d fps\n",
- card_id,
+ printk(KERN_INFO "%s: Video: %dx%d, %d fps\n",
+ prefix,
p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1),
p->is_50hz ? 25 : 30);
- printk(KERN_INFO "cx2341x-%d: Video: %s, %s, %s, %d",
- card_id,
+ printk(KERN_INFO "%s: Video: %s, %s, %s, %d",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING),
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT),
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
@@ -854,19 +871,19 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
printk(", Peak %d", p->video_bitrate_peak);
}
printk("\n");
- printk(KERN_INFO "cx2341x-%d: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n",
- card_id,
+ printk(KERN_INFO "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n",
+ prefix,
p->video_gop_size, p->video_b_frames,
p->video_gop_closure ? "" : "No ",
p->video_pulldown ? "" : "No ");
if (p->video_temporal_decimation) {
- printk(KERN_INFO "cx2341x-%d: Video: Temporal Decimation %d\n",
- card_id, p->video_temporal_decimation);
+ printk(KERN_INFO "%s: Video: Temporal Decimation %d\n",
+ prefix, p->video_temporal_decimation);
}
/* Audio */
- printk(KERN_INFO "cx2341x-%d: Audio: %s, %s, %s, %s",
- card_id,
+ printk(KERN_INFO "%s: Audio: %s, %s, %s, %s",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ),
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING),
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE),
@@ -880,18 +897,18 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
/* Encoding filters */
- printk(KERN_INFO "cx2341x-%d: Spatial Filter: %s, Luma %s, Chroma %s, %d\n",
- card_id,
+ printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE),
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE),
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE),
p->video_spatial_filter);
- printk(KERN_INFO "cx2341x-%d: Temporal Filter: %s, %d\n",
- card_id,
+ printk(KERN_INFO "%s: Temporal Filter: %s, %d\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE),
p->video_temporal_filter);
- printk(KERN_INFO "cx2341x-%d: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n",
- card_id,
+ printk(KERN_INFO "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE),
p->video_luma_median_filter_bottom,
p->video_luma_median_filter_top,
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 91e1c481a164..80e23ee9801c 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -11,7 +11,6 @@ config VIDEO_CX88
select VIDEO_BUF
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEO_CX2341X
select VIDEO_IR
---help---
This is a video4linux driver for Conexant 2388x based
@@ -36,13 +35,25 @@ config VIDEO_CX88_ALSA
To compile this driver as a module, choose M here: the
module will be called cx88-alsa.
+config VIDEO_CX88_BLACKBIRD
+ tristate "Blackbird MPEG encoder support (cx2388x + cx23416)"
+ depends on VIDEO_CX88
+ select VIDEO_CX2341X
+ ---help---
+ This adds support for MPEG encoder cards based on the
+ Blackbird reference design, using the Conexant 2388x
+ and 23416 chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cx88-blackbird.
+
config VIDEO_CX88_DVB
tristate "DVB/ATSC Support for cx2388x based TV cards"
depends on VIDEO_CX88 && DVB_CORE
select VIDEO_BUF_DVB
---help---
This adds support for DVB/ATSC cards based on the
- Connexant 2388x chip.
+ Conexant 2388x chip.
To compile this driver as a module, choose M here: the
module will be called cx88-dvb.
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 0dcd09b9b727..352b919f30c4 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -3,9 +3,10 @@ cx88xx-objs := cx88-cards.o cx88-core.o cx88-i2c.o cx88-tvaudio.o \
cx8800-objs := cx88-video.o cx88-vbi.o
cx8802-objs := cx88-mpeg.o
-obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o cx8802.o cx88-blackbird.o
-obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
+obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o cx8802.o
obj-$(CONFIG_VIDEO_CX88_ALSA) += cx88-alsa.o
+obj-$(CONFIG_VIDEO_CX88_BLACKBIRD) += cx88-blackbird.o
+obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 2194cbeca33b..292a5e81eb75 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -712,9 +712,9 @@ static int __devinit snd_cx88_create(struct snd_card *card,
pci_read_config_byte(pci, PCI_LATENCY_TIMER, &chip->pci_lat);
dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%lx\n", core->name, devno,
+ "latency: %d, mmio: 0x%llx\n", core->name, devno,
pci_name(pci), chip->pci_rev, pci->irq,
- chip->pci_lat,pci_resource_start(pci,0));
+ chip->pci_lat,(unsigned long long)pci_resource_start(pci,0));
chip->irq = pci->irq;
synchronize_irq(chip->irq);
@@ -766,8 +766,8 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
strcpy (card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
- sprintf(card->longname, "%s at %#lx",
- card->shortname, pci_resource_start(pci, 0));
+ sprintf(card->longname, "%s at %#llx",
+ card->shortname,(unsigned long long)pci_resource_start(pci, 0));
strcpy (card->mixername, "CX88");
dprintk (0, "%s/%i: ALSA support for cx2388x boards\n",
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 67fd3302e8f2..4ff81582ec56 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -846,24 +846,33 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file,
BLACKBIRD_MPEG_CAPTURE,
BLACKBIRD_RAW_BITS_NONE);
- cx88_do_ioctl( inode, file, 0, dev->core, cmd, arg, cx88_ioctl_hook );
+ cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl);
blackbird_initialize_codec(dev);
cx88_set_scale(dev->core, dev->width, dev->height,
fh->mpegq.field);
return 0;
}
+ case VIDIOC_LOG_STATUS:
+ {
+ char name[32 + 2];
+
+ snprintf(name, sizeof(name), "%s/2", core->name);
+ printk("%s/2: ============ START LOG STATUS ============\n",
+ core->name);
+ cx88_call_i2c_clients(core, VIDIOC_LOG_STATUS, 0);
+ cx2341x_log_status(&dev->params, name);
+ printk("%s/2: ============= END LOG STATUS =============\n",
+ core->name);
+ return 0;
+ }
default:
- return cx88_do_ioctl( inode, file, 0, dev->core, cmd, arg, cx88_ioctl_hook );
+ return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl);
}
return 0;
}
-int (*cx88_ioctl_hook)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg);
-unsigned int (*cx88_ioctl_translator)(unsigned int cmd);
-
static unsigned int mpeg_translate_ioctl(unsigned int cmd)
{
return cmd;
@@ -872,8 +881,8 @@ static unsigned int mpeg_translate_ioctl(unsigned int cmd)
static int mpeg_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
- cmd = cx88_ioctl_translator( cmd );
- return video_usercopy(inode, file, cmd, arg, cx88_ioctl_hook);
+ cmd = mpeg_translate_ioctl( cmd );
+ return video_usercopy(inode, file, cmd, arg, mpeg_do_ioctl);
}
static int mpeg_open(struct inode *inode, struct file *file)
@@ -1119,8 +1128,6 @@ static int blackbird_init(void)
printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
- cx88_ioctl_hook = mpeg_do_ioctl;
- cx88_ioctl_translator = mpeg_translate_ioctl;
return pci_register_driver(&blackbird_pci_driver);
}
@@ -1132,9 +1139,6 @@ static void blackbird_fini(void)
module_init(blackbird_init);
module_exit(blackbird_fini);
-EXPORT_SYMBOL(cx88_ioctl_hook);
-EXPORT_SYMBOL(cx88_ioctl_translator);
-
/* ----------------------------------------------------------- */
/*
* Local variables:
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 67cdd8270863..f9d68f20dc88 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1700,11 +1700,6 @@ void cx88_card_setup(struct cx88_core *core)
/* ------------------------------------------------------------------ */
EXPORT_SYMBOL(cx88_boards);
-EXPORT_SYMBOL(cx88_bcount);
-EXPORT_SYMBOL(cx88_subids);
-EXPORT_SYMBOL(cx88_idcount);
-EXPORT_SYMBOL(cx88_card_list);
-EXPORT_SYMBOL(cx88_card_setup);
/*
* Local variables:
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index c56292d8d93b..973d3f39b2d5 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -1031,8 +1031,8 @@ static int get_ressources(struct cx88_core *core, struct pci_dev *pci)
pci_resource_len(pci,0),
core->name))
return 0;
- printk(KERN_ERR "%s: can't get MMIO memory @ 0x%lx\n",
- core->name,pci_resource_start(pci,0));
+ printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
+ core->name,(unsigned long long)pci_resource_start(pci,0));
return -EBUSY;
}
@@ -1181,8 +1181,6 @@ EXPORT_SYMBOL(cx88_set_scale);
EXPORT_SYMBOL(cx88_vdev_init);
EXPORT_SYMBOL(cx88_core_get);
EXPORT_SYMBOL(cx88_core_put);
-EXPORT_SYMBOL(cx88_start_audio_dma);
-EXPORT_SYMBOL(cx88_stop_audio_dma);
/*
* Local variables:
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 7efa6def0bde..70663805cc30 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -234,7 +234,6 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
/* ----------------------------------------------------------------------- */
EXPORT_SYMBOL(cx88_call_i2c_clients);
-EXPORT_SYMBOL(cx88_i2c_init);
/*
* Local variables:
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index a9d7795a8e14..2c12aca1b6a3 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -420,9 +420,9 @@ int cx8802_init_common(struct cx8802_dev *dev)
pci_read_config_byte(dev->pci, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(dev->pci, PCI_LATENCY_TIMER, &dev->pci_lat);
printk(KERN_INFO "%s/2: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%lx\n", dev->core->name,
+ "latency: %d, mmio: 0x%llx\n", dev->core->name,
pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
- dev->pci_lat,pci_resource_start(dev->pci,0));
+ dev->pci_lat,(unsigned long long)pci_resource_start(dev->pci,0));
/* initialize driver struct */
spin_lock_init(&dev->slock);
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 1e4278b588d8..5785c3481579 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -726,7 +726,7 @@ static void set_audio_standard_FM(struct cx88_core *core,
/* ----------------------------------------------------------- */
-int cx88_detect_nicam(struct cx88_core *core)
+static int cx88_detect_nicam(struct cx88_core *core)
{
int i, j = 0;
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 694d1d80ff3f..8d5cf474b68e 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -494,8 +494,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
return 0;
buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
start_video_dma(dev, q, buf);
buf->vb.state = STATE_ACTIVE;
buf->count = q->count++;
@@ -506,8 +505,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = STATE_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
@@ -1849,9 +1847,9 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%lx\n", core->name,
+ "latency: %d, mmio: 0x%llx\n", core->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
- dev->pci_lat,pci_resource_start(pci_dev,0));
+ dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
pci_set_master(pci_dev);
if (!pci_dma_supported(pci_dev,0xffffffff)) {
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index dc7bc35f18f4..9a9a0fc7a41a 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -563,7 +563,6 @@ void cx88_newstation(struct cx88_core *core);
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t);
void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual);
int cx88_audio_thread(void *data);
-int cx88_detect_nicam(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-input.c */
@@ -592,12 +591,6 @@ extern int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
struct cx88_core *core, unsigned int cmd,
void *arg, v4l2_kioctl driver_ioctl);
-/* ----------------------------------------------------------- */
-/* cx88-blackbird.c */
-extern int (*cx88_ioctl_hook)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg);
-extern unsigned int (*cx88_ioctl_translator)(unsigned int cmd);
-
/*
* Local variables:
* c-basic-offset: 8
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
new file mode 100644
index 000000000000..7e727fe14b32
--- /dev/null
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -0,0 +1,62 @@
+config VIDEO_PVRUSB2
+ tristate "Hauppauge WinTV-PVR USB2 support"
+ depends on VIDEO_V4L2 && USB && I2C && EXPERIMENTAL
+ select FW_LOADER
+ select VIDEO_TUNER
+ select VIDEO_TVEEPROM
+ select VIDEO_CX2341X
+ select VIDEO_SAA711X
+ select VIDEO_MSP3400
+ ---help---
+ This is a video4linux driver for Conexant 23416 based
+ usb2 personal video recorder devices.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called pvrusb2.
+
+config VIDEO_PVRUSB2_24XXX
+ bool "Hauppauge WinTV-PVR USB2 support for 24xxx model series"
+ depends on VIDEO_PVRUSB2 && EXPERIMENTAL
+ select VIDEO_CX25840
+ select VIDEO_WM8775
+ ---help---
+ This option enables inclusion of additional logic to operate
+ newer WinTV-PVR USB2 devices whose model number is of the
+ form "24xxx" (leading prefix of "24" followed by 3 digits).
+ To see if you may need this option, examine the white
+ sticker on the underside of your device. Enabling this
+ option will not harm support for older devices, however it
+	  option will not harm support for older devices; however, it
+	  is a separate option because of the experimental nature of
+
+ If you are in doubt, say N.
+
+ Note: This feature is _very_ experimental. You have been
+ warned.
+
+config VIDEO_PVRUSB2_SYSFS
+ bool "pvrusb2 sysfs support (EXPERIMENTAL)"
+ default y
+ depends on VIDEO_PVRUSB2 && SYSFS && EXPERIMENTAL
+ ---help---
+ This option enables the operation of a sysfs based
+ interface for query and control of the pvrusb2 driver.
+
+ This is not generally needed for v4l applications,
+ although certain applications are optimized to take
+ advantage of this feature.
+
+ If you are in doubt, say Y.
+
+ Note: This feature is experimental and subject to change.
+
+config VIDEO_PVRUSB2_DEBUGIFC
+ bool "pvrusb2 debug interface"
+ depends on VIDEO_PVRUSB2_SYSFS
+ ---help---
+ This option enables the inclusion of a debug interface
+ in the pvrusb2 driver, hosted through sysfs.
+
+ You do not need to select this option unless you plan
+ on debugging the driver or performing a manual firmware
+ extraction.
diff --git a/drivers/media/video/pvrusb2/Makefile b/drivers/media/video/pvrusb2/Makefile
new file mode 100644
index 000000000000..fed603ad0a67
--- /dev/null
+++ b/drivers/media/video/pvrusb2/Makefile
@@ -0,0 +1,18 @@
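+# Optional pieces are gathered into per-feature object lists; each list
+# is non-empty only when its config symbol is enabled, so the extra
+# objects are linked into the pvrusb2 module only when actually selected.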
+obj-pvrusb2-sysfs-$(CONFIG_VIDEO_PVRUSB2_SYSFS) := pvrusb2-sysfs.o
+obj-pvrusb2-debugifc-$(CONFIG_VIDEO_PVRUSB2_DEBUGIFC) := pvrusb2-debugifc.o
+
+obj-pvrusb2-24xxx-$(CONFIG_VIDEO_PVRUSB2_24XXX) := \
+ pvrusb2-cx2584x-v4l.o \
+ pvrusb2-wm8775.o
+
+pvrusb2-objs := pvrusb2-i2c-core.o pvrusb2-i2c-cmd-v4l2.o \
+ pvrusb2-audio.o pvrusb2-i2c-chips-v4l2.o \
+ pvrusb2-encoder.o pvrusb2-video-v4l.o \
+ pvrusb2-eeprom.o pvrusb2-tuner.o pvrusb2-demod.o \
+ pvrusb2-main.o pvrusb2-hdw.o pvrusb2-v4l2.o \
+ pvrusb2-ctrl.o pvrusb2-std.o \
+ pvrusb2-context.o pvrusb2-io.o pvrusb2-ioread.o \
+ $(obj-pvrusb2-24xxx-y) \
+ $(obj-pvrusb2-sysfs-y) $(obj-pvrusb2-debugifc-y)
+
+obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
new file mode 100644
index 000000000000..313d2dcf9e4b
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -0,0 +1,204 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-audio.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/msp3400.h>
+#include <media/v4l2-common.h>
+
+struct pvr2_msp3400_handler {
+ struct pvr2_hdw *hdw;
+ struct pvr2_i2c_client *client;
+ struct pvr2_i2c_handler i2c_handler;
+ struct pvr2_audio_stat astat;
+ unsigned long stale_mask;
+};
+
+
+/* This function selects the correct audio input source */
+static void set_stereo(struct pvr2_msp3400_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct v4l2_routing route;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo");
+
+ if (hdw->input_val == PVR2_CVAL_INPUT_TV) {
+ struct v4l2_tuner vt;
+ memset(&vt,0,sizeof(vt));
+ vt.audmode = hdw->audiomode_val;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_S_TUNER,&vt);
+ }
+
+ route.input = MSP_INPUT_DEFAULT;
+ route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
+ switch (hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ break;
+ case PVR2_CVAL_INPUT_RADIO:
+ /* Assume that msp34xx also handle FM decoding, in which case
+ we're still using the tuner. */
+ /* HV: actually it is more likely to be the SCART2 input if
+ the ivtv experience is any indication. */
+ route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ /* SCART 1 input */
+ route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
+ break;
+ }
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
+}
+
+
+static int check_stereo(struct pvr2_msp3400_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return (hdw->input_dirty ||
+ hdw->audiomode_dirty);
+}
+
+
+struct pvr2_msp3400_ops {
+ void (*update)(struct pvr2_msp3400_handler *);
+ int (*check)(struct pvr2_msp3400_handler *);
+};
+
+
+static const struct pvr2_msp3400_ops msp3400_ops[] = {
+ { .update = set_stereo, .check = check_stereo},
+};
+
+
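+/* Scan the ops table and record (in stale_mask) every setting whose
+   check routine reports a pending change; returns nonzero if any
+   update is needed. */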
+static int msp3400_check(struct pvr2_msp3400_handler *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (msp3400_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
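+/* Re-send configuration for each op flagged in stale_mask, clearing
+   the corresponding bit as it is handled. */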
+static void msp3400_update(struct pvr2_msp3400_handler *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ msp3400_ops[idx].update(ctxt);
+ }
+}
+
+
+/* This reads back the current signal type */
+static int get_audio_status(struct pvr2_msp3400_handler *ctxt)
+{
+ struct v4l2_tuner vt;
+ int stat;
+
+ memset(&vt,0,sizeof(vt));
+ stat = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
+ if (stat < 0) return stat;
+
+ ctxt->hdw->flag_stereo = (vt.audmode & V4L2_TUNER_MODE_STEREO) != 0;
+ ctxt->hdw->flag_bilingual =
+ (vt.audmode & V4L2_TUNER_MODE_LANG2) != 0;
+ return 0;
+}
+
+
+static void pvr2_msp3400_detach(struct pvr2_msp3400_handler *ctxt)
+{
+ ctxt->client->handler = 0;
+ ctxt->hdw->audio_stat = 0;
+ kfree(ctxt);
+}
+
+
+static unsigned int pvr2_msp3400_describe(struct pvr2_msp3400_handler *ctxt,
+ char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-audio v4l2");
+}
+
+
+static const struct pvr2_i2c_handler_functions msp3400_funcs = {
+ .detach = (void (*)(void *))pvr2_msp3400_detach,
+ .check = (int (*)(void *))msp3400_check,
+ .update = (void (*)(void *))msp3400_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_msp3400_describe,
+};
+
+
+int pvr2_i2c_msp3400_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_msp3400_handler *ctxt;
+ if (hdw->audio_stat) return 0;
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->i2c_handler.func_data = ctxt;
+ ctxt->i2c_handler.func_table = &msp3400_funcs;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->astat.ctxt = ctxt;
+ ctxt->astat.status = (int (*)(void *))get_audio_status;
+ ctxt->astat.detach = (void (*)(void *))pvr2_msp3400_detach;
+ ctxt->stale_mask = (1 << (sizeof(msp3400_ops)/
+ sizeof(msp3400_ops[0]))) - 1;
+ cp->handler = &ctxt->i2c_handler;
+ hdw->audio_stat = &ctxt->astat;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x msp3400 V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.h b/drivers/media/video/pvrusb2/pvrusb2-audio.h
new file mode 100644
index 000000000000..536339b68843
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_AUDIO_H
+#define __PVRUSB2_AUDIO_H
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_msp3400_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+#endif /* __PVRUSB2_AUDIO_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
new file mode 100644
index 000000000000..40dc59871a45
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -0,0 +1,230 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-context.h"
+#include "pvrusb2-io.h"
+#include "pvrusb2-ioread.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-debug.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+
+
+static void pvr2_context_destroy(struct pvr2_context *mp)
+{
+ if (mp->hdw) pvr2_hdw_destroy(mp->hdw);
+ pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr_main id=%p",mp);
+ flush_workqueue(mp->workqueue);
+ destroy_workqueue(mp->workqueue);
+ kfree(mp);
+}
+
+
+static void pvr2_context_trigger_poll(struct pvr2_context *mp)
+{
+ queue_work(mp->workqueue,&mp->workpoll);
+}
+
+
+static void pvr2_context_poll(struct pvr2_context *mp)
+{
+ pvr2_context_enter(mp); do {
+ pvr2_hdw_poll(mp->hdw);
+ } while (0); pvr2_context_exit(mp);
+}
+
+
+static void pvr2_context_setup(struct pvr2_context *mp)
+{
+ pvr2_context_enter(mp); do {
+ if (!pvr2_hdw_dev_ok(mp->hdw)) break;
+ pvr2_hdw_setup(mp->hdw);
+ pvr2_hdw_setup_poll_trigger(
+ mp->hdw,
+ (void (*)(void *))pvr2_context_trigger_poll,
+ mp);
+ if (!pvr2_hdw_dev_ok(mp->hdw)) break;
+ if (!pvr2_hdw_init_ok(mp->hdw)) break;
+ mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw);
+ if (mp->setup_func) {
+ mp->setup_func(mp);
+ }
+ } while (0); pvr2_context_exit(mp);
+}
+
+
+struct pvr2_context *pvr2_context_create(
+ struct usb_interface *intf,
+ const struct usb_device_id *devid,
+ void (*setup_func)(struct pvr2_context *))
+{
+ struct pvr2_context *mp = 0;
+ mp = kmalloc(sizeof(*mp),GFP_KERNEL);
+ if (!mp) goto done;
+ memset(mp,0,sizeof(*mp));
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_main id=%p",mp);
+ mp->setup_func = setup_func;
+ mutex_init(&mp->mutex);
+ mp->hdw = pvr2_hdw_create(intf,devid);
+ if (!mp->hdw) {
+ pvr2_context_destroy(mp);
+ mp = 0;
+ goto done;
+ }
+
+ mp->workqueue = create_singlethread_workqueue("pvrusb2");
+ INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
+ INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+ queue_work(mp->workqueue,&mp->workinit);
+ done:
+ return mp;
+}
+
+
+void pvr2_context_enter(struct pvr2_context *mp)
+{
+ mutex_lock(&mp->mutex);
+ pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_enter(id=%p)",mp);
+}
+
+
+void pvr2_context_exit(struct pvr2_context *mp)
+{
+ int destroy_flag = 0;
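+	/* The context is destroyed only once no channels remain attached
+	   and the underlying device has been disconnected. */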
+ if (!(mp->mc_first || !mp->disconnect_flag)) {
+ destroy_flag = !0;
+ }
+ pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_exit(id=%p) outside",mp);
+ mutex_unlock(&mp->mutex);
+ if (destroy_flag) pvr2_context_destroy(mp);
+}
+
+
+static void pvr2_context_run_checks(struct pvr2_context *mp)
+{
+ struct pvr2_channel *ch1,*ch2;
+ for (ch1 = mp->mc_first; ch1; ch1 = ch2) {
+ ch2 = ch1->mc_next;
+ if (ch1->check_func) {
+ ch1->check_func(ch1);
+ }
+ }
+}
+
+
+void pvr2_context_disconnect(struct pvr2_context *mp)
+{
+ pvr2_context_enter(mp); do {
+ pvr2_hdw_disconnect(mp->hdw);
+ mp->disconnect_flag = !0;
+ pvr2_context_run_checks(mp);
+ } while (0); pvr2_context_exit(mp);
+}
+
+
+void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp)
+{
+ cp->hdw = mp->hdw;
+ cp->mc_head = mp;
+ cp->mc_next = 0;
+ cp->mc_prev = mp->mc_last;
+ if (mp->mc_last) {
+ mp->mc_last->mc_next = cp;
+ } else {
+ mp->mc_first = cp;
+ }
+ mp->mc_last = cp;
+}
+
+
+static void pvr2_channel_disclaim_stream(struct pvr2_channel *cp)
+{
+ if (!cp->stream) return;
+ pvr2_stream_kill(cp->stream->stream);
+ cp->stream->user = 0;
+ cp->stream = 0;
+}
+
+
+void pvr2_channel_done(struct pvr2_channel *cp)
+{
+ struct pvr2_context *mp = cp->mc_head;
+ pvr2_channel_disclaim_stream(cp);
+ if (cp->mc_next) {
+ cp->mc_next->mc_prev = cp->mc_prev;
+ } else {
+ mp->mc_last = cp->mc_prev;
+ }
+ if (cp->mc_prev) {
+ cp->mc_prev->mc_next = cp->mc_next;
+ } else {
+ mp->mc_first = cp->mc_next;
+ }
+ cp->hdw = 0;
+}
+
+
+int pvr2_channel_claim_stream(struct pvr2_channel *cp,
+ struct pvr2_context_stream *sp)
+{
+ int code = 0;
+ pvr2_context_enter(cp->mc_head); do {
+ if (sp == cp->stream) break;
+ if (sp->user) {
+ code = -EBUSY;
+ break;
+ }
+ pvr2_channel_disclaim_stream(cp);
+ if (!sp) break;
+ sp->user = cp;
+ cp->stream = sp;
+ } while (0); pvr2_context_exit(cp->mc_head);
+ return code;
+}
+
+
+// This is the marker for the real beginning of a legitimate mpeg2 stream.
+static char stream_sync_key[] = {
+ 0x00, 0x00, 0x01, 0xba,
+};
+
+struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
+ struct pvr2_context_stream *sp)
+{
+ struct pvr2_ioread *cp;
+ cp = pvr2_ioread_create();
+ if (!cp) return 0;
+ pvr2_ioread_setup(cp,sp->stream);
+ pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key));
+ return cp;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.h b/drivers/media/video/pvrusb2/pvrusb2-context.h
new file mode 100644
index 000000000000..6327fa1f7e4f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.h
@@ -0,0 +1,92 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_CONTEXT_H
+#define __PVRUSB2_CONTEXT_H
+
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+
+struct pvr2_hdw; /* hardware interface - defined elsewhere */
+struct pvr2_stream; /* stream interface - defined elsewhere */
+
+struct pvr2_context; /* All central state */
+struct pvr2_channel; /* One I/O pathway to a user */
+struct pvr2_context_stream; /* Wrapper for a stream */
+struct pvr2_crit_reg; /* Critical region pointer */
+struct pvr2_ioread; /* Low level stream structure */
+
+struct pvr2_context_stream {
+ struct pvr2_channel *user;
+ struct pvr2_stream *stream;
+};
+
+struct pvr2_context {
+ struct pvr2_channel *mc_first;
+ struct pvr2_channel *mc_last;
+ struct pvr2_hdw *hdw;
+ struct pvr2_context_stream video_stream;
+ struct mutex mutex;
+ int disconnect_flag;
+
+ /* Called after pvr2_context initialization is complete */
+ void (*setup_func)(struct pvr2_context *);
+
+ /* Work queue overhead for out-of-line processing */
+ struct workqueue_struct *workqueue;
+ struct work_struct workinit;
+ struct work_struct workpoll;
+};
+
+struct pvr2_channel {
+ struct pvr2_context *mc_head;
+ struct pvr2_channel *mc_next;
+ struct pvr2_channel *mc_prev;
+ struct pvr2_context_stream *stream;
+ struct pvr2_hdw *hdw;
+ void (*check_func)(struct pvr2_channel *);
+};
+
+void pvr2_context_enter(struct pvr2_context *);
+void pvr2_context_exit(struct pvr2_context *);
+
+struct pvr2_context *pvr2_context_create(struct usb_interface *intf,
+ const struct usb_device_id *devid,
+ void (*setup_func)(struct pvr2_context *));
+void pvr2_context_disconnect(struct pvr2_context *);
+
+void pvr2_channel_init(struct pvr2_channel *,struct pvr2_context *);
+void pvr2_channel_done(struct pvr2_channel *);
+int pvr2_channel_claim_stream(struct pvr2_channel *,
+ struct pvr2_context_stream *);
+struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
+ struct pvr2_context_stream *);
+
+
+#endif /* __PVRUSB2_CONTEXT_H */
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
new file mode 100644
index 000000000000..d5df9fbeba2f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
@@ -0,0 +1,593 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-ctrl.h"
+#include "pvrusb2-hdw-internal.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+
+
+/* Set the given control. */
+int pvr2_ctrl_set_value(struct pvr2_ctrl *cptr,int val)
+{
+ return pvr2_ctrl_set_mask_value(cptr,~0,val);
+}
+
+
+/* Set/clear specific bits of the given control. */
+int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *cptr,int mask,int val)
+{
+ int ret = 0;
+ if (!cptr) return -EINVAL;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->set_value != 0) {
+ if (cptr->info->type == pvr2_ctl_bitmask) {
+ mask &= cptr->info->def.type_bitmask.valid_bits;
+ } else if (cptr->info->type == pvr2_ctl_int) {
+ if (val < cptr->info->def.type_int.min_value) {
+ break;
+ }
+ if (val > cptr->info->def.type_int.max_value) {
+ break;
+ }
+ } else if (cptr->info->type == pvr2_ctl_enum) {
+ if (val >= cptr->info->def.type_enum.count) {
+ break;
+ }
+ } else if (cptr->info->type != pvr2_ctl_bool) {
+ break;
+ }
+ ret = cptr->info->set_value(cptr,mask,val);
+ } else {
+ ret = -EPERM;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Get the current value of the given control. */
+int pvr2_ctrl_get_value(struct pvr2_ctrl *cptr,int *valptr)
+{
+ int ret = 0;
+ if (!cptr) return -EINVAL;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ ret = cptr->info->get_value(cptr,valptr);
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's type */
+enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return pvr2_ctl_int;
+ return cptr->info->type;
+}
+
+
+/* Retrieve control's maximum value (int type) */
+int pvr2_ctrl_get_max(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = cptr->info->def.type_int.max_value;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's minimum value (int type) */
+int pvr2_ctrl_get_min(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = cptr->info->def.type_int.min_value;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's default value (any type) */
+int pvr2_ctrl_get_def(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = cptr->info->default_value;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's enumeration count (enum only) */
+int pvr2_ctrl_get_cnt(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_enum) {
+ ret = cptr->info->def.type_enum.count;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's valid mask bits (bit mask only) */
+int pvr2_ctrl_get_mask(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_bitmask) {
+ ret = cptr->info->def.type_bitmask.valid_bits;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve the control's name */
+const char *pvr2_ctrl_get_name(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->name;
+}
+
+
+/* Retrieve the control's desc */
+const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->desc;
+}
+
+
+/* Retrieve a control enumeration or bit mask value */
+int pvr2_ctrl_get_valname(struct pvr2_ctrl *cptr,int val,
+ char *bptr,unsigned int bmax,
+ unsigned int *blen)
+{
+ int ret = -EINVAL;
+ if (!cptr) return 0;
+ *blen = 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_enum) {
+ const char **names;
+ names = cptr->info->def.type_enum.value_names;
+ if ((val >= 0) &&
+ (val < cptr->info->def.type_enum.count)) {
+ if (names[val]) {
+ *blen = scnprintf(
+ bptr,bmax,"%s",
+ names[val]);
+ } else {
+ *blen = 0;
+ }
+ ret = 0;
+ }
+ } else if (cptr->info->type == pvr2_ctl_bitmask) {
+ const char **names;
+ unsigned int idx;
+ int msk;
+ names = cptr->info->def.type_bitmask.bit_names;
+ val &= cptr->info->def.type_bitmask.valid_bits;
+ for (idx = 0, msk = 1; val; idx++, msk <<= 1) {
+ if (val & msk) {
+ *blen = scnprintf(bptr,bmax,"%s",
+ names[idx]);
+ ret = 0;
+ break;
+ }
+ }
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Return V4L ID for this control or zero if none */
+int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->v4l_id;
+}
+
+
+unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *cptr)
+{
+ unsigned int flags = 0;
+
+ if (cptr->info->get_v4lflags) {
+ flags = cptr->info->get_v4lflags(cptr);
+ }
+
+ if (cptr->info->set_value) {
+ flags &= ~V4L2_CTRL_FLAG_READ_ONLY;
+ } else {
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ return flags;
+}
+
+
+/* Return true if control is writable */
+int pvr2_ctrl_is_writable(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->set_value != 0;
+}
+
+
+/* Return true if control has custom symbolic representation */
+int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ if (!cptr->info->val_to_sym) return 0;
+ if (!cptr->info->sym_to_val) return 0;
+ return !0;
+}
+
+
+/* Convert a given mask/val to a custom symbolic value */
+int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *cptr,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len)
+{
+ if (!cptr) return -EINVAL;
+ if (!cptr->info->val_to_sym) return -EINVAL;
+ return cptr->info->val_to_sym(cptr,mask,val,buf,maxlen,len);
+}
+
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *cptr,
+ const char *buf,unsigned int len,
+ int *maskptr,int *valptr)
+{
+ if (!cptr) return -EINVAL;
+ if (!cptr->info->sym_to_val) return -EINVAL;
+ return cptr->info->sym_to_val(cptr,buf,len,maskptr,valptr);
+}
+
+
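+/* Render the named bits of a mask as a space-separated string.  Unless
+   msk_only is set, each name is prefixed with '+' or '-' according to
+   val; bits lacking a name are emitted together as a hex constant.
+   Returns the number of characters written. */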
+static unsigned int gen_bitmask_string(int msk,int val,int msk_only,
+ const char **names,
+ char *ptr,unsigned int len)
+{
+ unsigned int idx;
+ long sm,um;
+ int spcFl;
+ unsigned int uc,cnt;
+ const char *idStr;
+
+ spcFl = 0;
+ uc = 0;
+ um = 0;
+ for (idx = 0, sm = 1; msk; idx++, sm <<= 1) {
+ if (sm & msk) {
+ msk &= ~sm;
+ idStr = names[idx];
+ if (idStr) {
+ cnt = scnprintf(ptr,len,"%s%s%s",
+ (spcFl ? " " : ""),
+ (msk_only ? "" :
+ ((val & sm) ? "+" : "-")),
+ idStr);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ } else {
+ um |= sm;
+ }
+ }
+ }
+ if (um) {
+ if (msk_only) {
+ cnt = scnprintf(ptr,len,"%s0x%lx",
+ (spcFl ? " " : ""),
+ um);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ } else if (um & val) {
+ cnt = scnprintf(ptr,len,"%s+0x%lx",
+ (spcFl ? " " : ""),
+ um & val);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ } else if (um & ~val) {
+ cnt = scnprintf(ptr,len,"%s+0x%lx",
+ (spcFl ? " " : ""),
+ um & ~val);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ }
+ }
+ return uc;
+}
+
+
+static const char *boolNames[] = {
+ "false",
+ "true",
+ "no",
+ "yes",
+};
+
+
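+/* Interpret a single token: a match in the supplied name table returns
+   0 with the matching index, otherwise the token is parsed as an
+   optionally signed number (returning 1), or -EINVAL on failure. */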
+static int parse_token(const char *ptr,unsigned int len,
+ int *valptr,
+ const char **names,unsigned int namecnt)
+{
+ char buf[33];
+ unsigned int slen;
+ unsigned int idx;
+ int negfl;
+ char *p2;
+ *valptr = 0;
+ if (!names) namecnt = 0;
+ for (idx = 0; idx < namecnt; idx++) {
+ if (!names[idx]) continue;
+ slen = strlen(names[idx]);
+ if (slen != len) continue;
+ if (memcmp(names[idx],ptr,slen)) continue;
+ *valptr = idx;
+ return 0;
+ }
+ negfl = 0;
+ if ((*ptr == '-') || (*ptr == '+')) {
+ negfl = (*ptr == '-');
+ ptr++; len--;
+ }
+ if (len >= sizeof(buf)) return -EINVAL;
+ memcpy(buf,ptr,len);
+ buf[len] = 0;
+ *valptr = simple_strtol(buf,&p2,0);
+ if (negfl) *valptr = -(*valptr);
+ if (*p2) return -EINVAL;
+ return 1;
+}
+
+
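+/* Like parse_token, but match against bit names: a name hit yields the
+   corresponding mask bit; anything else is parsed as a plain number. */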
+static int parse_mtoken(const char *ptr,unsigned int len,
+ int *valptr,
+ const char **names,int valid_bits)
+{
+ char buf[33];
+ unsigned int slen;
+ unsigned int idx;
+ char *p2;
+ int msk;
+ *valptr = 0;
+ for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
+		if (!(msk & valid_bits)) continue;
+ valid_bits &= ~msk;
+ if (!names[idx]) continue;
+ slen = strlen(names[idx]);
+ if (slen != len) continue;
+ if (memcmp(names[idx],ptr,slen)) continue;
+ *valptr = msk;
+ return 0;
+ }
+ if (len >= sizeof(buf)) return -EINVAL;
+ memcpy(buf,ptr,len);
+ buf[len] = 0;
+ *valptr = simple_strtol(buf,&p2,0);
+ if (*p2) return -EINVAL;
+ return 0;
+}
+
+
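+/* Parse a whitespace-separated list of bit names, each optionally
+   prefixed with '+' (set) or '-' (clear), accumulating the combined
+   mask / value pair. */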
+static int parse_tlist(const char *ptr,unsigned int len,
+ int *maskptr,int *valptr,
+ const char **names,int valid_bits)
+{
+ unsigned int cnt;
+ int mask,val,kv,mode,ret;
+ mask = 0;
+ val = 0;
+ ret = 0;
+ while (len) {
+ cnt = 0;
+ while ((cnt < len) &&
+ ((ptr[cnt] <= 32) ||
+ (ptr[cnt] >= 127))) cnt++;
+ ptr += cnt;
+ len -= cnt;
+ mode = 0;
+ if ((*ptr == '-') || (*ptr == '+')) {
+ mode = (*ptr == '-') ? -1 : 1;
+ ptr++;
+ len--;
+ }
+ cnt = 0;
+ while (cnt < len) {
+ if (ptr[cnt] <= 32) break;
+ if (ptr[cnt] >= 127) break;
+ cnt++;
+ }
+ if (!cnt) break;
+ if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) {
+ ret = -EINVAL;
+ break;
+ }
+ ptr += cnt;
+ len -= cnt;
+ switch (mode) {
+ case 0:
+ mask = valid_bits;
+ val |= kv;
+ break;
+ case -1:
+ mask |= kv;
+ val &= ~kv;
+ break;
+ case 1:
+ mask |= kv;
+ val |= kv;
+ break;
+ default:
+ break;
+ }
+ }
+ *maskptr = mask;
+ *valptr = val;
+ return ret;
+}
+
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
+ const char *ptr,unsigned int len,
+ int *maskptr,int *valptr)
+{
+ int ret = -EINVAL;
+ unsigned int cnt;
+
+ *maskptr = 0;
+ *valptr = 0;
+
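+	/* Trim leading and trailing whitespace / non-printable bytes
+	   before interpreting the symbol. */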
+ cnt = 0;
+ while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++;
+ len -= cnt; ptr += cnt;
+ cnt = 0;
+ while ((cnt < len) && ((ptr[len-(cnt+1)] <= 32) ||
+ (ptr[len-(cnt+1)] >= 127))) cnt++;
+ len -= cnt;
+
+ if (!len) return -EINVAL;
+
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = parse_token(ptr,len,valptr,0,0);
+ if ((ret >= 0) &&
+ ((*valptr < cptr->info->def.type_int.min_value) ||
+ (*valptr > cptr->info->def.type_int.max_value))) {
+ ret = -ERANGE;
+ }
+ if (maskptr) *maskptr = ~0;
+ } else if (cptr->info->type == pvr2_ctl_bool) {
+ ret = parse_token(
+ ptr,len,valptr,boolNames,
+ sizeof(boolNames)/sizeof(boolNames[0]));
+ if (ret == 1) {
+ *valptr = *valptr ? !0 : 0;
+ } else if (ret == 0) {
+ *valptr = (*valptr & 1) ? !0 : 0;
+ }
+ if (maskptr) *maskptr = 1;
+ } else if (cptr->info->type == pvr2_ctl_enum) {
+ ret = parse_token(
+ ptr,len,valptr,
+ cptr->info->def.type_enum.value_names,
+ cptr->info->def.type_enum.count);
+ if ((ret >= 0) &&
+ ((*valptr < 0) ||
+ (*valptr >= cptr->info->def.type_enum.count))) {
+ ret = -ERANGE;
+ }
+ if (maskptr) *maskptr = ~0;
+ } else if (cptr->info->type == pvr2_ctl_bitmask) {
+ ret = parse_tlist(
+ ptr,len,maskptr,valptr,
+ cptr->info->def.type_bitmask.bit_names,
+ cptr->info->def.type_bitmask.valid_bits);
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Convert a given mask/val to a symbolic value */
+int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *cptr,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len)
+{
+ int ret = -EINVAL;
+
+ *len = 0;
+ if (cptr->info->type == pvr2_ctl_int) {
+ *len = scnprintf(buf,maxlen,"%d",val);
+ ret = 0;
+ } else if (cptr->info->type == pvr2_ctl_bool) {
+ *len = scnprintf(buf,maxlen,"%s",val ? "true" : "false");
+ ret = 0;
+ } else if (cptr->info->type == pvr2_ctl_enum) {
+ const char **names;
+ names = cptr->info->def.type_enum.value_names;
+ if ((val >= 0) &&
+ (val < cptr->info->def.type_enum.count)) {
+ if (names[val]) {
+ *len = scnprintf(
+ buf,maxlen,"%s",
+ names[val]);
+ } else {
+ *len = 0;
+ }
+ ret = 0;
+ }
+ } else if (cptr->info->type == pvr2_ctl_bitmask) {
+ *len = gen_bitmask_string(
+ val & mask & cptr->info->def.type_bitmask.valid_bits,
+ ~0,!0,
+ cptr->info->def.type_bitmask.bit_names,
+ buf,maxlen);
+ }
+ return ret;
+}
+
+
+/* Convert a given mask/val to a symbolic value */
+int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len)
+{
+ int ret;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ ret = pvr2_ctrl_value_to_sym_internal(cptr,mask,val,
+ buf,maxlen,len);
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.h b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h
new file mode 100644
index 000000000000..c1680053cd64
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_CTRL_H
+#define __PVRUSB2_CTRL_H
+
+struct pvr2_ctrl;
+
+enum pvr2_ctl_type {
+ pvr2_ctl_int = 0,
+ pvr2_ctl_enum = 1,
+ pvr2_ctl_bitmask = 2,
+ pvr2_ctl_bool = 3,
+};
+
+
+/* Set the given control. */
+int pvr2_ctrl_set_value(struct pvr2_ctrl *,int val);
+
+/* Set/clear specific bits of the given control. */
+int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *,int mask,int val);
+
+/* Get the current value of the given control. */
+int pvr2_ctrl_get_value(struct pvr2_ctrl *,int *valptr);
+
+/* Retrieve control's type */
+enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *);
+
+/* Retrieve control's maximum value (int type) */
+int pvr2_ctrl_get_max(struct pvr2_ctrl *);
+
+/* Retrieve control's minimum value (int type) */
+int pvr2_ctrl_get_min(struct pvr2_ctrl *);
+
+/* Retrieve control's default value (any type) */
+int pvr2_ctrl_get_def(struct pvr2_ctrl *);
+
+/* Retrieve control's enumeration count (enum only) */
+int pvr2_ctrl_get_cnt(struct pvr2_ctrl *);
+
+/* Retrieve control's valid mask bits (bit mask only) */
+int pvr2_ctrl_get_mask(struct pvr2_ctrl *);
+
+/* Retrieve the control's name */
+const char *pvr2_ctrl_get_name(struct pvr2_ctrl *);
+
+/* Retrieve the control's desc */
+const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *);
+
+/* Retrieve a control enumeration or bit mask value */
+int pvr2_ctrl_get_valname(struct pvr2_ctrl *,int,char *,unsigned int,
+ unsigned int *);
+
+/* Return true if control is writable */
+int pvr2_ctrl_is_writable(struct pvr2_ctrl *);
+
+/* Return V4L flags value for control (or zero if there is no v4l control
+ actually under this control) */
+unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *);
+
+/* Return V4L ID for this control or zero if none */
+int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *);
+
+/* Return true if control has custom symbolic representation */
+int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *);
+
+/* Convert a given mask/val to a custom symbolic value */
+int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len);
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *,
+ const char *buf,unsigned int len,
+ int *maskptr,int *valptr);
+
+/* Convert a given mask/val to a symbolic value */
+int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len);
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *,
+ const char *buf,unsigned int len,
+ int *maskptr,int *valptr);
+
+/* Convert a given mask/val to a symbolic value - must already be
+ inside of critical region. */
+int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len);
+
+#endif /* __PVRUSB2_CTRL_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
new file mode 100644
index 000000000000..27eadaff75a0
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -0,0 +1,279 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+
+ This source file is specifically designed to interface with the
+ cx2584x, in kernels 2.6.16 or newer.
+
+*/
+
+#include "pvrusb2-cx2584x-v4l.h"
+#include "pvrusb2-video-v4l.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+
+
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <media/cx25840.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+struct pvr2_v4l_cx2584x {
+ struct pvr2_i2c_handler handler;
+ struct pvr2_decoder_ctrl ctrl;
+ struct pvr2_i2c_client *client;
+ struct pvr2_hdw *hdw;
+ unsigned long stale_mask;
+};
+
+
+static void set_input(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct v4l2_routing route;
+ enum cx25840_video_input vid_input;
+ enum cx25840_audio_input aud_input;
+
+ memset(&route,0,sizeof(route));
+
+ switch(hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ vid_input = CX25840_COMPOSITE7;
+ aud_input = CX25840_AUDIO8;
+ break;
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ vid_input = CX25840_COMPOSITE3;
+ aud_input = CX25840_AUDIO_SERIAL;
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ vid_input = CX25840_SVIDEO1;
+ aud_input = CX25840_AUDIO_SERIAL;
+ break;
+ case PVR2_CVAL_INPUT_RADIO:
+ default:
+ // Just set it to be composite input for now...
+ vid_input = CX25840_COMPOSITE3;
+ aud_input = CX25840_AUDIO_SERIAL;
+ break;
+ }
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_input vid=0x%x aud=0x%x",
+ vid_input,aud_input);
+ route.input = (u32)vid_input;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route);
+ route.input = (u32)aud_input;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
+}
+
+
+static int check_input(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->input_dirty != 0;
+}
+
+
+static void set_audio(struct pvr2_v4l_cx2584x *ctxt)
+{
+ u32 val;
+ struct pvr2_hdw *hdw = ctxt->hdw;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_audio %d",
+ hdw->srate_val);
+ switch (hdw->srate_val) {
+ default:
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000:
+ val = 48000;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100:
+ val = 44100;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000:
+ val = 32000;
+ break;
+ }
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val);
+}
+
+
+static int check_audio(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->srate_dirty != 0;
+}
+
+
+struct pvr2_v4l_cx2584x_ops {
+ void (*update)(struct pvr2_v4l_cx2584x *);
+ int (*check)(struct pvr2_v4l_cx2584x *);
+};
+
+
+static const struct pvr2_v4l_cx2584x_ops decoder_ops[] = {
+ { .update = set_input, .check = check_input},
+ { .update = set_audio, .check = check_audio},
+};
+
+
+static void decoder_detach(struct pvr2_v4l_cx2584x *ctxt)
+{
+ ctxt->client->handler = 0;
+ ctxt->hdw->decoder_ctrl = 0;
+ kfree(ctxt);
+}
+
+
+static int decoder_check(struct pvr2_v4l_cx2584x *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (decoder_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void decoder_update(struct pvr2_v4l_cx2584x *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ decoder_ops[idx].update(ctxt);
+ }
+}
+
+
+static void decoder_enable(struct pvr2_v4l_cx2584x *ctxt,int fl)
+{
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_enable(%d)",fl);
+ pvr2_v4l2_cmd_stream(ctxt->client,fl);
+}
+
+
+static int decoder_detect(struct pvr2_i2c_client *cp)
+{
+ int ret;
+ /* Attempt to query the decoder - let's see if it will answer */
+ struct v4l2_queryctrl qc;
+
+ memset(&qc,0,sizeof(qc));
+
+ qc.id = V4L2_CID_BRIGHTNESS;
+
+ ret = pvr2_i2c_client_cmd(cp,VIDIOC_QUERYCTRL,&qc);
+ return ret == 0; /* Return true if it answered */
+}
+
+
+static int decoder_is_tuned(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct v4l2_tuner vt;
+ int ret;
+
+ memset(&vt,0,sizeof(vt));
+ ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
+ if (ret < 0) return -EINVAL;
+ return vt.signal ? 1 : 0;
+}
+
+
+static unsigned int decoder_describe(struct pvr2_v4l_cx2584x *ctxt,
+ char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-cx2584x-v4l");
+}
+
+
+static void decoder_reset(struct pvr2_v4l_cx2584x *ctxt)
+{
+ int ret;
+ ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_RESET,0);
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_reset (ret=%d)",ret);
+}
+
+
+static const struct pvr2_i2c_handler_functions hfuncs = {
+ .detach = (void (*)(void *))decoder_detach,
+ .check = (int (*)(void *))decoder_check,
+ .update = (void (*)(void *))decoder_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe,
+};
+
+
+int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *hdw,
+ struct pvr2_i2c_client *cp)
+{
+ struct pvr2_v4l_cx2584x *ctxt;
+
+ if (hdw->decoder_ctrl) return 0;
+ if (cp->handler) return 0;
+ if (!decoder_detect(cp)) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->handler.func_data = ctxt;
+ ctxt->handler.func_table = &hfuncs;
+ ctxt->ctrl.ctxt = ctxt;
+ ctxt->ctrl.detach = (void (*)(void *))decoder_detach;
+ ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable;
+ ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned;
+ ctxt->ctrl.force_reset = (void (*)(void*))decoder_reset;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->stale_mask = (1 << (sizeof(decoder_ops)/
+ sizeof(decoder_ops[0]))) - 1;
+ hdw->decoder_ctrl = &ctxt->ctrl;
+ cp->handler = &ctxt->handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x cx2584x V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h
new file mode 100644
index 000000000000..54b2844e7a71
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_CX2584X_V4L_H
+#define __PVRUSB2_CX2584X_V4L_H
+
+/*
+
+ This module connects the pvrusb2 driver to the I2C chip level
+ driver which handles combined device audio & video processing.
+ This interface is used internally by the driver; higher level code
+ should only interact through the interface provided by
+ pvrusb2-hdw.h.
+
+*/
+
+
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+
+#endif /* __PVRUSB2_CX2584X_V4L_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debug.h b/drivers/media/video/pvrusb2/pvrusb2-debug.h
new file mode 100644
index 000000000000..d95a8588e4f8
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debug.h
@@ -0,0 +1,67 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_DEBUG_H
+#define __PVRUSB2_DEBUG_H
+
+extern int pvrusb2_debug;
+
+#define pvr2_trace(msk, fmt, arg...) do { if ((msk) & pvrusb2_debug) printk(KERN_INFO "pvrusb2: " fmt "\n", ##arg); } while (0)
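+
+/* Typical use (this exact call appears later in this patch, in
+   pvrusb2-demod.c):
+   pvr2_trace(PVR2_TRACE_CHIPS,"i2c demod set_config(0x%x)",cfg); */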
+
+/* These are listed in *rough* order of decreasing usefulness and
+ increasing noise level. */
+#define PVR2_TRACE_INFO (1 << 0) // Normal messages
+#define PVR2_TRACE_ERROR_LEGS (1 << 1) // error messages
+#define PVR2_TRACE_TOLERANCE (1 << 2) // track tolerance-affected errors
+#define PVR2_TRACE_TRAP (1 << 3) // Trap & report misbehavior from app
+#define PVR2_TRACE_INIT (1 << 4) // misc initialization steps
+#define PVR2_TRACE_START_STOP (1 << 5) // Streaming start / stop
+#define PVR2_TRACE_CTL (1 << 6) // commit of control changes
+#define PVR2_TRACE_DEBUG (1 << 7) // Temporary debug code
+#define PVR2_TRACE_EEPROM (1 << 8) // eeprom parsing / report
+#define PVR2_TRACE_STRUCT (1 << 9) // internal struct creation
+#define PVR2_TRACE_OPEN_CLOSE (1 << 10) // application open / close
+#define PVR2_TRACE_CREG (1 << 11) // Main critical region entry / exit
+#define PVR2_TRACE_SYSFS (1 << 12) // Sysfs driven I/O
+#define PVR2_TRACE_FIRMWARE (1 << 13) // firmware upload actions
+#define PVR2_TRACE_CHIPS (1 << 14) // chip broadcast operation
+#define PVR2_TRACE_I2C (1 << 15) // I2C related stuff
+#define PVR2_TRACE_I2C_CMD (1 << 16) // Software commands to I2C modules
+#define PVR2_TRACE_I2C_CORE (1 << 17) // I2C core debugging
+#define PVR2_TRACE_I2C_TRAF (1 << 18) // I2C traffic through the adapter
+#define PVR2_TRACE_V4LIOCTL (1 << 19) // v4l ioctl details
+#define PVR2_TRACE_ENCODER (1 << 20) // mpeg2 encoder operation
+#define PVR2_TRACE_BUF_POOL (1 << 21) // Track buffer pool management
+#define PVR2_TRACE_BUF_FLOW (1 << 22) // Track buffer flow in system
+#define PVR2_TRACE_DATA_FLOW (1 << 23) // Track data flow
+#define PVR2_TRACE_DEBUGIFC (1 << 24) // Debug interface actions
+#define PVR2_TRACE_GPIO (1 << 25) // GPIO state bit changes
+
+
+#endif /* __PVRUSB2_DEBUG_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.c b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
new file mode 100644
index 000000000000..586900e365ff
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
@@ -0,0 +1,478 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "pvrusb2-debugifc.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-i2c-core.h"
+
+struct debugifc_mask_item {
+ const char *name;
+ unsigned long msk;
+};
+
+static struct debugifc_mask_item mask_items[] = {
+ {"ENC_FIRMWARE",(1<<PVR2_SUBSYS_B_ENC_FIRMWARE)},
+ {"ENC_CFG",(1<<PVR2_SUBSYS_B_ENC_CFG)},
+ {"DIG_RUN",(1<<PVR2_SUBSYS_B_DIGITIZER_RUN)},
+ {"USB_RUN",(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)},
+ {"ENC_RUN",(1<<PVR2_SUBSYS_B_ENC_RUN)},
+};
+
+
+static unsigned int debugifc_count_whitespace(const char *buf,
+ unsigned int count)
+{
+ unsigned int scnt;
+ char ch;
+
+ for (scnt = 0; scnt < count; scnt++) {
+ ch = buf[scnt];
+ if (ch == ' ') continue;
+ if (ch == '\t') continue;
+ if (ch == '\n') continue;
+ break;
+ }
+ return scnt;
+}
+
+
+static unsigned int debugifc_count_nonwhitespace(const char *buf,
+ unsigned int count)
+{
+ unsigned int scnt;
+ char ch;
+
+ for (scnt = 0; scnt < count; scnt++) {
+ ch = buf[scnt];
+ if (ch == ' ') break;
+ if (ch == '\t') break;
+ if (ch == '\n') break;
+ }
+ return scnt;
+}
+
+
+static unsigned int debugifc_isolate_word(const char *buf,unsigned int count,
+ const char **wstrPtr,
+ unsigned int *wlenPtr)
+{
+ const char *wptr;
+ unsigned int consume_cnt = 0;
+ unsigned int wlen;
+ unsigned int scnt;
+
+ wptr = 0;
+ wlen = 0;
+ scnt = debugifc_count_whitespace(buf,count);
+ consume_cnt += scnt; count -= scnt; buf += scnt;
+ if (!count) goto done;
+
+ scnt = debugifc_count_nonwhitespace(buf,count);
+ if (!scnt) goto done;
+ wptr = buf;
+ wlen = scnt;
+ consume_cnt += scnt; count -= scnt; buf += scnt;
+
+ done:
+ *wstrPtr = wptr;
+ *wlenPtr = wlen;
+ return consume_cnt;
+}
+
+
+static int debugifc_parse_unsigned_number(const char *buf,unsigned int count,
+ u32 *num_ptr)
+{
+ u32 result = 0;
+ u32 val;
+ int ch;
+ int radix = 10;
+ if ((count >= 2) && (buf[0] == '0') &&
+ ((buf[1] == 'x') || (buf[1] == 'X'))) {
+ radix = 16;
+ count -= 2;
+ buf += 2;
+ } else if ((count >= 1) && (buf[0] == '0')) {
+ radix = 8;
+ }
+
+ while (count--) {
+ ch = *buf++;
+ if ((ch >= '0') && (ch <= '9')) {
+ val = ch - '0';
+ } else if ((ch >= 'a') && (ch <= 'f')) {
+ val = ch - 'a' + 10;
+ } else if ((ch >= 'A') && (ch <= 'F')) {
+ val = ch - 'A' + 10;
+ } else {
+ return -EINVAL;
+ }
+ if (val >= radix) return -EINVAL;
+ result *= radix;
+ result += val;
+ }
+ *num_ptr = result;
+ return 0;
+}
+
+
+static int debugifc_match_keyword(const char *buf,unsigned int count,
+ const char *keyword)
+{
+ unsigned int kl;
+ if (!keyword) return 0;
+ kl = strlen(keyword);
+ if (kl != count) return 0;
+ return !memcmp(buf,keyword,kl);
+}
+
+
+static unsigned long debugifc_find_mask(const char *buf,unsigned int count)
+{
+ struct debugifc_mask_item *mip;
+ unsigned int idx;
+ for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) {
+ mip = mask_items + idx;
+ if (debugifc_match_keyword(buf,count,mip->name)) {
+ return mip->msk;
+ }
+ }
+ return 0;
+}
+
+
+static int debugifc_print_mask(char *buf,unsigned int sz,
+ unsigned long msk,unsigned long val)
+{
+ struct debugifc_mask_item *mip;
+ unsigned int idx;
+ int bcnt = 0;
+ int ccnt;
+ for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) {
+ mip = mask_items + idx;
+ if (!(mip->msk & msk)) continue;
+ ccnt = scnprintf(buf,sz,"%s%c%s",
+ (bcnt ? " " : ""),
+ ((mip->msk & val) ? '+' : '-'),
+ mip->name);
+ sz -= ccnt;
+ buf += ccnt;
+ bcnt += ccnt;
+ }
+ return bcnt;
+}
+
+static unsigned int debugifc_parse_subsys_mask(const char *buf,
+ unsigned int count,
+ unsigned long *mskPtr,
+ unsigned long *valPtr)
+{
+ const char *wptr;
+ unsigned int consume_cnt = 0;
+ unsigned int scnt;
+ unsigned int wlen;
+ int mode;
+ unsigned long m1,msk,val;
+
+ msk = 0;
+ val = 0;
+
+ while (count) {
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) break;
+ consume_cnt += scnt; count -= scnt; buf += scnt;
+ if (!wptr) break;
+
+ mode = 0;
+ if (wlen) switch (wptr[0]) {
+ case '+':
+ wptr++;
+ wlen--;
+ break;
+ case '-':
+ mode = 1;
+ wptr++;
+ wlen--;
+ break;
+ }
+ if (!wlen) continue;
+ m1 = debugifc_find_mask(wptr,wlen);
+ if (!m1) break;
+ msk |= m1;
+ if (!mode) val |= m1;
+ }
+ *mskPtr = msk;
+ *valPtr = val;
+ return consume_cnt;
+}
+
+
+int pvr2_debugifc_print_info(struct pvr2_hdw *hdw,char *buf,unsigned int acnt)
+{
+ int bcnt = 0;
+ int ccnt;
+ struct pvr2_hdw_debug_info dbg;
+
+ pvr2_hdw_get_debug_info(hdw,&dbg);
+
+ ccnt = scnprintf(buf,acnt,"big lock %s; ctl lock %s",
+ (dbg.big_lock_held ? "held" : "free"),
+ (dbg.ctl_lock_held ? "held" : "free"));
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ if (dbg.ctl_lock_held) {
+ ccnt = scnprintf(buf,acnt,"; cmd_state=%d cmd_code=%d"
+ " cmd_wlen=%d cmd_rlen=%d"
+ " wpend=%d rpend=%d tmout=%d rstatus=%d"
+ " wstatus=%d",
+ dbg.cmd_debug_state,dbg.cmd_code,
+ dbg.cmd_debug_write_len,
+ dbg.cmd_debug_read_len,
+ dbg.cmd_debug_write_pend,
+ dbg.cmd_debug_read_pend,
+ dbg.cmd_debug_timeout,
+ dbg.cmd_debug_rstatus,
+ dbg.cmd_debug_wstatus);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ }
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(
+ buf,acnt,"driver flags: %s %s %s\n",
+ (dbg.flag_init_ok ? "initialized" : "uninitialized"),
+ (dbg.flag_ok ? "ok" : "fail"),
+ (dbg.flag_disconnected ? "disconnected" : "connected"));
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,dbg.subsys_flags,dbg.subsys_flags);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,~dbg.subsys_flags,dbg.subsys_flags);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ ccnt = scnprintf(buf,acnt,"Attached I2C modules:\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = pvr2_i2c_report(hdw,buf,acnt);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ return bcnt;
+}
+
+
+int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
+ char *buf,unsigned int acnt)
+{
+ int bcnt = 0;
+ int ccnt;
+ unsigned long msk;
+ int ret;
+ u32 gpio_dir,gpio_in,gpio_out;
+
+ ret = pvr2_hdw_is_hsm(hdw);
+ ccnt = scnprintf(buf,acnt,"USB link speed: %s\n",
+ (ret < 0 ? "FAIL" : (ret ? "high" : "full")));
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ gpio_dir = 0; gpio_in = 0; gpio_out = 0;
+ pvr2_hdw_gpio_get_dir(hdw,&gpio_dir);
+ pvr2_hdw_gpio_get_out(hdw,&gpio_out);
+ pvr2_hdw_gpio_get_in(hdw,&gpio_in);
+ ccnt = scnprintf(buf,acnt,"GPIO state: dir=0x%x in=0x%x out=0x%x\n",
+ gpio_dir,gpio_in,gpio_out);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ ccnt = scnprintf(buf,acnt,"Streaming is %s\n",
+ pvr2_hdw_get_streaming(hdw) ? "on" : "off");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ msk = pvr2_hdw_subsys_get(hdw);
+ ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,msk,msk);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,~msk,msk);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ msk = pvr2_hdw_subsys_stream_get(hdw);
+ ccnt = scnprintf(buf,acnt,"Subsystems stopped on stream shutdown: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,msk,msk);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ return bcnt;
+}
+
+
+int pvr2_debugifc_do1cmd(struct pvr2_hdw *hdw,const char *buf,
+ unsigned int count)
+{
+ const char *wptr;
+ unsigned int wlen;
+ unsigned int scnt;
+
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return 0;
+ count -= scnt; buf += scnt;
+ if (!wptr) return 0;
+
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,"debugifc cmd: \"%.*s\"",wlen,wptr);
+ if (debugifc_match_keyword(wptr,wlen,"reset")) {
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ if (debugifc_match_keyword(wptr,wlen,"cpu")) {
+ pvr2_hdw_cpureset_assert(hdw,!0);
+ pvr2_hdw_cpureset_assert(hdw,0);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"bus")) {
+ pvr2_hdw_device_reset(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"soft")) {
+ return pvr2_hdw_cmd_powerup(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"deep")) {
+ return pvr2_hdw_cmd_deep_reset(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"firmware")) {
+ return pvr2_upload_firmware2(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"decoder")) {
+ return pvr2_hdw_cmd_decoder_reset(hdw);
+ }
+ return -EINVAL;
+ } else if (debugifc_match_keyword(wptr,wlen,"subsys_flags")) {
+ unsigned long msk = 0;
+ unsigned long val = 0;
+ if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) {
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,
+ "debugifc parse error on subsys mask");
+ return -EINVAL;
+ }
+ pvr2_hdw_subsys_bit_chg(hdw,msk,val);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"stream_flags")) {
+ unsigned long msk = 0;
+ unsigned long val = 0;
+ if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) {
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,
+ "debugifc parse error on stream mask");
+ return -EINVAL;
+ }
+ pvr2_hdw_subsys_stream_bit_chg(hdw,msk,val);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"cpufw")) {
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ if (debugifc_match_keyword(wptr,wlen,"fetch")) {
+ pvr2_hdw_cpufw_set_enabled(hdw,!0);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"done")) {
+ pvr2_hdw_cpufw_set_enabled(hdw,0);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ } else if (debugifc_match_keyword(wptr,wlen,"gpio")) {
+ int dir_fl = 0;
+ int ret;
+ u32 msk,val;
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ if (debugifc_match_keyword(wptr,wlen,"dir")) {
+ dir_fl = !0;
+ } else if (!debugifc_match_keyword(wptr,wlen,"out")) {
+ return -EINVAL;
+ }
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ ret = debugifc_parse_unsigned_number(wptr,wlen,&msk);
+ if (ret) return ret;
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (wptr) {
+ ret = debugifc_parse_unsigned_number(wptr,wlen,&val);
+ if (ret) return ret;
+ } else {
+ val = msk;
+ msk = 0xffffffff;
+ }
+ if (dir_fl) {
+ ret = pvr2_hdw_gpio_chg_dir(hdw,msk,val);
+ } else {
+ ret = pvr2_hdw_gpio_chg_out(hdw,msk,val);
+ }
+ return ret;
+ }
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,
+ "debugifc failed to recognize cmd: \"%.*s\"",wlen,wptr);
+ return -EINVAL;
+}
+
+
+int pvr2_debugifc_docmd(struct pvr2_hdw *hdw,const char *buf,
+ unsigned int count)
+{
+ unsigned int bcnt = 0;
+ int ret;
+
+ while (count) {
+ for (bcnt = 0; bcnt < count; bcnt++) {
+ if (buf[bcnt] == '\n') break;
+ }
+
+ ret = pvr2_debugifc_do1cmd(hdw,buf,bcnt);
+ if (ret < 0) return ret;
+ if (bcnt < count) bcnt++;
+ buf += bcnt;
+ count -= bcnt;
+ }
+
+ return 0;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.h b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h
new file mode 100644
index 000000000000..990b02d35d36
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_DEBUGIFC_H
+#define __PVRUSB2_DEBUGIFC_H
+
+struct pvr2_hdw;
+
+/* Non-intrusively print some useful debugging info from inside the
+ driver. This should work even if the driver appears to be
+ wedged. */
+int pvr2_debugifc_print_info(struct pvr2_hdw *,
+ char *buf_ptr,unsigned int buf_size);
+
+/* Print general status of driver. This will also trigger a probe of
+ the USB link. Unlike print_info(), this one synchronizes with the
+ driver so the information should be self-consistent (but it will
+ hang if the driver is wedged). */
+int pvr2_debugifc_print_status(struct pvr2_hdw *,
+ char *buf_ptr,unsigned int buf_size);
+
+/* Parse a string command into a driver action. */
+int pvr2_debugifc_docmd(struct pvr2_hdw *,
+ const char *buf_ptr,unsigned int buf_size);
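+
+/* The accepted command syntax is defined by the parser in
+   pvrusb2-debugifc.c; as a rough illustration (derived from that parser,
+   not from separate documentation), commands look like:
+     reset cpu|bus|soft|deep|firmware|decoder
+     subsys_flags [+|-]NAME ...
+     stream_flags [+|-]NAME ...
+     cpufw fetch|done
+     gpio dir|out <mask> [<value>]
+   Multiple commands may be given, separated by newlines. */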
+
+#endif /* __PVRUSB2_DEBUGIFC_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.c b/drivers/media/video/pvrusb2/pvrusb2-demod.c
new file mode 100644
index 000000000000..9686569a11f6
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-demod.c
@@ -0,0 +1,126 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2.h"
+#include "pvrusb2-util.h"
+#include "pvrusb2-demod.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/tuner.h>
+#include <media/v4l2-common.h>
+
+
+struct pvr2_demod_handler {
+ struct pvr2_hdw *hdw;
+ struct pvr2_i2c_client *client;
+ struct pvr2_i2c_handler i2c_handler;
+ int type_update_fl;
+};
+
+
+static void set_config(struct pvr2_demod_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ int cfg = 0;
+
+ switch (hdw->tuner_type) {
+ case TUNER_PHILIPS_FM1216ME_MK3:
+ case TUNER_PHILIPS_FM1236_MK3:
+ cfg = TDA9887_PORT1_ACTIVE|TDA9887_PORT2_ACTIVE;
+ break;
+ default:
+ break;
+ }
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c demod set_config(0x%x)",cfg);
+ pvr2_i2c_client_cmd(ctxt->client,TDA9887_SET_CONFIG,&cfg);
+ ctxt->type_update_fl = 0;
+}
+
+
+static int demod_check(struct pvr2_demod_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ if (hdw->tuner_updated) ctxt->type_update_fl = !0;
+ return ctxt->type_update_fl != 0;
+}
+
+
+static void demod_update(struct pvr2_demod_handler *ctxt)
+{
+ if (ctxt->type_update_fl) set_config(ctxt);
+}
+
+
+static void demod_detach(struct pvr2_demod_handler *ctxt)
+{
+ ctxt->client->handler = 0;
+ kfree(ctxt);
+}
+
+
+static unsigned int demod_describe(struct pvr2_demod_handler *ctxt,char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-demod");
+}
+
+
+static const struct pvr2_i2c_handler_functions tuner_funcs = {
+ .detach = (void (*)(void *))demod_detach,
+ .check = (int (*)(void *))demod_check,
+ .update = (void (*)(void *))demod_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))demod_describe,
+};
+
+
+int pvr2_i2c_demod_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_demod_handler *ctxt;
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->i2c_handler.func_data = ctxt;
+ ctxt->i2c_handler.func_table = &tuner_funcs;
+ ctxt->type_update_fl = !0;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ cp->handler = &ctxt->i2c_handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tda9887 V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.h b/drivers/media/video/pvrusb2/pvrusb2-demod.h
new file mode 100644
index 000000000000..4c4e40ffbf03
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-demod.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_DEMOD_H
+#define __PVRUSB2_DEMOD_H
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_demod_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+#endif /* __PVRUSB2_DEMOD_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
new file mode 100644
index 000000000000..94d383ff9889
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-eeprom.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+
+#define trace_eeprom(...) pvr2_trace(PVR2_TRACE_EEPROM,__VA_ARGS__)
+
+
+
+/*
+
+ Read and analyze data in the eeprom. Use tveeprom to figure out
+ the packet structure, since this is another Hauppauge device and
+ internally it has a family resemblance to ivtv-type devices.
+
+*/
+
+#include <media/tveeprom.h>
+
+/* We seem to only be interested in the last 128 bytes of the EEPROM */
+#define EEPROM_SIZE 128
+
+/* Grab EEPROM contents, needed for direct method. */
+static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
+{
+ struct i2c_msg msg[2];
+ u8 *eeprom;
+ u8 iadd[2];
+ u8 addr;
+ u16 eepromSize;
+ unsigned int offs;
+ int ret;
+ int mode16 = 0;
+ unsigned pcnt,tcnt;
+ eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
+ if (!eeprom) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to allocate memory"
+ " required to read eeprom");
+ return 0;
+ }
+
+ trace_eeprom("Value for eeprom addr from controller was 0x%x",
+ hdw->eeprom_addr);
+ addr = hdw->eeprom_addr;
+ /* Seems that if the high bit is set, then the *real* eeprom
+ address is shifted right one bit position (noticed this in
+ newer PVR USB2 hardware) */
+ if (addr & 0x80) addr >>= 1;
+
+ /* FX2 documentation states that a 16bit-addressed eeprom is
+ expected if the I2C address is an odd number (yeah, this is
+ strange but it's what they do) */
+ mode16 = (addr & 1);
+ eepromSize = (mode16 ? 4096 : 256);
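+ /* Illustration only (hypothetical value): a reported address of
+    0xa2 would be shifted down to 0x51, which is odd, so 16 bit
+    addressing and a 4096 byte part would be assumed. */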
+ trace_eeprom("Examining %d byte eeprom at location 0x%x"
+ " using %d bit addressing",eepromSize,addr,
+ mode16 ? 16 : 8);
+
+ msg[0].addr = addr;
+ msg[0].flags = 0;
+ msg[0].len = mode16 ? 2 : 1;
+ msg[0].buf = iadd;
+ msg[1].addr = addr;
+ msg[1].flags = I2C_M_RD;
+
+ /* We have to do the actual eeprom data fetch ourselves, because
+ (1) we're only fetching part of the eeprom, and (2) if we were
+ getting the whole thing our I2C driver can't grab it in one
+ pass - which is what tveeprom is otherwise going to attempt */
+ memset(eeprom,0,EEPROM_SIZE);
+ for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) {
+ pcnt = 16;
+ if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt;
+ offs = tcnt + (eepromSize - EEPROM_SIZE);
+ if (mode16) {
+ iadd[0] = offs >> 8;
+ iadd[1] = offs;
+ } else {
+ iadd[0] = offs;
+ }
+ msg[1].len = pcnt;
+ msg[1].buf = eeprom+tcnt;
+ if ((ret = i2c_transfer(
+ &hdw->i2c_adap,
+ msg,sizeof(msg)/sizeof(msg[0]))) != 2) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "eeprom fetch set offs err=%d",ret);
+ kfree(eeprom);
+ return 0;
+ }
+ }
+ return eeprom;
+}
+
+
+/* Directly call eeprom analysis function within tveeprom. */
+int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
+{
+ u8 *eeprom;
+ struct tveeprom tvdata;
+
+ memset(&tvdata,0,sizeof(tvdata));
+
+ eeprom = pvr2_eeprom_fetch(hdw);
+ if (!eeprom) return -EINVAL;
+
+ {
+ struct i2c_client fake_client;
+ /* Newer version expects a useless client interface */
+ fake_client.addr = hdw->eeprom_addr;
+ fake_client.adapter = &hdw->i2c_adap;
+ tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
+ }
+
+ trace_eeprom("eeprom assumed v4l tveeprom module");
+ trace_eeprom("eeprom direct call results:");
+ trace_eeprom("has_radio=%d",tvdata.has_radio);
+ trace_eeprom("tuner_type=%d",tvdata.tuner_type);
+ trace_eeprom("tuner_formats=0x%x",tvdata.tuner_formats);
+ trace_eeprom("audio_processor=%d",tvdata.audio_processor);
+ trace_eeprom("model=%d",tvdata.model);
+ trace_eeprom("revision=%d",tvdata.revision);
+ trace_eeprom("serial_number=%d",tvdata.serial_number);
+ trace_eeprom("rev_str=%s",tvdata.rev_str);
+ hdw->tuner_type = tvdata.tuner_type;
+ hdw->serial_number = tvdata.serial_number;
+ hdw->std_mask_eeprom = tvdata.tuner_formats;
+
+ kfree(eeprom);
+
+ return 0;
+}
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.h b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h
new file mode 100644
index 000000000000..84242975dea7
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_EEPROM_H
+#define __PVRUSB2_EEPROM_H
+
+struct pvr2_hdw;
+
+int pvr2_eeprom_analyze(struct pvr2_hdw *);
+
+#endif /* __PVRUSB2_EEPROM_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
new file mode 100644
index 000000000000..2cc31695b435
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -0,0 +1,418 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/device.h> // for linux/firmware.h
+#include <linux/firmware.h>
+#include "pvrusb2-util.h"
+#include "pvrusb2-encoder.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+
+
+
+/* Firmware mailbox flags - definitions found from ivtv */
+#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
+#define IVTV_MBOX_DRIVER_DONE 0x00000002
+#define IVTV_MBOX_DRIVER_BUSY 0x00000001
+
+
+static int pvr2_encoder_write_words(struct pvr2_hdw *hdw,
+ const u32 *data, unsigned int dlen)
+{
+ unsigned int idx;
+ int ret;
+ unsigned int offs = 0;
+ unsigned int chunkCnt;
+
+ /*
+
+ Format: First byte must be 0x01. Each remaining 32 bit word
+ occupies a chunk of 7 bytes: the word itself, little-endian
+ ordered and starting at offset zero within the chunk, followed
+ by 2 blank bytes and a single byte that is 0x44 plus the
+ offset of the word. Repeat the request for additional words,
+ with the offset adjusted accordingly.
+
+ */
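+
+ /* Illustration of the layout described above, for two words written
+    starting at word offset zero:
+      byte 0       0x01
+      bytes 1-4    word 0, little-endian
+      bytes 5-6    0x00 0x00
+      byte 7       0x44 + 0
+      bytes 8-11   word 1, little-endian
+      bytes 12-13  0x00 0x00
+      byte 14      0x44 + 1 */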
+ while (dlen) {
+ chunkCnt = 8;
+ if (chunkCnt > dlen) chunkCnt = dlen;
+ memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
+ hdw->cmd_buffer[0] = 0x01;
+ for (idx = 0; idx < chunkCnt; idx++) {
+ hdw->cmd_buffer[1+(idx*7)+6] = 0x44 + idx + offs;
+ PVR2_DECOMPOSE_LE(hdw->cmd_buffer, 1+(idx*7),
+ data[idx]);
+ }
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,1+(chunkCnt*7),
+ 0,0);
+ if (ret) return ret;
+ data += chunkCnt;
+ dlen -= chunkCnt;
+ offs += chunkCnt;
+ }
+
+ return 0;
+}
+
+
+static int pvr2_encoder_read_words(struct pvr2_hdw *hdw,int statusFl,
+ u32 *data, unsigned int dlen)
+{
+ unsigned int idx;
+ int ret;
+ unsigned int offs = 0;
+ unsigned int chunkCnt;
+
+ /*
+
+ Format: First byte must be 0x02 (status check) or 0x28 (read
+ back block of 32 bit words). Next 6 bytes must be zero,
+ followed by a single byte of 0x44+offset for portion to be
+ read. Returned data is a packed set of 32 bit words that were
+ read.
+
+ */
+
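+ /* Illustration of the request described above (8 bytes total):
+      byte 0      0x02 (status poll) or 0x28 (read back words)
+      bytes 1-6   0x00
+      byte 7      0x44 + word offset
+    The reply then carries 4 bytes per requested word, little-endian. */
+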
+ while (dlen) {
+ chunkCnt = 16;
+ if (chunkCnt > dlen) chunkCnt = dlen;
+ memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
+ hdw->cmd_buffer[0] = statusFl ? 0x02 : 0x28;
+ hdw->cmd_buffer[7] = 0x44 + offs;
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,8,
+ hdw->cmd_buffer,chunkCnt * 4);
+ if (ret) return ret;
+
+ for (idx = 0; idx < chunkCnt; idx++) {
+ data[idx] = PVR2_COMPOSE_LE(hdw->cmd_buffer,idx*4);
+ }
+ data += chunkCnt;
+ dlen -= chunkCnt;
+ offs += chunkCnt;
+ }
+
+ return 0;
+}
+
+
+/* This prototype is set up to be compatible with the
+ cx2341x_mbox_func prototype in cx2341x.h, which should be in
+ kernels 2.6.18 or later. We do this so that we can enable
+ cx2341x.ko to write to our encoder (by handing it a pointer to this
+ function). For earlier kernels this doesn't really matter. */
+static int pvr2_encoder_cmd(void *ctxt,
+ int cmd,
+ int arg_cnt_send,
+ int arg_cnt_recv,
+ u32 *argp)
+{
+ unsigned int poll_count;
+ int ret = 0;
+ unsigned int idx;
+ /* These sizes look to be limited by the FX2 firmware implementation */
+ u32 wrData[16];
+ u32 rdData[16];
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt;
+
+
+ /*
+
+ The encoder seems to speak entirely using blocks of 32 bit words.
+ In ivtv driver terms, this is a mailbox which we populate with
+ data and watch what the hardware does with it. The first word
+ is a set of flags used to control the transaction, the second
+ word is the command to execute, the third word is zero (ivtv
+ driver suggests that this is some kind of return value), and
+ the fourth word is a specified timeout (windows driver always
+ uses 0x00060000 except for one case when it is zero). All
+ successive words are the argument words for the command.
+
+ First, write out the entire set of words, with the first word
+ being zero.
+
+ Next, write out just the first word again, but set it to
+ IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY this time (which
+ probably means "go").
+
+ Next, read back 16 words as status. Check the first word,
+ which should have IVTV_MBOX_FIRMWARE_DONE set. If however
+ that bit is not set, then the command isn't done so repeat the
+ read.
+
+ Next, read back the full set of result words and compare with
+ the original arguments. Hopefully they will match.
+
+ Finally, write out just the first word again, but set it to
+ 0x0 this time (which probably means "idle").
+
+ */
+
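+ /* Summary of the wrData[] mailbox layout used below:
+      word 0   flags (0 at first, then DRIVER_DONE|DRIVER_BUSY, finally 0)
+      word 1   command code
+      word 2   0 (apparently a return value slot)
+      word 3   timeout, 0x00060000
+      word 4+  command arguments */
+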
+ if (arg_cnt_send > (sizeof(wrData)/sizeof(wrData[0]))-4) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Failed to write cx23416 command"
+ " - too many input arguments"
+ " (was given %u limit %u)",
+ arg_cnt_send,
+ (unsigned int)(sizeof(wrData)/sizeof(wrData[0])) - 4);
+ return -EINVAL;
+ }
+
+ if (arg_cnt_recv > (sizeof(rdData)/sizeof(rdData[0]))-4) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Failed to write cx23416 command"
+ " - too many return arguments"
+ " (was given %u limit %u)",
+ arg_cnt_recv,
+ (unsigned int)(sizeof(rdData)/sizeof(rdData[0])) - 4);
+ return -EINVAL;
+ }
+
+
+ LOCK_TAKE(hdw->ctl_lock); do {
+
+ wrData[0] = 0;
+ wrData[1] = cmd;
+ wrData[2] = 0;
+ wrData[3] = 0x00060000;
+ for (idx = 0; idx < arg_cnt_send; idx++) {
+ wrData[idx+4] = argp[idx];
+ }
+ for (; idx < (sizeof(wrData)/sizeof(wrData[0]))-4; idx++) {
+ wrData[idx+4] = 0;
+ }
+
+ ret = pvr2_encoder_write_words(hdw,wrData,idx);
+ if (ret) break;
+ wrData[0] = IVTV_MBOX_DRIVER_DONE|IVTV_MBOX_DRIVER_BUSY;
+ ret = pvr2_encoder_write_words(hdw,wrData,1);
+ if (ret) break;
+ poll_count = 0;
+ while (1) {
+ if (poll_count < 10000000) poll_count++;
+ ret = pvr2_encoder_read_words(hdw,!0,rdData,1);
+ if (ret) break;
+ if (rdData[0] & IVTV_MBOX_FIRMWARE_DONE) {
+ break;
+ }
+ if (poll_count == 100) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "***WARNING*** device's encoder"
+ " appears to be stuck"
+ " (status=0%08x)",rdData[0]);
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Encoder command: 0x%02x",cmd);
+ for (idx = 4; idx < arg_cnt_send; idx++) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Encoder arg%d: 0x%08x",
+ idx-3,wrData[idx]);
+ }
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Giving up waiting."
+ " It is likely that"
+ " this is a bad idea...");
+ ret = -EBUSY;
+ break;
+ }
+ }
+ if (ret) break;
+ wrData[0] = 0x7;
+ ret = pvr2_encoder_read_words(
+ hdw,0,rdData,
+ sizeof(rdData)/sizeof(rdData[0]));
+ if (ret) break;
+ for (idx = 0; idx < arg_cnt_recv; idx++) {
+ argp[idx] = rdData[idx+4];
+ }
+
+ wrData[0] = 0x0;
+ ret = pvr2_encoder_write_words(hdw,wrData,1);
+ if (ret) break;
+
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
+ int args, ...)
+{
+ va_list vl;
+ unsigned int idx;
+ u32 data[12];
+
+ if (args > sizeof(data)/sizeof(data[0])) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Failed to write cx23416 command"
+ " - too many arguments"
+ " (was given %u limit %u)",
+ args,(unsigned int)(sizeof(data)/sizeof(data[0])));
+ return -EINVAL;
+ }
+
+ va_start(vl, args);
+ for (idx = 0; idx < args; idx++) {
+ data[idx] = va_arg(vl, u32);
+ }
+ va_end(vl);
+
+ return pvr2_encoder_cmd(hdw,cmd,args,0,data);
+}
+
+int pvr2_encoder_configure(struct pvr2_hdw *hdw)
+{
+ int ret;
+ pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
+ " (cx2341x module)");
+ hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
+ hdw->enc_ctl_state.width = hdw->res_hor_val;
+ hdw->enc_ctl_state.height = hdw->res_ver_val;
+ hdw->enc_ctl_state.is_50hz = ((hdw->std_mask_cur &
+ (V4L2_STD_NTSC|V4L2_STD_PAL_M)) ?
+ 0 : 1);
+
+ ret = 0;
+
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw,CX2341X_ENC_SET_NUM_VSYNC_LINES, 2,
+ 0xf0, 0xf0);
+
+ /* setup firmware to notify us about some events (don't know why...) */
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw,CX2341X_ENC_SET_EVENT_NOTIFICATION, 4,
+ 0, 0, 0x10000000, 0xffffffff);
+
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw,CX2341X_ENC_SET_VBI_LINE, 5,
+ 0xffffffff,0,0,0,0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to configure cx32416");
+ return ret;
+ }
+
+ ret = cx2341x_update(hdw,pvr2_encoder_cmd,
+ (hdw->enc_cur_valid ? &hdw->enc_cur_state : 0),
+ &hdw->enc_ctl_state);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error from cx2341x module code=%d",ret);
+ return ret;
+ }
+
+ ret = 0;
+
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw, CX2341X_ENC_INITIALIZE_INPUT, 0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to initialize cx32416 video input");
+ return ret;
+ }
+
+ hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
+ memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state,
+ sizeof(struct cx2341x_mpeg_params));
+ hdw->enc_cur_valid = !0;
+ return 0;
+}
+
+
+int pvr2_encoder_start(struct pvr2_hdw *hdw)
+{
+ int status;
+
+ /* unmask some interrupts */
+ pvr2_write_register(hdw, 0x0048, 0xbfffffff);
+
+ /* change some GPIO data */
+ pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000481);
+ pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000);
+
+ if (hdw->config == pvr2_config_vbi) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
+ 0x01,0x14);
+ } else if (hdw->config == pvr2_config_mpeg) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
+ 0,0x13);
+ } else {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
+ 0,0x13);
+ }
+ if (!status) {
+ hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_RUN);
+ }
+ return status;
+}
+
+int pvr2_encoder_stop(struct pvr2_hdw *hdw)
+{
+ int status;
+
+ /* mask all interrupts */
+ pvr2_write_register(hdw, 0x0048, 0xffffffff);
+
+ if (hdw->config == pvr2_config_vbi) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
+ 0x01,0x01,0x14);
+ } else if (hdw->config == pvr2_config_mpeg) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
+ 0x01,0,0x13);
+ } else {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
+ 0x01,0,0x13);
+ }
+
+ /* change some GPIO data */
+ /* Note: Bit d7 of dir appears to control the LED. So we shut it
+ off here. */
+ pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000401);
+ pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000);
+
+ if (!status) {
+ hdw->subsys_enabled_mask &= ~(1<<PVR2_SUBSYS_B_ENC_RUN);
+ }
+ return status;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.h b/drivers/media/video/pvrusb2/pvrusb2-encoder.h
new file mode 100644
index 000000000000..01b5a0b89c03
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_ENCODER_H
+#define __PVRUSB2_ENCODER_H
+
+struct pvr2_hdw;
+
+int pvr2_encoder_configure(struct pvr2_hdw *);
+int pvr2_encoder_start(struct pvr2_hdw *);
+int pvr2_encoder_stop(struct pvr2_hdw *);
+
+#endif /* __PVRUSB2_ENCODER_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
new file mode 100644
index 000000000000..ba2afbfe32c5
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -0,0 +1,384 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_HDW_INTERNAL_H
+#define __PVRUSB2_HDW_INTERNAL_H
+
+/*
+
+ This header sets up all the internal structures and definitions needed to
+ track and coordinate the driver's interaction with the hardware. ONLY
+ source files which actually implement part of that whole circus should be
+ including this header. Higher levels, like the external layers to the
+ various public APIs (V4L, sysfs, etc) should NOT ever include this
+ private, internal header. This means that pvrusb2-hdw, pvrusb2-encoder,
+ etc will include this, but pvrusb2-v4l should not.
+
+*/
+
+#include <linux/config.h>
+#include <linux/videodev2.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-io.h"
+#include <media/cx2341x.h>
+
+/* Legal values for the SRATE state variable */
+#define PVR2_CVAL_SRATE_48 0
+#define PVR2_CVAL_SRATE_44_1 1
+
+/* Legal values for the AUDIOBITRATE state variable */
+#define PVR2_CVAL_AUDIOBITRATE_384 0
+#define PVR2_CVAL_AUDIOBITRATE_320 1
+#define PVR2_CVAL_AUDIOBITRATE_256 2
+#define PVR2_CVAL_AUDIOBITRATE_224 3
+#define PVR2_CVAL_AUDIOBITRATE_192 4
+#define PVR2_CVAL_AUDIOBITRATE_160 5
+#define PVR2_CVAL_AUDIOBITRATE_128 6
+#define PVR2_CVAL_AUDIOBITRATE_112 7
+#define PVR2_CVAL_AUDIOBITRATE_96 8
+#define PVR2_CVAL_AUDIOBITRATE_80 9
+#define PVR2_CVAL_AUDIOBITRATE_64 10
+#define PVR2_CVAL_AUDIOBITRATE_56 11
+#define PVR2_CVAL_AUDIOBITRATE_48 12
+#define PVR2_CVAL_AUDIOBITRATE_32 13
+#define PVR2_CVAL_AUDIOBITRATE_VBR 14
+
+/* Legal values for the AUDIOEMPHASIS state variable */
+#define PVR2_CVAL_AUDIOEMPHASIS_NONE 0
+#define PVR2_CVAL_AUDIOEMPHASIS_50_15 1
+#define PVR2_CVAL_AUDIOEMPHASIS_CCITT 2
+
+/* Legal values for PVR2_CID_HSM */
+#define PVR2_CVAL_HSM_FAIL 0
+#define PVR2_CVAL_HSM_FULL 1
+#define PVR2_CVAL_HSM_HIGH 2
+
+#define PVR2_VID_ENDPOINT 0x84
+#define PVR2_UNK_ENDPOINT 0x86 /* maybe raw yuv ? */
+#define PVR2_VBI_ENDPOINT 0x88
+
+#define PVR2_CTL_BUFFSIZE 64
+
+#define FREQTABLE_SIZE 500
+
+#define LOCK_TAKE(x) do { mutex_lock(&x##_mutex); x##_held = !0; } while (0)
+#define LOCK_GIVE(x) do { x##_held = 0; mutex_unlock(&x##_mutex); } while (0)
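+
+/* Typical use (as in pvrusb2-encoder.c):
+   LOCK_TAKE(hdw->ctl_lock); do { ... } while (0); LOCK_GIVE(hdw->ctl_lock);
+   This expands against the ctl_lock_mutex / ctl_lock_held pair declared
+   in struct pvr2_hdw below. */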
+
+struct pvr2_decoder;
+
+typedef int (*pvr2_ctlf_is_dirty)(struct pvr2_ctrl *);
+typedef void (*pvr2_ctlf_clear_dirty)(struct pvr2_ctrl *);
+typedef int (*pvr2_ctlf_get_value)(struct pvr2_ctrl *,int *);
+typedef int (*pvr2_ctlf_set_value)(struct pvr2_ctrl *,int msk,int val);
+typedef int (*pvr2_ctlf_val_to_sym)(struct pvr2_ctrl *,int msk,int val,
+ char *,unsigned int,unsigned int *);
+typedef int (*pvr2_ctlf_sym_to_val)(struct pvr2_ctrl *,
+ const char *,unsigned int,
+ int *mskp,int *valp);
+typedef unsigned int (*pvr2_ctlf_get_v4lflags)(struct pvr2_ctrl *);
+
+/* This structure describes a specific control. A table of these is set up
+ in pvrusb2-hdw.c. */
+struct pvr2_ctl_info {
+ /* Control's name suitable for use as an identifier */
+ const char *name;
+
+ /* Short description of control */
+ const char *desc;
+
+ /* Control's implementation */
+ pvr2_ctlf_get_value get_value; /* Get its value */
+ pvr2_ctlf_set_value set_value; /* Set its value */
+ pvr2_ctlf_val_to_sym val_to_sym; /* Custom convert value->symbol */
+ pvr2_ctlf_sym_to_val sym_to_val; /* Custom convert symbol->value */
+ pvr2_ctlf_is_dirty is_dirty; /* Return true if dirty */
+ pvr2_ctlf_clear_dirty clear_dirty; /* Clear dirty state */
+ pvr2_ctlf_get_v4lflags get_v4lflags;/* Retrieve v4l flags */
+
+ /* Control's type (int, enum, bitmask) */
+ enum pvr2_ctl_type type;
+
+ /* Associated V4L control ID, if any */
+ int v4l_id;
+
+ /* Associated driver internal ID, if any */
+ int internal_id;
+
+ /* Don't implicitly initialize this control's value */
+ int skip_init;
+
+ /* Starting value for this control */
+ int default_value;
+
+ /* Type-specific control information */
+ union {
+ struct { /* Integer control */
+ long min_value; /* lower limit */
+ long max_value; /* upper limit */
+ } type_int;
+ struct { /* enumerated control */
+ unsigned int count; /* enum value count */
+ const char **value_names; /* symbol names */
+ } type_enum;
+ struct { /* bitmask control */
+ unsigned int valid_bits; /* bits in use */
+ const char **bit_names; /* symbol name/bit */
+ } type_bitmask;
+ } def;
+};
+
+
+/* Same as pvr2_ctl_info, but includes storage for the control description */
+#define PVR2_CTLD_INFO_DESC_SIZE 32
+struct pvr2_ctld_info {
+ struct pvr2_ctl_info info;
+ char desc[PVR2_CTLD_INFO_DESC_SIZE];
+};
+
+struct pvr2_ctrl {
+ const struct pvr2_ctl_info *info;
+ struct pvr2_hdw *hdw;
+};
+
+
+struct pvr2_audio_stat {
+ void *ctxt;
+ void (*detach)(void *);
+ int (*status)(void *);
+};
+
+struct pvr2_decoder_ctrl {
+ void *ctxt;
+ void (*detach)(void *);
+ void (*enable)(void *,int);
+ int (*tuned)(void *);
+ void (*force_reset)(void *);
+};
+
+#define PVR2_I2C_PEND_DETECT 0x01 /* Need to detect a client type */
+#define PVR2_I2C_PEND_CLIENT 0x02 /* Client needs a specific update */
+#define PVR2_I2C_PEND_REFRESH 0x04 /* Client has specific pending bits */
+#define PVR2_I2C_PEND_STALE 0x08 /* Broadcast pending bits */
+
+#define PVR2_I2C_PEND_ALL (PVR2_I2C_PEND_DETECT |\
+ PVR2_I2C_PEND_CLIENT |\
+ PVR2_I2C_PEND_REFRESH |\
+ PVR2_I2C_PEND_STALE)
+
+/* Disposition of firmware1 loading situation */
+#define FW1_STATE_UNKNOWN 0
+#define FW1_STATE_MISSING 1
+#define FW1_STATE_FAILED 2
+#define FW1_STATE_RELOAD 3
+#define FW1_STATE_OK 4
+
+/* Known major hardware variants, keyed from device ID */
+#define PVR2_HDW_TYPE_29XXX 0
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+#define PVR2_HDW_TYPE_24XXX 1
+#endif
+
+typedef int (*pvr2_i2c_func)(struct pvr2_hdw *,u8,u8 *,u16,u8 *, u16);
+#define PVR2_I2C_FUNC_CNT 128
+
+/* This structure contains all state data directly needed to
+ manipulate the hardware (as opposed to complying with a kernel
+ interface) */
+struct pvr2_hdw {
+ /* Underlying USB device handle */
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_intf;
+
+ /* Device type, one of PVR2_HDW_TYPE_xxxxx */
+ unsigned int hdw_type;
+
+ /* Video spigot */
+ struct pvr2_stream *vid_stream;
+
+ /* Mutex for all hardware state control */
+ struct mutex big_lock_mutex;
+ int big_lock_held; /* For debugging */
+
+ void (*poll_trigger_func)(void *);
+ void *poll_trigger_data;
+
+ char name[32];
+
+ /* I2C stuff */
+ struct i2c_adapter i2c_adap;
+ struct i2c_algorithm i2c_algo;
+ pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
+ int i2c_cx25840_hack_state;
+ int i2c_linked;
+ unsigned int i2c_pend_types; /* Which types of update are needed */
+ unsigned long i2c_pend_mask; /* Change bits we need to scan */
+ unsigned long i2c_stale_mask; /* Pending broadcast change bits */
+ unsigned long i2c_active_mask; /* All change bits currently in use */
+ struct list_head i2c_clients;
+ struct mutex i2c_list_lock;
+
+ /* Frequency table */
+ unsigned int freqTable[FREQTABLE_SIZE];
+ unsigned int freqProgSlot;
+ unsigned int freqSlot;
+
+ /* Stuff for handling low level control interaction with device */
+ struct mutex ctl_lock_mutex;
+ int ctl_lock_held; /* For debugging */
+ struct urb *ctl_write_urb;
+ struct urb *ctl_read_urb;
+ unsigned char *ctl_write_buffer;
+ unsigned char *ctl_read_buffer;
+ volatile int ctl_write_pend_flag;
+ volatile int ctl_read_pend_flag;
+ volatile int ctl_timeout_flag;
+ struct completion ctl_done;
+ unsigned char cmd_buffer[PVR2_CTL_BUFFSIZE];
+ int cmd_debug_state; // Low level command debugging info
+ unsigned char cmd_debug_code; //
+ unsigned int cmd_debug_write_len; //
+ unsigned int cmd_debug_read_len; //
+
+ int flag_ok; // device in known good state
+ int flag_disconnected; // flag_ok == 0 due to disconnect
+ int flag_init_ok; // true if structure is fully initialized
+ int flag_streaming_enabled; // true if streaming should be on
+ int fw1_state; // current situation with fw1
+
+ int flag_decoder_is_tuned;
+
+ struct pvr2_decoder_ctrl *decoder_ctrl;
+
+ // CPU firmware info (used to help find / save firmware data)
+ char *fw_buffer;
+ unsigned int fw_size;
+
+ // Which subsystem pieces have been enabled / configured
+ unsigned long subsys_enabled_mask;
+
+ // Which subsystems are manipulated to enable streaming
+ unsigned long subsys_stream_mask;
+
+ // True if there is a request to trigger logging of state in each
+ // module.
+ int log_requested;
+
+ /* Tuner / frequency control stuff */
+ unsigned int tuner_type;
+ int tuner_updated;
+ unsigned int freqVal;
+ int freqDirty;
+
+ /* Video standard handling */
+ v4l2_std_id std_mask_eeprom; // Hardware supported selections
+ v4l2_std_id std_mask_avail; // Which standards we may select from
+ v4l2_std_id std_mask_cur; // Currently selected standard(s)
+ unsigned int std_enum_cnt; // # of enumerated standards
+ int std_enum_cur; // selected standard enumeration value
+ int std_dirty; // True if std_mask_cur has changed
+ struct pvr2_ctl_info std_info_enum;
+ struct pvr2_ctl_info std_info_avail;
+ struct pvr2_ctl_info std_info_cur;
+ struct v4l2_standard *std_defs;
+ const char **std_enum_names;
+
+ // Generated string names, one per actual V4L2 standard
+ const char *std_mask_ptrs[32];
+ char std_mask_names[32][10];
+
+ int unit_number; /* ID for driver instance */
+ unsigned long serial_number; /* ID for hardware itself */
+
+ /* Minor number used by v4l logic (yes, this is a hack, as there should
+ be no v4l junk here).  There is probably a better way to do this. */
+ int v4l_minor_number;
+
+ /* Location of eeprom or a negative number if none */
+ int eeprom_addr;
+
+ enum pvr2_config config;
+
+ /* Information about what audio signal we're hearing */
+ int flag_stereo;
+ int flag_bilingual;
+ struct pvr2_audio_stat *audio_stat;
+
+ /* Control state needed for cx2341x module */
+ struct cx2341x_mpeg_params enc_cur_state;
+ struct cx2341x_mpeg_params enc_ctl_state;
+ /* True if an encoder attribute has changed */
+ int enc_stale;
+ /* True if enc_cur_state is valid */
+ int enc_cur_valid;
+
+ /* Control state */
+#define VCREATE_DATA(lab) int lab##_val; int lab##_dirty
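+ /* For example, VCREATE_DATA(brightness) declares the pair
+    brightness_val / brightness_dirty consumed by the accessor
+    functions generated with VCREATE_FUNCS() in pvrusb2-hdw.c. */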
+ VCREATE_DATA(brightness);
+ VCREATE_DATA(contrast);
+ VCREATE_DATA(saturation);
+ VCREATE_DATA(hue);
+ VCREATE_DATA(volume);
+ VCREATE_DATA(balance);
+ VCREATE_DATA(bass);
+ VCREATE_DATA(treble);
+ VCREATE_DATA(mute);
+ VCREATE_DATA(input);
+ VCREATE_DATA(audiomode);
+ VCREATE_DATA(res_hor);
+ VCREATE_DATA(res_ver);
+ VCREATE_DATA(srate);
+#undef VCREATE_DATA
+
+ struct pvr2_ctld_info *mpeg_ctrl_info;
+
+ struct pvr2_ctrl *controls;
+ unsigned int control_cnt;
+};
+
+int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw);
+
+unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *);
+
+void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val);
+void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,
+ unsigned long val);
+
+void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw);
+void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw);
+
+int pvr2_i2c_basic_op(struct pvr2_hdw *,u8 i2c_addr,
+ u8 *wdata,u16 wlen,
+ u8 *rdata,u16 rlen);
+
+#endif /* __PVRUSB2_HDW_INTERNAL_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
new file mode 100644
index 000000000000..643c471375da
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -0,0 +1,3120 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/videodev2.h>
+#include <asm/semaphore.h>
+#include "pvrusb2.h"
+#include "pvrusb2-std.h"
+#include "pvrusb2-util.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-i2c-core.h"
+#include "pvrusb2-tuner.h"
+#include "pvrusb2-eeprom.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-encoder.h"
+#include "pvrusb2-debug.h"
+
+struct usb_device_id pvr2_device_table[] = {
+ [PVR2_HDW_TYPE_29XXX] = { USB_DEVICE(0x2040, 0x2900) },
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = { USB_DEVICE(0x2040, 0x2400) },
+#endif
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, pvr2_device_table);
+
+static const char *pvr2_device_names[] = {
+ [PVR2_HDW_TYPE_29XXX] = "WinTV PVR USB2 Model Category 29xxxx",
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = "WinTV PVR USB2 Model Category 24xxxx",
+#endif
+};
+
+struct pvr2_string_table {
+ const char **lst;
+ unsigned int cnt;
+};
+
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+// Names of other client modules to request for 24xxx model hardware
+static const char *pvr2_client_24xxx[] = {
+ "cx25840",
+ "tuner",
+ "tda9887",
+ "wm8775",
+};
+#endif
+
+// Names of other client modules to request for 29xxx model hardware
+static const char *pvr2_client_29xxx[] = {
+ "msp3400",
+ "saa7115",
+ "tuner",
+ "tda9887",
+};
+
+static struct pvr2_string_table pvr2_client_lists[] = {
+ [PVR2_HDW_TYPE_29XXX] = {
+ pvr2_client_29xxx,
+ sizeof(pvr2_client_29xxx)/sizeof(pvr2_client_29xxx[0]),
+ },
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = {
+ pvr2_client_24xxx,
+ sizeof(pvr2_client_24xxx)/sizeof(pvr2_client_24xxx[0]),
+ },
+#endif
+};
+
+static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = 0};
+DECLARE_MUTEX(pvr2_unit_sem);
+
+static int ctlchg = 0;
+static int initusbreset = 1;
+static int procreload = 0;
+static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
+static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
+static int video_std[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
+static int init_pause_msec = 0;
+
+module_param(ctlchg, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
+module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
+module_param(initusbreset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
+module_param(procreload, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(procreload,
+ "Attempt init failure recovery with firmware reload");
+module_param_array(tuner, int, NULL, 0444);
+MODULE_PARM_DESC(tuner,"specify installed tuner type");
+module_param_array(video_std, int, NULL, 0444);
+MODULE_PARM_DESC(video_std,"specify initial video standard");
+module_param_array(tolerance, int, NULL, 0444);
+MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
+
+#define PVR2_CTL_WRITE_ENDPOINT 0x01
+#define PVR2_CTL_READ_ENDPOINT 0x81
+
+#define PVR2_GPIO_IN 0x9008
+#define PVR2_GPIO_OUT 0x900c
+#define PVR2_GPIO_DIR 0x9020
+
+#define trace_firmware(...) pvr2_trace(PVR2_TRACE_FIRMWARE,__VA_ARGS__)
+
+#define PVR2_FIRMWARE_ENDPOINT 0x02
+
+/* size of a firmware chunk */
+#define FIRMWARE_CHUNK_SIZE 0x2000
+
+/* Define the list of additional controls we'll dynamically construct based
+ on query of the cx2341x module. */
+struct pvr2_mpeg_ids {
+ const char *strid;
+ int id;
+};
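+/* Each entry pairs the pvrusb2 control name (strid) with the V4L2 control
+   ID that will be queried from the cx2341x module when the dynamic control
+   table is built in pvr2_hdw_create(). */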
+static const struct pvr2_mpeg_ids mpeg_ids[] = {
+ {
+ .strid = "audio_layer",
+ .id = V4L2_CID_MPEG_AUDIO_ENCODING,
+ },{
+ .strid = "audio_bitrate",
+ .id = V4L2_CID_MPEG_AUDIO_L2_BITRATE,
+ },{
+ /* Already using audio_mode elsewhere :-( */
+ .strid = "mpeg_audio_mode",
+ .id = V4L2_CID_MPEG_AUDIO_MODE,
+ },{
+ .strid = "mpeg_audio_mode_extension",
+ .id = V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
+ },{
+ .strid = "audio_emphasis",
+ .id = V4L2_CID_MPEG_AUDIO_EMPHASIS,
+ },{
+ .strid = "audio_crc",
+ .id = V4L2_CID_MPEG_AUDIO_CRC,
+ },{
+ .strid = "video_aspect",
+ .id = V4L2_CID_MPEG_VIDEO_ASPECT,
+ },{
+ .strid = "video_b_frames",
+ .id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ },{
+ .strid = "video_gop_size",
+ .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ },{
+ .strid = "video_gop_closure",
+ .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
+ },{
+ .strid = "video_pulldown",
+ .id = V4L2_CID_MPEG_VIDEO_PULLDOWN,
+ },{
+ .strid = "video_bitrate_mode",
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ },{
+ .strid = "video_bitrate",
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ },{
+ .strid = "video_bitrate_peak",
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ },{
+ .strid = "video_temporal_decimation",
+ .id = V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION,
+ },{
+ .strid = "stream_type",
+ .id = V4L2_CID_MPEG_STREAM_TYPE,
+ },{
+ .strid = "video_spatial_filter_mode",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
+ },{
+ .strid = "video_spatial_filter",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
+ },{
+ .strid = "video_luma_spatial_filter_type",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
+ },{
+ .strid = "video_chroma_spatial_filter_type",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
+ },{
+ .strid = "video_temporal_filter_mode",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
+ },{
+ .strid = "video_temporal_filter",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
+ },{
+ .strid = "video_median_filter_type",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
+ },{
+ .strid = "video_luma_median_filter_top",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
+ },{
+ .strid = "video_luma_median_filter_bottom",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
+ },{
+ .strid = "video_chroma_median_filter_top",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
+ },{
+ .strid = "video_chroma_median_filter_bottom",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
+ }
+};
+#define MPEGDEF_COUNT (sizeof(mpeg_ids)/sizeof(mpeg_ids[0]))
+
+static const char *control_values_srate[] = {
+ [PVR2_CVAL_SRATE_48] = "48KHz",
+ [PVR2_CVAL_SRATE_44_1] = "44.1KHz",
+};
+
+
+
+
+static const char *control_values_input[] = {
+ [PVR2_CVAL_INPUT_TV] = "television", /* xawtv needs this name */
+ [PVR2_CVAL_INPUT_RADIO] = "radio",
+ [PVR2_CVAL_INPUT_SVIDEO] = "s-video",
+ [PVR2_CVAL_INPUT_COMPOSITE] = "composite",
+};
+
+
+static const char *control_values_audiomode[] = {
+ [V4L2_TUNER_MODE_MONO] = "Mono",
+ [V4L2_TUNER_MODE_STEREO] = "Stereo",
+ [V4L2_TUNER_MODE_LANG1] = "Lang1",
+ [V4L2_TUNER_MODE_LANG2] = "Lang2",
+ [V4L2_TUNER_MODE_LANG1_LANG2] = "Lang1+Lang2",
+};
+
+
+static const char *control_values_hsm[] = {
+ [PVR2_CVAL_HSM_FAIL] = "Fail",
+ [PVR2_CVAL_HSM_HIGH] = "High",
+ [PVR2_CVAL_HSM_FULL] = "Full",
+};
+
+
+static const char *control_values_subsystem[] = {
+ [PVR2_SUBSYS_B_ENC_FIRMWARE] = "enc_firmware",
+ [PVR2_SUBSYS_B_ENC_CFG] = "enc_config",
+ [PVR2_SUBSYS_B_DIGITIZER_RUN] = "digitizer_run",
+ [PVR2_SUBSYS_B_USBSTREAM_RUN] = "usbstream_run",
+ [PVR2_SUBSYS_B_ENC_RUN] = "enc_run",
+};
+
+
+static int ctrl_channelfreq_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) {
+ *vp = hdw->freqTable[hdw->freqProgSlot-1];
+ } else {
+ *vp = 0;
+ }
+ return 0;
+}
+
+static int ctrl_channelfreq_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) {
+ hdw->freqTable[hdw->freqProgSlot-1] = v;
+ }
+ return 0;
+}
+
+static int ctrl_channelprog_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->freqProgSlot;
+ return 0;
+}
+
+static int ctrl_channelprog_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if ((v >= 0) && (v <= FREQTABLE_SIZE)) {
+ hdw->freqProgSlot = v;
+ }
+ return 0;
+}
+
+static int ctrl_channel_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->freqSlot;
+ return 0;
+}
+
+static int ctrl_channel_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ unsigned freq = 0;
+ struct pvr2_hdw *hdw = cptr->hdw;
+ hdw->freqSlot = v;
+ if ((hdw->freqSlot > 0) && (hdw->freqSlot <= FREQTABLE_SIZE)) {
+ freq = hdw->freqTable[hdw->freqSlot-1];
+ }
+ if (freq && (freq != hdw->freqVal)) {
+ hdw->freqVal = freq;
+ hdw->freqDirty = !0;
+ }
+ return 0;
+}
+
+static int ctrl_freq_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->freqVal;
+ return 0;
+}
+
+static int ctrl_freq_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->freqDirty != 0;
+}
+
+static void ctrl_freq_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->freqDirty = 0;
+}
+
+static int ctrl_freq_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ hdw->freqVal = v;
+ hdw->freqDirty = !0;
+ hdw->freqSlot = 0;
+ return 0;
+}
+
+static int ctrl_cx2341x_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->enc_stale != 0;
+}
+
+static void ctrl_cx2341x_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->enc_stale = 0;
+}
+
+static int ctrl_cx2341x_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ int ret;
+ struct v4l2_ext_controls cs;
+ struct v4l2_ext_control c1;
+ memset(&cs,0,sizeof(cs));
+ memset(&c1,0,sizeof(c1));
+ cs.controls = &c1;
+ cs.count = 1;
+ c1.id = cptr->info->v4l_id;
+ ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs,
+ VIDIOC_G_EXT_CTRLS);
+ if (ret) return ret;
+ *vp = c1.value;
+ return 0;
+}
+
+static int ctrl_cx2341x_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ int ret;
+ struct v4l2_ext_controls cs;
+ struct v4l2_ext_control c1;
+ memset(&cs,0,sizeof(cs));
+ memset(&c1,0,sizeof(c1));
+ cs.controls = &c1;
+ cs.count = 1;
+ c1.id = cptr->info->v4l_id;
+ c1.value = v;
+ ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs,
+ VIDIOC_S_EXT_CTRLS);
+ if (ret) return ret;
+ cptr->hdw->enc_stale = !0;
+ return 0;
+}
+
+static unsigned int ctrl_cx2341x_getv4lflags(struct pvr2_ctrl *cptr)
+{
+ struct v4l2_queryctrl qctrl;
+ struct pvr2_ctl_info *info;
+ qctrl.id = cptr->info->v4l_id;
+ cx2341x_ctrl_query(&cptr->hdw->enc_ctl_state,&qctrl);
+ /* Strip out the const so we can adjust a function pointer. It's
+ OK to do this here because we know this is a dynamically created
+ control, so the underlying storage for the info pointer is (a)
+ private to us, and (b) not in read-only storage. Either we do
+ this or we significantly complicate the underlying control
+ implementation. */
+ info = (struct pvr2_ctl_info *)(cptr->info);
+ if (qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY) {
+ if (info->set_value) {
+ info->set_value = 0;
+ }
+ } else {
+ if (!(info->set_value)) {
+ info->set_value = ctrl_cx2341x_set;
+ }
+ }
+ return qctrl.flags;
+}
+
+static int ctrl_streamingenabled_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->flag_streaming_enabled;
+ return 0;
+}
+
+static int ctrl_hsm_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ int result = pvr2_hdw_is_hsm(cptr->hdw);
+ *vp = PVR2_CVAL_HSM_FULL;
+ if (result < 0) *vp = PVR2_CVAL_HSM_FAIL;
+ if (result) *vp = PVR2_CVAL_HSM_HIGH;
+ return 0;
+}
+
+static int ctrl_stdavail_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->std_mask_avail;
+ return 0;
+}
+
+static int ctrl_stdavail_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ v4l2_std_id ns;
+ ns = hdw->std_mask_avail;
+ ns = (ns & ~m) | (v & m);
+ if (ns == hdw->std_mask_avail) return 0;
+ hdw->std_mask_avail = ns;
+ pvr2_hdw_internal_set_std_avail(hdw);
+ pvr2_hdw_internal_find_stdenum(hdw);
+ return 0;
+}
+
+static int ctrl_std_val_to_sym(struct pvr2_ctrl *cptr,int msk,int val,
+ char *bufPtr,unsigned int bufSize,
+ unsigned int *len)
+{
+ *len = pvr2_std_id_to_str(bufPtr,bufSize,msk & val);
+ return 0;
+}
+
+static int ctrl_std_sym_to_val(struct pvr2_ctrl *cptr,
+ const char *bufPtr,unsigned int bufSize,
+ int *mskp,int *valp)
+{
+ int ret;
+ v4l2_std_id id;
+ ret = pvr2_std_str_to_id(&id,bufPtr,bufSize);
+ if (ret < 0) return ret;
+ if (mskp) *mskp = id;
+ if (valp) *valp = id;
+ return 0;
+}
+
+static int ctrl_stdcur_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->std_mask_cur;
+ return 0;
+}
+
+static int ctrl_stdcur_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ v4l2_std_id ns;
+ ns = hdw->std_mask_cur;
+ ns = (ns & ~m) | (v & m);
+ if (ns == hdw->std_mask_cur) return 0;
+ hdw->std_mask_cur = ns;
+ hdw->std_dirty = !0;
+ pvr2_hdw_internal_find_stdenum(hdw);
+ return 0;
+}
+
+static int ctrl_stdcur_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->std_dirty != 0;
+}
+
+static void ctrl_stdcur_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->std_dirty = 0;
+}
+
+static int ctrl_signal_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = ((pvr2_hdw_get_signal_status_internal(cptr->hdw) &
+ PVR2_SIGNAL_OK) ? 1 : 0);
+ return 0;
+}
+
+static int ctrl_subsys_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->subsys_enabled_mask;
+ return 0;
+}
+
+static int ctrl_subsys_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ pvr2_hdw_subsys_bit_chg_no_lock(cptr->hdw,m,v);
+ return 0;
+}
+
+static int ctrl_subsys_stream_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->subsys_stream_mask;
+ return 0;
+}
+
+static int ctrl_subsys_stream_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ pvr2_hdw_subsys_stream_bit_chg_no_lock(cptr->hdw,m,v);
+ return 0;
+}
+
+static int ctrl_stdenumcur_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if (v < 0) return -EINVAL;
+ if (v > hdw->std_enum_cnt) return -EINVAL;
+ hdw->std_enum_cur = v;
+ if (!v) return 0;
+ v--;
+ if (hdw->std_mask_cur == hdw->std_defs[v].id) return 0;
+ hdw->std_mask_cur = hdw->std_defs[v].id;
+ hdw->std_dirty = !0;
+ return 0;
+}
+
+
+static int ctrl_stdenumcur_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->std_enum_cur;
+ return 0;
+}
+
+
+static int ctrl_stdenumcur_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->std_dirty != 0;
+}
+
+
+static void ctrl_stdenumcur_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->std_dirty = 0;
+}
+
+
+#define DEFINT(vmin,vmax) \
+ .type = pvr2_ctl_int, \
+ .def.type_int.min_value = vmin, \
+ .def.type_int.max_value = vmax
+
+#define DEFENUM(tab) \
+ .type = pvr2_ctl_enum, \
+ .def.type_enum.count = (sizeof(tab)/sizeof((tab)[0])), \
+ .def.type_enum.value_names = tab
+
+#define DEFBOOL \
+ .type = pvr2_ctl_bool
+
+#define DEFMASK(msk,tab) \
+ .type = pvr2_ctl_bitmask, \
+ .def.type_bitmask.valid_bits = msk, \
+ .def.type_bitmask.bit_names = tab
+
+#define DEFREF(vname) \
+ .set_value = ctrl_set_##vname, \
+ .get_value = ctrl_get_##vname, \
+ .is_dirty = ctrl_isdirty_##vname, \
+ .clear_dirty = ctrl_cleardirty_##vname
+
+
+#define VCREATE_FUNCS(vname) \
+static int ctrl_get_##vname(struct pvr2_ctrl *cptr,int *vp) \
+{*vp = cptr->hdw->vname##_val; return 0;} \
+static int ctrl_set_##vname(struct pvr2_ctrl *cptr,int m,int v) \
+{cptr->hdw->vname##_val = v; cptr->hdw->vname##_dirty = !0; return 0;} \
+static int ctrl_isdirty_##vname(struct pvr2_ctrl *cptr) \
+{return cptr->hdw->vname##_dirty != 0;} \
+static void ctrl_cleardirty_##vname(struct pvr2_ctrl *cptr) \
+{cptr->hdw->vname##_dirty = 0;}
+
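+/* For instance, VCREATE_FUNCS(brightness) below generates
+   ctrl_get_brightness(), ctrl_set_brightness(), ctrl_isdirty_brightness()
+   and ctrl_cleardirty_brightness(), which are the four handlers that
+   DEFREF(brightness) wires into the control table. */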
+VCREATE_FUNCS(brightness)
+VCREATE_FUNCS(contrast)
+VCREATE_FUNCS(saturation)
+VCREATE_FUNCS(hue)
+VCREATE_FUNCS(volume)
+VCREATE_FUNCS(balance)
+VCREATE_FUNCS(bass)
+VCREATE_FUNCS(treble)
+VCREATE_FUNCS(mute)
+VCREATE_FUNCS(input)
+VCREATE_FUNCS(audiomode)
+VCREATE_FUNCS(res_hor)
+VCREATE_FUNCS(res_ver)
+VCREATE_FUNCS(srate)
+
+#define MIN_FREQ 55250000L
+#define MAX_FREQ 850000000L
+
+/* Table definition of all controls which can be manipulated */
+static const struct pvr2_ctl_info control_defs[] = {
+ {
+ .v4l_id = V4L2_CID_BRIGHTNESS,
+ .desc = "Brightness",
+ .name = "brightness",
+ .default_value = 128,
+ DEFREF(brightness),
+ DEFINT(0,255),
+ },{
+ .v4l_id = V4L2_CID_CONTRAST,
+ .desc = "Contrast",
+ .name = "contrast",
+ .default_value = 68,
+ DEFREF(contrast),
+ DEFINT(0,127),
+ },{
+ .v4l_id = V4L2_CID_SATURATION,
+ .desc = "Saturation",
+ .name = "saturation",
+ .default_value = 64,
+ DEFREF(saturation),
+ DEFINT(0,127),
+ },{
+ .v4l_id = V4L2_CID_HUE,
+ .desc = "Hue",
+ .name = "hue",
+ .default_value = 0,
+ DEFREF(hue),
+ DEFINT(-128,127),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_VOLUME,
+ .desc = "Volume",
+ .name = "volume",
+ .default_value = 65535,
+ DEFREF(volume),
+ DEFINT(0,65535),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_BALANCE,
+ .desc = "Balance",
+ .name = "balance",
+ .default_value = 0,
+ DEFREF(balance),
+ DEFINT(-32768,32767),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_BASS,
+ .desc = "Bass",
+ .name = "bass",
+ .default_value = 0,
+ DEFREF(bass),
+ DEFINT(-32768,32767),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_TREBLE,
+ .desc = "Treble",
+ .name = "treble",
+ .default_value = 0,
+ DEFREF(treble),
+ DEFINT(-32768,32767),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_MUTE,
+ .desc = "Mute",
+ .name = "mute",
+ .default_value = 0,
+ DEFREF(mute),
+ DEFBOOL,
+ },{
+ .desc = "Video Source",
+ .name = "input",
+ .internal_id = PVR2_CID_INPUT,
+ .default_value = PVR2_CVAL_INPUT_TV,
+ DEFREF(input),
+ DEFENUM(control_values_input),
+ },{
+ .desc = "Audio Mode",
+ .name = "audio_mode",
+ .internal_id = PVR2_CID_AUDIOMODE,
+ .default_value = V4L2_TUNER_MODE_STEREO,
+ DEFREF(audiomode),
+ DEFENUM(control_values_audiomode),
+ },{
+ .desc = "Horizontal capture resolution",
+ .name = "resolution_hor",
+ .internal_id = PVR2_CID_HRES,
+ .default_value = 720,
+ DEFREF(res_hor),
+ DEFINT(320,720),
+ },{
+ .desc = "Vertical capture resolution",
+ .name = "resolution_ver",
+ .internal_id = PVR2_CID_VRES,
+ .default_value = 480,
+ DEFREF(res_ver),
+ DEFINT(200,625),
+ },{
+ .v4l_id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
+ .desc = "Sample rate",
+ .name = "srate",
+ .default_value = PVR2_CVAL_SRATE_48,
+ DEFREF(srate),
+ DEFENUM(control_values_srate),
+ },{
+ .desc = "Tuner Frequency (Hz)",
+ .name = "frequency",
+ .internal_id = PVR2_CID_FREQUENCY,
+ .default_value = 175250000L,
+ .set_value = ctrl_freq_set,
+ .get_value = ctrl_freq_get,
+ .is_dirty = ctrl_freq_is_dirty,
+ .clear_dirty = ctrl_freq_clear_dirty,
+ DEFINT(MIN_FREQ,MAX_FREQ),
+ },{
+ .desc = "Channel",
+ .name = "channel",
+ .set_value = ctrl_channel_set,
+ .get_value = ctrl_channel_get,
+ DEFINT(0,FREQTABLE_SIZE),
+ },{
+ .desc = "Channel Program Frequency",
+ .name = "freq_table_value",
+ .set_value = ctrl_channelfreq_set,
+ .get_value = ctrl_channelfreq_get,
+ DEFINT(MIN_FREQ,MAX_FREQ),
+ },{
+ .desc = "Channel Program ID",
+ .name = "freq_table_channel",
+ .set_value = ctrl_channelprog_set,
+ .get_value = ctrl_channelprog_get,
+ DEFINT(0,FREQTABLE_SIZE),
+ },{
+ .desc = "Streaming Enabled",
+ .name = "streaming_enabled",
+ .get_value = ctrl_streamingenabled_get,
+ DEFBOOL,
+ },{
+ .desc = "USB Speed",
+ .name = "usb_speed",
+ .get_value = ctrl_hsm_get,
+ DEFENUM(control_values_hsm),
+ },{
+ .desc = "Signal Present",
+ .name = "signal_present",
+ .get_value = ctrl_signal_get,
+ DEFBOOL,
+ },{
+ .desc = "Video Standards Available Mask",
+ .name = "video_standard_mask_available",
+ .internal_id = PVR2_CID_STDAVAIL,
+ .skip_init = !0,
+ .get_value = ctrl_stdavail_get,
+ .set_value = ctrl_stdavail_set,
+ .val_to_sym = ctrl_std_val_to_sym,
+ .sym_to_val = ctrl_std_sym_to_val,
+ .type = pvr2_ctl_bitmask,
+ },{
+ .desc = "Video Standards In Use Mask",
+ .name = "video_standard_mask_active",
+ .internal_id = PVR2_CID_STDCUR,
+ .skip_init = !0,
+ .get_value = ctrl_stdcur_get,
+ .set_value = ctrl_stdcur_set,
+ .is_dirty = ctrl_stdcur_is_dirty,
+ .clear_dirty = ctrl_stdcur_clear_dirty,
+ .val_to_sym = ctrl_std_val_to_sym,
+ .sym_to_val = ctrl_std_sym_to_val,
+ .type = pvr2_ctl_bitmask,
+ },{
+ .desc = "Subsystem enabled mask",
+ .name = "debug_subsys_mask",
+ .skip_init = !0,
+ .get_value = ctrl_subsys_get,
+ .set_value = ctrl_subsys_set,
+ DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem),
+ },{
+ .desc = "Subsystem stream mask",
+ .name = "debug_subsys_stream_mask",
+ .skip_init = !0,
+ .get_value = ctrl_subsys_stream_get,
+ .set_value = ctrl_subsys_stream_set,
+ DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem),
+ },{
+ .desc = "Video Standard Name",
+ .name = "video_standard",
+ .internal_id = PVR2_CID_STDENUM,
+ .skip_init = !0,
+ .get_value = ctrl_stdenumcur_get,
+ .set_value = ctrl_stdenumcur_set,
+ .is_dirty = ctrl_stdenumcur_is_dirty,
+ .clear_dirty = ctrl_stdenumcur_clear_dirty,
+ .type = pvr2_ctl_enum,
+ }
+};
+
+#define CTRLDEF_COUNT (sizeof(control_defs)/sizeof(control_defs[0]))
+
+
+const char *pvr2_config_get_name(enum pvr2_config cfg)
+{
+ switch (cfg) {
+ case pvr2_config_empty: return "empty";
+ case pvr2_config_mpeg: return "mpeg";
+ case pvr2_config_vbi: return "vbi";
+ case pvr2_config_radio: return "radio";
+ }
+ return "<unknown>";
+}
+
+
+struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *hdw)
+{
+ return hdw->usb_dev;
+}
+
+
+unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *hdw)
+{
+ return hdw->serial_number;
+}
+
+
+struct pvr2_hdw *pvr2_hdw_find(int unit_number)
+{
+ if (unit_number < 0) return 0;
+ if (unit_number >= PVR_NUM) return 0;
+ return unit_pointers[unit_number];
+}
+
+
+int pvr2_hdw_get_unit_number(struct pvr2_hdw *hdw)
+{
+ return hdw->unit_number;
+}
+
+
+/* Attempt to locate one of the given set of files.  Messages appropriate
+ to what has been found are logged.  The return value will be 0 or
+ greater on success (it will be the index of the file name found) and
+ fw_entry will be filled in.  Otherwise a negative error is returned on
+ failure.  If the return value is -ENOENT then no viable firmware file
+ could be located. */
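+/* (Callers such as pvr2_upload_firmware1() below rely on the -ENOENT case
+   to distinguish "firmware file missing" from other failures.) */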
+static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
+ const struct firmware **fw_entry,
+ const char *fwtypename,
+ unsigned int fwcount,
+ const char *fwnames[])
+{
+ unsigned int idx;
+ int ret = -EINVAL;
+ for (idx = 0; idx < fwcount; idx++) {
+ ret = request_firmware(fw_entry,
+ fwnames[idx],
+ &hdw->usb_dev->dev);
+ if (!ret) {
+ trace_firmware("Located %s firmware: %s;"
+ " uploading...",
+ fwtypename,
+ fwnames[idx]);
+ return idx;
+ }
+ if (ret == -ENOENT) continue;
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware fatal error with code=%d",ret);
+ return ret;
+ }
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "***WARNING***"
+ " Device %s firmware"
+ " seems to be missing.",
+ fwtypename);
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Did you install the pvrusb2 firmware files"
+ " in their proper location?");
+ if (fwcount == 1) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware unable to locate %s file %s",
+ fwtypename,fwnames[0]);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware unable to locate"
+ " one of the following %s files:",
+ fwtypename);
+ for (idx = 0; idx < fwcount; idx++) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware: Failed to find %s",
+ fwnames[idx]);
+ }
+ }
+ return ret;
+}
+
+
+/*
+ * pvr2_upload_firmware1().
+ *
+ * Send the 8051 firmware to the device.  After the upload, arrange for
+ * the device to re-enumerate.
+ *
+ * NOTE: the pointer to the firmware data given by request_firmware()
+ * is not suitable for a USB transaction.
+ *
+ */
+int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
+{
+ const struct firmware *fw_entry = 0;
+ void *fw_ptr;
+ unsigned int pipe;
+ int ret;
+ u16 address;
+ static const char *fw_files_29xxx[] = {
+ "v4l-pvrusb2-29xxx-01.fw",
+ };
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ static const char *fw_files_24xxx[] = {
+ "v4l-pvrusb2-24xxx-01.fw",
+ };
+#endif
+ static const struct pvr2_string_table fw_file_defs[] = {
+ [PVR2_HDW_TYPE_29XXX] = {
+ fw_files_29xxx,
+ sizeof(fw_files_29xxx)/sizeof(fw_files_29xxx[0]),
+ },
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = {
+ fw_files_24xxx,
+ sizeof(fw_files_24xxx)/sizeof(fw_files_24xxx[0]),
+ },
+#endif
+ };
+ hdw->fw1_state = FW1_STATE_FAILED; // default result
+
+ trace_firmware("pvr2_upload_firmware1");
+
+ ret = pvr2_locate_firmware(hdw,&fw_entry,"fx2 controller",
+ fw_file_defs[hdw->hdw_type].cnt,
+ fw_file_defs[hdw->hdw_type].lst);
+ if (ret < 0) {
+ if (ret == -ENOENT) hdw->fw1_state = FW1_STATE_MISSING;
+ return ret;
+ }
+
+ usb_settoggle(hdw->usb_dev, 0 & 0xf, !(0 & USB_DIR_IN), 0);
+ usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f));
+
+ pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
+
+ if (fw_entry->size != 0x2000){
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,"wrong fx2 firmware size");
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ fw_ptr = kmalloc(0x800, GFP_KERNEL);
+ if (fw_ptr == NULL){
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ /* We have to hold the CPU during firmware upload. */
+ pvr2_hdw_cpureset_assert(hdw,1);
+
+ /* Upload the firmware to addresses 0000-1fff in 2048-byte (0x800)
+ chunks. */
+
+ ret = 0;
+ for(address = 0; address < fw_entry->size; address += 0x800) {
+ memcpy(fw_ptr, fw_entry->data + address, 0x800);
+ ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address,
+ 0, fw_ptr, 0x800, HZ);
+ }
+
+ trace_firmware("Upload done, releasing device's CPU");
+
+ /* Now release the CPU. It will disconnect and reconnect later. */
+ pvr2_hdw_cpureset_assert(hdw,0);
+
+ kfree(fw_ptr);
+ release_firmware(fw_entry);
+
+ trace_firmware("Upload done (%d bytes sent)",ret);
+
+ /* We should have written 8192 bytes */
+ if (ret == 8192) {
+ hdw->fw1_state = FW1_STATE_RELOAD;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+
+/*
+ * pvr2_upload_firmware2()
+ *
+ * This uploads encoder firmware on endpoint 2.
+ *
+ */
+
+int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
+{
+ const struct firmware *fw_entry = 0;
+ void *fw_ptr;
+ unsigned int pipe, fw_len, fw_done;
+ int actual_length;
+ int ret = 0;
+ int fwidx;
+ static const char *fw_files[] = {
+ CX2341X_FIRM_ENC_FILENAME,
+ };
+
+ trace_firmware("pvr2_upload_firmware2");
+
+ ret = pvr2_locate_firmware(hdw,&fw_entry,"encoder",
+ sizeof(fw_files)/sizeof(fw_files[0]),
+ fw_files);
+ if (ret < 0) return ret;
+ fwidx = ret;
+ ret = 0;
+ /* Since we're about to completely reinitialize the encoder,
+ invalidate our cached copy of its configuration state.  The next
+ time we configure the encoder, we'll then do a full configuration. */
+ hdw->enc_cur_valid = 0;
+
+ /* First prepare firmware loading */
+ ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/
+ ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/
+ ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/
+ ret |= pvr2_hdw_cmd_deep_reset(hdw);
+ ret |= pvr2_write_register(hdw, 0xa064, 0x00000000); /*APU command*/
+ ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000408); /*gpio dir*/
+ ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/
+ ret |= pvr2_write_register(hdw, 0x9058, 0xffffffed); /*VPU ctrl*/
+ ret |= pvr2_write_register(hdw, 0x9054, 0xfffffffd); /*reset hw blocks*/
+ ret |= pvr2_write_register(hdw, 0x07f8, 0x80000800); /*encoder SDRAM refresh*/
+ ret |= pvr2_write_register(hdw, 0x07fc, 0x0000001a); /*encoder SDRAM pre-charge*/
+ ret |= pvr2_write_register(hdw, 0x0700, 0x00000000); /*I2C clock*/
+ ret |= pvr2_write_register(hdw, 0xaa00, 0x00000000); /*unknown*/
+ ret |= pvr2_write_register(hdw, 0xaa04, 0x00057810); /*unknown*/
+ ret |= pvr2_write_register(hdw, 0xaa10, 0x00148500); /*unknown*/
+ ret |= pvr2_write_register(hdw, 0xaa18, 0x00840000); /*unknown*/
+ ret |= pvr2_write_u8(hdw, 0x52, 0);
+ ret |= pvr2_write_u16(hdw, 0x0600, 0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "firmware2 upload prep failed, ret=%d",ret);
+ release_firmware(fw_entry);
+ return ret;
+ }
+
+ /* Now send firmware */
+
+ fw_len = fw_entry->size;
+
+ if (fw_len % FIRMWARE_CHUNK_SIZE) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "size of %s firmware"
+ " must be a multiple of 8192B",
+ fw_files[fwidx]);
+ release_firmware(fw_entry);
+ return -1;
+ }
+
+ fw_ptr = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
+ if (fw_ptr == NULL){
+ release_firmware(fw_entry);
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "failed to allocate memory for firmware2 upload");
+ return -ENOMEM;
+ }
+
+ pipe = usb_sndbulkpipe(hdw->usb_dev, PVR2_FIRMWARE_ENDPOINT);
+
+ for (fw_done = 0 ; (fw_done < fw_len) && !ret ;
+ fw_done += FIRMWARE_CHUNK_SIZE ) {
+ int i;
+ memcpy(fw_ptr, fw_entry->data + fw_done, FIRMWARE_CHUNK_SIZE);
+ /* Usbsnoop log shows that we must swap bytes... */
+ for (i = 0; i < FIRMWARE_CHUNK_SIZE/4 ; i++)
+ ((u32 *)fw_ptr)[i] = ___swab32(((u32 *)fw_ptr)[i]);
+
+ ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,
+ FIRMWARE_CHUNK_SIZE,
+ &actual_length, HZ);
+ ret |= (actual_length != FIRMWARE_CHUNK_SIZE);
+ }
+
+ trace_firmware("upload of %s : %i / %i ",
+ fw_files[fwidx],fw_done,fw_len);
+
+ kfree(fw_ptr);
+ release_firmware(fw_entry);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "firmware2 upload transfer failure");
+ return ret;
+ }
+
+ /* Finish upload */
+
+ ret |= pvr2_write_register(hdw, 0x9054, 0xffffffff); /*reset hw blocks*/
+ ret |= pvr2_write_register(hdw, 0x9058, 0xffffffe8); /*VPU ctrl*/
+ ret |= pvr2_write_u16(hdw, 0x0600, 0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "firmware2 upload post-proc failure");
+ } else {
+ hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE);
+ }
+ return ret;
+}
+
+
+#define FIRMWARE_RECOVERY_BITS \
+ ((1<<PVR2_SUBSYS_B_ENC_CFG) | \
+ (1<<PVR2_SUBSYS_B_ENC_RUN) | \
+ (1<<PVR2_SUBSYS_B_ENC_FIRMWARE) | \
+ (1<<PVR2_SUBSYS_B_USBSTREAM_RUN))
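+/* Subsystem bits cleared when an encoder operation fails inside
+   pvr2_hdw_subsys_bit_chg_no_lock(), forcing the encoder firmware to be
+   reloaded and reconfigured on the next pass through that function. */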
+
+/*
+
+ This single function is key to pretty much everything. The pvrusb2
+ device can logically be viewed as a series of subsystems which can be
+ stopped / started or unconfigured / configured. To get things streaming,
+ one must configure everything and start everything, but there may be
+ various reasons over time to deconfigure something or stop something.
+ This function handles all of this activity. Everything EVERYWHERE that
+ must affect a subsystem eventually comes here to do the work.
+
+ The current state of all subsystems is represented by a single bit mask,
+ known as subsys_enabled_mask. The bit positions are defined by the
+ PVR2_SUBSYS_xxxx macros, with one subsystem per bit position. At any
+ time the set of configured or active subsystems can be queried just by
+ looking at that mask. To change bits in that mask, this function here
+ must be called. The "msk" argument indicates which bit positions to
+ change, and the "val" argument defines the new values for the positions
+ defined by "msk".
+
+ There is a priority ordering of starting / stopping things, and for
+ multiple requested changes, this function implements that ordering.
+ (Thus we will act on a request to load encoder firmware before we
+ configure the encoder.) In addition to priority ordering, there is a
+ recovery strategy implemented here. If a particular step fails and we
+ detect that failure, this function will clear the affected subsystem bits
+ and restart. Thus we have a means for recovering from a dead encoder:
+ Clear all bits that correspond to subsystems that we need to restart /
+ reconfigure and start over.
+
+*/
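+/* Illustrative usage sketch: to bring every subsystem up one can call
+   pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,~0), and to tear down only the
+   streaming-related subsystems,
+   pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
+   this is what pvr2_hdw_set_streaming_no_lock() further below does when
+   streaming is enabled or disabled. */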
+void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val)
+{
+ unsigned long nmsk;
+ unsigned long vmsk;
+ int ret;
+ unsigned int tryCount = 0;
+
+ if (!hdw->flag_ok) return;
+
+ msk &= PVR2_SUBSYS_ALL;
+ nmsk = (hdw->subsys_enabled_mask & ~msk) | (val & msk);
+ nmsk &= PVR2_SUBSYS_ALL;
+
+ for (;;) {
+ tryCount++;
+ if (!((nmsk ^ hdw->subsys_enabled_mask) &
+ PVR2_SUBSYS_ALL)) break;
+ if (tryCount > 4) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Too many retries when configuring device;"
+ " giving up");
+ pvr2_hdw_render_useless(hdw);
+ break;
+ }
+ if (tryCount > 1) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Retrying device reconfiguration");
+ }
+ pvr2_trace(PVR2_TRACE_INIT,
+ "subsys mask changing 0x%lx:0x%lx"
+ " from 0x%lx to 0x%lx",
+ msk,val,hdw->subsys_enabled_mask,nmsk);
+
+ vmsk = (nmsk ^ hdw->subsys_enabled_mask) &
+ hdw->subsys_enabled_mask;
+ if (vmsk) {
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_encoder_stop");
+ ret = pvr2_encoder_stop(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error recovery initiated");
+ hdw->subsys_enabled_mask &=
+ ~FIRMWARE_RECOVERY_BITS;
+ continue;
+ }
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_hdw_cmd_usbstream(0)");
+ pvr2_hdw_cmd_usbstream(hdw,0);
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " decoder disable");
+ if (hdw->decoder_ctrl) {
+ hdw->decoder_ctrl->enable(
+ hdw->decoder_ctrl->ctxt,0);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING:"
+ " No decoder present");
+ }
+ hdw->subsys_enabled_mask &=
+ ~(1<<PVR2_SUBSYS_B_DIGITIZER_RUN);
+ }
+ if (vmsk & PVR2_SUBSYS_CFG_ALL) {
+ hdw->subsys_enabled_mask &=
+ ~(vmsk & PVR2_SUBSYS_CFG_ALL);
+ }
+ }
+ vmsk = (nmsk ^ hdw->subsys_enabled_mask) & nmsk;
+ if (vmsk) {
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_FIRMWARE)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_upload_firmware2");
+ ret = pvr2_upload_firmware2(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failure uploading encoder"
+ " firmware");
+ pvr2_hdw_render_useless(hdw);
+ break;
+ }
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_CFG)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_encoder_configure");
+ ret = pvr2_encoder_configure(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error recovery initiated");
+ hdw->subsys_enabled_mask &=
+ ~FIRMWARE_RECOVERY_BITS;
+ continue;
+ }
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " decoder enable");
+ if (hdw->decoder_ctrl) {
+ hdw->decoder_ctrl->enable(
+ hdw->decoder_ctrl->ctxt,!0);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING:"
+ " No decoder present");
+ }
+ hdw->subsys_enabled_mask |=
+ (1<<PVR2_SUBSYS_B_DIGITIZER_RUN);
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_hdw_cmd_usbstream(1)");
+ pvr2_hdw_cmd_usbstream(hdw,!0);
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_encoder_start");
+ ret = pvr2_encoder_start(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error recovery initiated");
+ hdw->subsys_enabled_mask &=
+ ~FIRMWARE_RECOVERY_BITS;
+ continue;
+ }
+ }
+ }
+ }
+}
+
+
+void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,msk,val);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk)
+{
+ pvr2_hdw_subsys_bit_chg(hdw,msk,msk);
+}
+
+
+void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk)
+{
+ pvr2_hdw_subsys_bit_chg(hdw,msk,0);
+}
+
+
+unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *hdw)
+{
+ return hdw->subsys_enabled_mask;
+}
+
+
+unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *hdw)
+{
+ return hdw->subsys_stream_mask;
+}
+
+
+void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,
+ unsigned long val)
+{
+ unsigned long val2;
+ msk &= PVR2_SUBSYS_ALL;
+ val2 = ((hdw->subsys_stream_mask & ~msk) | (val & msk));
+ pvr2_trace(PVR2_TRACE_INIT,
+ "stream mask changing 0x%lx:0x%lx from 0x%lx to 0x%lx",
+ msk,val,hdw->subsys_stream_mask,val2);
+ hdw->subsys_stream_mask = val2;
+}
+
+
+void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,
+ unsigned long val)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_subsys_stream_bit_chg_no_lock(hdw,msk,val);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+int pvr2_hdw_set_streaming_no_lock(struct pvr2_hdw *hdw,int enableFl)
+{
+ if ((!enableFl) == !(hdw->flag_streaming_enabled)) return 0;
+ if (enableFl) {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*--TRACE_STREAM--*/ enable");
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,~0);
+ } else {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*--TRACE_STREAM--*/ disable");
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
+ }
+ if (!hdw->flag_ok) return -EIO;
+ hdw->flag_streaming_enabled = enableFl != 0;
+ return 0;
+}
+
+
+int pvr2_hdw_get_streaming(struct pvr2_hdw *hdw)
+{
+ return hdw->flag_streaming_enabled != 0;
+}
+
+
+int pvr2_hdw_set_streaming(struct pvr2_hdw *hdw,int enable_flag)
+{
+ int ret;
+ LOCK_TAKE(hdw->big_lock); do {
+ ret = pvr2_hdw_set_streaming_no_lock(hdw,enable_flag);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return ret;
+}
+
+
+int pvr2_hdw_set_stream_type_no_lock(struct pvr2_hdw *hdw,
+ enum pvr2_config config)
+{
+ unsigned long sm = hdw->subsys_enabled_mask;
+ if (!hdw->flag_ok) return -EIO;
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
+ hdw->config = config;
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,sm);
+ return 0;
+}
+
+
+int pvr2_hdw_set_stream_type(struct pvr2_hdw *hdw,enum pvr2_config config)
+{
+ int ret;
+ if (!hdw->flag_ok) return -EIO;
+ LOCK_TAKE(hdw->big_lock);
+ ret = pvr2_hdw_set_stream_type_no_lock(hdw,config);
+ LOCK_GIVE(hdw->big_lock);
+ return ret;
+}
+
+
+static int get_default_tuner_type(struct pvr2_hdw *hdw)
+{
+ int unit_number = hdw->unit_number;
+ int tp = -1;
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ tp = tuner[unit_number];
+ }
+ if (tp < 0) return -EINVAL;
+ hdw->tuner_type = tp;
+ return 0;
+}
+
+
+static v4l2_std_id get_default_standard(struct pvr2_hdw *hdw)
+{
+ int unit_number = hdw->unit_number;
+ int tp = 0;
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ tp = video_std[unit_number];
+ }
+ return tp;
+}
+
+
+static unsigned int get_default_error_tolerance(struct pvr2_hdw *hdw)
+{
+ int unit_number = hdw->unit_number;
+ int tp = 0;
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ tp = tolerance[unit_number];
+ }
+ return tp;
+}
+
+
+static int pvr2_hdw_check_firmware(struct pvr2_hdw *hdw)
+{
+ /* Try a harmless request to fetch the eeprom's address over
+ endpoint 1. See what happens. Only the full FX2 image can
+ respond to this.  If this probe fails then the FX2 firmware
+ likely needs to be loaded. */
+ int result;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = 0xeb;
+ result = pvr2_send_request_ex(hdw,HZ*1,!0,
+ hdw->cmd_buffer,1,
+ hdw->cmd_buffer,1);
+ if (result < 0) break;
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+ if (result) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Probe of device endpoint 1 result status %d",
+ result);
+ } else {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Probe of device endpoint 1 succeeded");
+ }
+ return result == 0;
+}
+
+static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
+{
+ char buf[40];
+ unsigned int bcnt;
+ v4l2_std_id std1,std2;
+
+ std1 = get_default_standard(hdw);
+
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Supported video standard(s) reported by eeprom: %.*s",
+ bcnt,buf);
+
+ hdw->std_mask_avail = hdw->std_mask_eeprom;
+
+ std2 = std1 & ~hdw->std_mask_avail;
+ if (std2) {
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Expanding supported video standards"
+ " to include: %.*s",
+ bcnt,buf);
+ hdw->std_mask_avail |= std2;
+ }
+
+ pvr2_hdw_internal_set_std_avail(hdw);
+
+ if (std1) {
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std1);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Initial video standard forced to %.*s",
+ bcnt,buf);
+ hdw->std_mask_cur = std1;
+ hdw->std_dirty = !0;
+ pvr2_hdw_internal_find_stdenum(hdw);
+ return;
+ }
+
+ if (hdw->std_enum_cnt > 1) {
+ // Autoselect the first listed standard
+ hdw->std_enum_cur = 1;
+ hdw->std_mask_cur = hdw->std_defs[hdw->std_enum_cur-1].id;
+ hdw->std_dirty = !0;
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Initial video standard auto-selected to %s",
+ hdw->std_defs[hdw->std_enum_cur-1].name);
+ return;
+ }
+
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Unable to select a viable initial video standard");
+}
+
+
+static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
+{
+ int ret;
+ unsigned int idx;
+ struct pvr2_ctrl *cptr;
+ int reloadFl = 0;
+ if (!reloadFl) {
+ reloadFl = (hdw->usb_intf->cur_altsetting->desc.bNumEndpoints
+ == 0);
+ if (reloadFl) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "USB endpoint config looks strange"
+ "; possibly firmware needs to be loaded");
+ }
+ }
+ if (!reloadFl) {
+ reloadFl = !pvr2_hdw_check_firmware(hdw);
+ if (reloadFl) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Check for FX2 firmware failed"
+ "; possibly firmware needs to be loaded");
+ }
+ }
+ if (reloadFl) {
+ if (pvr2_upload_firmware1(hdw) != 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failure uploading firmware1");
+ }
+ return;
+ }
+ hdw->fw1_state = FW1_STATE_OK;
+
+ if (initusbreset) {
+ pvr2_hdw_device_reset(hdw);
+ }
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ for (idx = 0; idx < pvr2_client_lists[hdw->hdw_type].cnt; idx++) {
+ request_module(pvr2_client_lists[hdw->hdw_type].lst[idx]);
+ }
+
+ pvr2_hdw_cmd_powerup(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ if (pvr2_upload_firmware2(hdw)){
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,"device unstable!!");
+ pvr2_hdw_render_useless(hdw);
+ return;
+ }
+
+ // This step MUST happen after the earlier powerup step.
+ pvr2_i2c_core_init(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ for (idx = 0; idx < CTRLDEF_COUNT; idx++) {
+ cptr = hdw->controls + idx;
+ if (cptr->info->skip_init) continue;
+ if (!cptr->info->set_value) continue;
+ cptr->info->set_value(cptr,~0,cptr->info->default_value);
+ }
+
+ // Do not use pvr2_reset_ctl_endpoints() here. It is not
+ // thread-safe against the normal pvr2_send_request() mechanism.
+ // (We should make it thread safe).
+
+ ret = pvr2_hdw_get_eeprom_addr(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+ if (ret < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Unable to determine location of eeprom, skipping");
+ } else {
+ hdw->eeprom_addr = ret;
+ pvr2_eeprom_analyze(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+ }
+
+ pvr2_hdw_setup_std(hdw);
+
+ if (!get_default_tuner_type(hdw)) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup: Tuner type overridden to %d",
+ hdw->tuner_type);
+ }
+
+ hdw->tuner_updated = !0;
+ pvr2_i2c_core_check_stale(hdw);
+ hdw->tuner_updated = 0;
+
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ pvr2_hdw_commit_ctl_internal(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ hdw->vid_stream = pvr2_stream_create();
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup: video stream is %p",hdw->vid_stream);
+ if (hdw->vid_stream) {
+ idx = get_default_error_tolerance(hdw);
+ if (idx) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup: video stream %p"
+ " setting tolerance %u",
+ hdw->vid_stream,idx);
+ }
+ pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev,
+ PVR2_VID_ENDPOINT,idx);
+ }
+
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ /* Make sure everything is up to date */
+ pvr2_i2c_core_sync(hdw);
+
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ hdw->flag_init_ok = !0;
+}
+
+
+int pvr2_hdw_setup(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) begin",hdw);
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_setup_low(hdw);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup(hdw=%p) done, ok=%d init_ok=%d",
+ hdw,hdw->flag_ok,hdw->flag_init_ok);
+ if (pvr2_hdw_dev_ok(hdw)) {
+ if (pvr2_hdw_init_ok(hdw)) {
+ pvr2_trace(
+ PVR2_TRACE_INFO,
+ "Device initialization"
+ " completed successfully.");
+ break;
+ }
+ if (hdw->fw1_state == FW1_STATE_RELOAD) {
+ pvr2_trace(
+ PVR2_TRACE_INFO,
+ "Device microcontroller firmware"
+ " (re)loaded; it should now reset"
+ " and reconnect.");
+ break;
+ }
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Device initialization was not successful.");
+ if (hdw->fw1_state == FW1_STATE_MISSING) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Giving up since device"
+ " microcontroller firmware"
+ " appears to be missing.");
+ break;
+ }
+ }
+ if (procreload) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempting pvrusb2 recovery by reloading"
+ " primary firmware.");
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "If this works, device should disconnect"
+ " and reconnect in a sane state.");
+ hdw->fw1_state = FW1_STATE_UNKNOWN;
+ pvr2_upload_firmware1(hdw);
+ } else {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "***WARNING*** pvrusb2 device hardware"
+ " appears to be jammed"
+ " and I can't clear it.");
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "You might need to power cycle"
+ " the pvrusb2 device"
+ " in order to recover.");
+ }
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw);
+ return hdw->flag_init_ok;
+}
+
+
+/* Create and return a structure for interacting with the underlying
+ hardware */
+struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+ const struct usb_device_id *devid)
+{
+ unsigned int idx,cnt1,cnt2;
+ struct pvr2_hdw *hdw;
+ unsigned int hdw_type;
+ int valid_std_mask;
+ struct pvr2_ctrl *cptr;
+ __u8 ifnum;
+ struct v4l2_queryctrl qctrl;
+ struct pvr2_ctl_info *ciptr;
+
+ hdw_type = devid - pvr2_device_table;
+ if (hdw_type >=
+ sizeof(pvr2_device_names)/sizeof(pvr2_device_names[0])) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Bogus device type of %u reported",hdw_type);
+ return 0;
+ }
+
+ hdw = kmalloc(sizeof(*hdw),GFP_KERNEL);
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_create: hdw=%p, type \"%s\"",
+ hdw,pvr2_device_names[hdw_type]);
+ if (!hdw) goto fail;
+ memset(hdw,0,sizeof(*hdw));
+ cx2341x_fill_defaults(&hdw->enc_ctl_state);
+
+ hdw->control_cnt = CTRLDEF_COUNT;
+ hdw->control_cnt += MPEGDEF_COUNT;
+ hdw->controls = kmalloc(sizeof(struct pvr2_ctrl) * hdw->control_cnt,
+ GFP_KERNEL);
+ if (!hdw->controls) goto fail;
+ memset(hdw->controls,0,sizeof(struct pvr2_ctrl) * hdw->control_cnt);
+ hdw->hdw_type = hdw_type;
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ cptr->hdw = hdw;
+ }
+ for (idx = 0; idx < 32; idx++) {
+ hdw->std_mask_ptrs[idx] = hdw->std_mask_names[idx];
+ }
+ for (idx = 0; idx < CTRLDEF_COUNT; idx++) {
+ cptr = hdw->controls + idx;
+ cptr->info = control_defs+idx;
+ }
+ /* Define and configure additional controls from cx2341x module. */
+ hdw->mpeg_ctrl_info = kmalloc(
+ sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT, GFP_KERNEL);
+ if (!hdw->mpeg_ctrl_info) goto fail;
+ memset(hdw->mpeg_ctrl_info,0,
+ sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT);
+ for (idx = 0; idx < MPEGDEF_COUNT; idx++) {
+ cptr = hdw->controls + idx + CTRLDEF_COUNT;
+ ciptr = &(hdw->mpeg_ctrl_info[idx].info);
+ ciptr->desc = hdw->mpeg_ctrl_info[idx].desc;
+ ciptr->name = mpeg_ids[idx].strid;
+ ciptr->v4l_id = mpeg_ids[idx].id;
+ ciptr->skip_init = !0;
+ ciptr->get_value = ctrl_cx2341x_get;
+ ciptr->get_v4lflags = ctrl_cx2341x_getv4lflags;
+ ciptr->is_dirty = ctrl_cx2341x_is_dirty;
+ if (!idx) ciptr->clear_dirty = ctrl_cx2341x_clear_dirty;
+ qctrl.id = ciptr->v4l_id;
+ cx2341x_ctrl_query(&hdw->enc_ctl_state,&qctrl);
+ if (!(qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ciptr->set_value = ctrl_cx2341x_set;
+ }
+ strncpy(hdw->mpeg_ctrl_info[idx].desc,qctrl.name,
+ PVR2_CTLD_INFO_DESC_SIZE);
+ hdw->mpeg_ctrl_info[idx].desc[PVR2_CTLD_INFO_DESC_SIZE-1] = 0;
+ ciptr->default_value = qctrl.default_value;
+ switch (qctrl.type) {
+ default:
+ case V4L2_CTRL_TYPE_INTEGER:
+ ciptr->type = pvr2_ctl_int;
+ ciptr->def.type_int.min_value = qctrl.minimum;
+ ciptr->def.type_int.max_value = qctrl.maximum;
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ciptr->type = pvr2_ctl_bool;
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ ciptr->type = pvr2_ctl_enum;
+ ciptr->def.type_enum.value_names =
+ cx2341x_ctrl_get_menu(ciptr->v4l_id);
+ for (cnt1 = 0;
+ ciptr->def.type_enum.value_names[cnt1] != NULL;
+ cnt1++) { }
+ ciptr->def.type_enum.count = cnt1;
+ break;
+ }
+ cptr->info = ciptr;
+ }
+
+ // Initialize video standard enum dynamic control
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDENUM);
+ if (cptr) {
+ memcpy(&hdw->std_info_enum,cptr->info,
+ sizeof(hdw->std_info_enum));
+ cptr->info = &hdw->std_info_enum;
+
+ }
+ // Initialize control data regarding video standard masks
+ valid_std_mask = pvr2_std_get_usable();
+ for (idx = 0; idx < 32; idx++) {
+ if (!(valid_std_mask & (1 << idx))) continue;
+ cnt1 = pvr2_std_id_to_str(
+ hdw->std_mask_names[idx],
+ sizeof(hdw->std_mask_names[idx])-1,
+ 1 << idx);
+ hdw->std_mask_names[idx][cnt1] = 0;
+ }
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDAVAIL);
+ if (cptr) {
+ memcpy(&hdw->std_info_avail,cptr->info,
+ sizeof(hdw->std_info_avail));
+ cptr->info = &hdw->std_info_avail;
+ hdw->std_info_avail.def.type_bitmask.bit_names =
+ hdw->std_mask_ptrs;
+ hdw->std_info_avail.def.type_bitmask.valid_bits =
+ valid_std_mask;
+ }
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR);
+ if (cptr) {
+ memcpy(&hdw->std_info_cur,cptr->info,
+ sizeof(hdw->std_info_cur));
+ cptr->info = &hdw->std_info_cur;
+ hdw->std_info_cur.def.type_bitmask.bit_names =
+ hdw->std_mask_ptrs;
+ hdw->std_info_cur.def.type_bitmask.valid_bits =
+ valid_std_mask;
+ }
+
+ hdw->eeprom_addr = -1;
+ hdw->unit_number = -1;
+ hdw->v4l_minor_number = -1;
+ hdw->ctl_write_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL);
+ if (!hdw->ctl_write_buffer) goto fail;
+ hdw->ctl_read_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL);
+ if (!hdw->ctl_read_buffer) goto fail;
+ hdw->ctl_write_urb = usb_alloc_urb(0,GFP_KERNEL);
+ if (!hdw->ctl_write_urb) goto fail;
+ hdw->ctl_read_urb = usb_alloc_urb(0,GFP_KERNEL);
+ if (!hdw->ctl_read_urb) goto fail;
+
+ down(&pvr2_unit_sem); do {
+ for (idx = 0; idx < PVR_NUM; idx++) {
+ if (unit_pointers[idx]) continue;
+ hdw->unit_number = idx;
+ unit_pointers[idx] = hdw;
+ break;
+ }
+ } while (0); up(&pvr2_unit_sem);
+
+ cnt1 = 0;
+ cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2");
+ cnt1 += cnt2;
+ if (hdw->unit_number >= 0) {
+ cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"_%c",
+ ('a' + hdw->unit_number));
+ cnt1 += cnt2;
+ }
+ if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
+ hdw->name[cnt1] = 0;
+
+ pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
+ hdw->unit_number,hdw->name);
+
+ hdw->tuner_type = -1;
+ hdw->flag_ok = !0;
+ /* Initialize the mask of subsystems that we will shut down when we
+ stop streaming. */
+ hdw->subsys_stream_mask = PVR2_SUBSYS_RUN_ALL;
+ hdw->subsys_stream_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
+
+ pvr2_trace(PVR2_TRACE_INIT,"subsys_stream_mask: 0x%lx",
+ hdw->subsys_stream_mask);
+
+ hdw->usb_intf = intf;
+ hdw->usb_dev = interface_to_usbdev(intf);
+
+ ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber;
+ usb_set_interface(hdw->usb_dev,ifnum,0);
+
+ mutex_init(&hdw->ctl_lock_mutex);
+ mutex_init(&hdw->big_lock_mutex);
+
+ return hdw;
+ fail:
+ if (hdw) {
+ if (hdw->ctl_read_urb) usb_free_urb(hdw->ctl_read_urb);
+ if (hdw->ctl_write_urb) usb_free_urb(hdw->ctl_write_urb);
+ if (hdw->ctl_read_buffer) kfree(hdw->ctl_read_buffer);
+ if (hdw->ctl_write_buffer) kfree(hdw->ctl_write_buffer);
+ if (hdw->controls) kfree(hdw->controls);
+ if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info);
+ kfree(hdw);
+ }
+ return 0;
+}
+
+
+/* Remove _all_ associations between this driver and the underlying USB
+ layer. */
+void pvr2_hdw_remove_usb_stuff(struct pvr2_hdw *hdw)
+{
+ if (hdw->flag_disconnected) return;
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_remove_usb_stuff: hdw=%p",hdw);
+ if (hdw->ctl_read_urb) {
+ usb_kill_urb(hdw->ctl_read_urb);
+ usb_free_urb(hdw->ctl_read_urb);
+ hdw->ctl_read_urb = 0;
+ }
+ if (hdw->ctl_write_urb) {
+ usb_kill_urb(hdw->ctl_write_urb);
+ usb_free_urb(hdw->ctl_write_urb);
+ hdw->ctl_write_urb = 0;
+ }
+ if (hdw->ctl_read_buffer) {
+ kfree(hdw->ctl_read_buffer);
+ hdw->ctl_read_buffer = 0;
+ }
+ if (hdw->ctl_write_buffer) {
+ kfree(hdw->ctl_write_buffer);
+ hdw->ctl_write_buffer = 0;
+ }
+ pvr2_hdw_render_useless_unlocked(hdw);
+ hdw->flag_disconnected = !0;
+ hdw->usb_dev = 0;
+ hdw->usb_intf = 0;
+}
+
+
+/* Destroy hardware interaction structure */
+void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_destroy: hdw=%p",hdw);
+ if (hdw->fw_buffer) {
+ kfree(hdw->fw_buffer);
+ hdw->fw_buffer = 0;
+ }
+ if (hdw->vid_stream) {
+ pvr2_stream_destroy(hdw->vid_stream);
+ hdw->vid_stream = 0;
+ }
+ if (hdw->audio_stat) {
+ hdw->audio_stat->detach(hdw->audio_stat->ctxt);
+ }
+ if (hdw->decoder_ctrl) {
+ hdw->decoder_ctrl->detach(hdw->decoder_ctrl->ctxt);
+ }
+ pvr2_i2c_core_done(hdw);
+ pvr2_hdw_remove_usb_stuff(hdw);
+ down(&pvr2_unit_sem); do {
+ if ((hdw->unit_number >= 0) &&
+ (hdw->unit_number < PVR_NUM) &&
+ (unit_pointers[hdw->unit_number] == hdw)) {
+ unit_pointers[hdw->unit_number] = 0;
+ }
+ } while (0); up(&pvr2_unit_sem);
+ if (hdw->controls) kfree(hdw->controls);
+ if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info);
+ if (hdw->std_defs) kfree(hdw->std_defs);
+ if (hdw->std_enum_names) kfree(hdw->std_enum_names);
+ kfree(hdw);
+}
+
+
+int pvr2_hdw_init_ok(struct pvr2_hdw *hdw)
+{
+ return hdw->flag_init_ok;
+}
+
+
+int pvr2_hdw_dev_ok(struct pvr2_hdw *hdw)
+{
+ return (hdw && hdw->flag_ok);
+}
+
+
+/* Called when hardware has been unplugged */
+void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
+ LOCK_TAKE(hdw->big_lock);
+ LOCK_TAKE(hdw->ctl_lock);
+ pvr2_hdw_remove_usb_stuff(hdw);
+ LOCK_GIVE(hdw->ctl_lock);
+ LOCK_GIVE(hdw->big_lock);
+}
+
+
+// Attempt to autoselect an appropriate value for std_enum_cur given
+// whatever is currently in std_mask_cur
+void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw)
+{
+ unsigned int idx;
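+	/* Enum slot 0 is the reserved "none" entry, so the actual
+	   standards held in std_defs[] are offset by one. */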
+ for (idx = 1; idx < hdw->std_enum_cnt; idx++) {
+ if (hdw->std_defs[idx-1].id == hdw->std_mask_cur) {
+ hdw->std_enum_cur = idx;
+ return;
+ }
+ }
+ hdw->std_enum_cur = 0;
+}
+
+
+// Calculate correct set of enumerated standards based on currently known
+// set of available standards bits.
+void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw)
+{
+ struct v4l2_standard *newstd;
+ unsigned int std_cnt;
+ unsigned int idx;
+
+ newstd = pvr2_std_create_enum(&std_cnt,hdw->std_mask_avail);
+
+ if (hdw->std_defs) {
+ kfree(hdw->std_defs);
+ hdw->std_defs = 0;
+ }
+ hdw->std_enum_cnt = 0;
+ if (hdw->std_enum_names) {
+ kfree(hdw->std_enum_names);
+ hdw->std_enum_names = 0;
+ }
+
+ if (!std_cnt) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Failed to identify any viable standards");
+ }
+ hdw->std_enum_names = kmalloc(sizeof(char *)*(std_cnt+1),GFP_KERNEL);
+ hdw->std_enum_names[0] = "none";
+ for (idx = 0; idx < std_cnt; idx++) {
+ hdw->std_enum_names[idx+1] =
+ newstd[idx].name;
+ }
+ // Set up the dynamic control for this standard
+ hdw->std_info_enum.def.type_enum.value_names = hdw->std_enum_names;
+ hdw->std_info_enum.def.type_enum.count = std_cnt+1;
+ hdw->std_defs = newstd;
+ hdw->std_enum_cnt = std_cnt+1;
+ hdw->std_enum_cur = 0;
+ hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
+}
+
+
+int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,
+ struct v4l2_standard *std,
+ unsigned int idx)
+{
+ int ret = -EINVAL;
+ if (!idx) return ret;
+ LOCK_TAKE(hdw->big_lock); do {
+ if (idx >= hdw->std_enum_cnt) break;
+ idx--;
+ memcpy(std,hdw->std_defs+idx,sizeof(*std));
+ ret = 0;
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return ret;
+}
+
+
+/* Get the number of defined controls */
+unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *hdw)
+{
+ return hdw->control_cnt;
+}
+
+
+/* Retrieve a control handle given its index (0..count-1) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *hdw,
+ unsigned int idx)
+{
+ if (idx >= hdw->control_cnt) return 0;
+ return hdw->controls + idx;
+}
+
+
+/* Retrieve a control handle given its internal ID (if any) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *hdw,
+ unsigned int ctl_id)
+{
+ struct pvr2_ctrl *cptr;
+ unsigned int idx;
+ int i;
+
+ /* This could be made a lot more efficient, but for now... */
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ i = cptr->info->internal_id;
+ if (i && (i == ctl_id)) return cptr;
+ }
+ return 0;
+}
+
+
+/* Given a V4L ID, retrieve the control structure associated with it. */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *hdw,unsigned int ctl_id)
+{
+ struct pvr2_ctrl *cptr;
+ unsigned int idx;
+ int i;
+
+ /* This could be made a lot more efficient, but for now... */
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ i = cptr->info->v4l_id;
+ if (i && (i == ctl_id)) return cptr;
+ }
+ return 0;
+}
+
+
+/* Given a V4L ID, retrieve the control structure whose own V4L ID most
+   closely follows it (i.e. the next defined control in V4L ID order). */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *hdw,
+ unsigned int ctl_id)
+{
+ struct pvr2_ctrl *cptr,*cp2;
+ unsigned int idx;
+ int i;
+
+ /* This could be made a lot more efficient, but for now... */
+ cp2 = 0;
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ i = cptr->info->v4l_id;
+ if (!i) continue;
+ if (i <= ctl_id) continue;
+ if (cp2 && (cp2->info->v4l_id < i)) continue;
+ cp2 = cptr;
+ }
+	return cp2;
+}
+
+
+static const char *get_ctrl_typename(enum pvr2_ctl_type tp)
+{
+ switch (tp) {
+ case pvr2_ctl_int: return "integer";
+ case pvr2_ctl_enum: return "enum";
+ case pvr2_ctl_bool: return "boolean";
+ case pvr2_ctl_bitmask: return "bitmask";
+ }
+ return "";
+}
+
+
+/* Commit all control changes made up to this point. Subsystems can be
+ indirectly affected by these changes. For a given set of things being
+ committed, we'll clear the affected subsystem bits and then once we're
+ done committing everything we'll make a request to restore the subsystem
+ state(s) back to their previous value before this function was called.
+ Thus we can automatically reconfigure affected pieces of the driver as
+ controls are changed. */
+int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw)
+{
+ unsigned long saved_subsys_mask = hdw->subsys_enabled_mask;
+ unsigned long stale_subsys_mask = 0;
+ unsigned int idx;
+ struct pvr2_ctrl *cptr;
+ int value;
+ int commit_flag = 0;
+ char buf[100];
+ unsigned int bcnt,ccnt;
+
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ if (cptr->info->is_dirty == 0) continue;
+ if (!cptr->info->is_dirty(cptr)) continue;
+ if (!commit_flag) {
+ commit_flag = !0;
+ }
+
+ bcnt = scnprintf(buf,sizeof(buf),"\"%s\" <-- ",
+ cptr->info->name);
+ value = 0;
+ cptr->info->get_value(cptr,&value);
+ pvr2_ctrl_value_to_sym_internal(cptr,~0,value,
+ buf+bcnt,
+ sizeof(buf)-bcnt,&ccnt);
+ bcnt += ccnt;
+ bcnt += scnprintf(buf+bcnt,sizeof(buf)-bcnt," <%s>",
+ get_ctrl_typename(cptr->info->type));
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*--TRACE_COMMIT--*/ %.*s",
+ bcnt,buf);
+ }
+
+ if (!commit_flag) {
+ /* Nothing has changed */
+ return 0;
+ }
+
+	/* When the video standard changes, reset the vertical resolution
+	   value - but if the user has pending changes there, then let
+	   those changes take priority. */
+ if (hdw->std_dirty) {
+ /* Rewrite the vertical resolution to be appropriate to the
+ video standard that has been selected. */
+ int nvres;
+ if (hdw->std_mask_cur & V4L2_STD_525_60) {
+ nvres = 480;
+ } else {
+ nvres = 576;
+ }
+ if (nvres != hdw->res_ver_val) {
+ hdw->res_ver_val = nvres;
+ hdw->res_ver_dirty = !0;
+ }
+ }
+
+	if (hdw->std_dirty) {
+ /* If any of this changes, then the encoder needs to be
+ reconfigured, and we need to reset the stream. */
+ stale_subsys_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
+ stale_subsys_mask |= hdw->subsys_stream_mask;
+ }
+
+ if (hdw->srate_dirty) {
+ /* Write new sample rate into control structure since
+ * the master copy is stale. We must track srate
+		 * separately from the mpeg control structure because
+ * other logic also uses this value. */
+ struct v4l2_ext_controls cs;
+ struct v4l2_ext_control c1;
+ memset(&cs,0,sizeof(cs));
+ memset(&c1,0,sizeof(c1));
+ cs.controls = &c1;
+ cs.count = 1;
+ c1.id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ;
+ c1.value = hdw->srate_val;
+ cx2341x_ext_ctrls(&hdw->enc_ctl_state,&cs,VIDIOC_S_EXT_CTRLS);
+ }
+
+ /* Scan i2c core at this point - before we clear all the dirty
+ bits. Various parts of the i2c core will notice dirty bits as
+ appropriate and arrange to broadcast or directly send updates to
+ the client drivers in order to keep everything in sync */
+ pvr2_i2c_core_check_stale(hdw);
+
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ if (!cptr->info->clear_dirty) continue;
+ cptr->info->clear_dirty(cptr);
+ }
+
+ /* Now execute i2c core update */
+ pvr2_i2c_core_sync(hdw);
+
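+	/* Shut down whatever was made stale by these changes, then restore
+	   the subsystem mask that was in effect on entry so the affected
+	   pieces come back up with the new configuration. */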
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,stale_subsys_mask,0);
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,saved_subsys_mask);
+
+ return 0;
+}
+
+
+int pvr2_hdw_commit_ctl(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_commit_ctl_internal(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return 0;
+}
+
+
+void pvr2_hdw_poll(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_i2c_core_sync(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *hdw,
+ void (*func)(void *),
+ void *data)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ hdw->poll_trigger_func = func;
+ hdw->poll_trigger_data = data;
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *hdw)
+{
+ if (hdw->poll_trigger_func) {
+ hdw->poll_trigger_func(hdw->poll_trigger_data);
+ }
+}
+
+
+void pvr2_hdw_poll_trigger(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_poll_trigger_unlocked(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+/* Return name for this driver instance */
+const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *hdw)
+{
+ return hdw->name;
+}
+
+
+/* Return bit mask indicating signal status */
+unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *hdw)
+{
+ unsigned int msk = 0;
+ switch (hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ case PVR2_CVAL_INPUT_RADIO:
+ if (hdw->decoder_ctrl &&
+ hdw->decoder_ctrl->tuned(hdw->decoder_ctrl->ctxt)) {
+ msk |= PVR2_SIGNAL_OK;
+ if (hdw->audio_stat &&
+ hdw->audio_stat->status(hdw->audio_stat->ctxt)) {
+ if (hdw->flag_stereo) {
+ msk |= PVR2_SIGNAL_STEREO;
+ }
+ if (hdw->flag_bilingual) {
+ msk |= PVR2_SIGNAL_SAP;
+ }
+ }
+ }
+ break;
+ default:
+ msk |= PVR2_SIGNAL_OK | PVR2_SIGNAL_STEREO;
+ }
+ return msk;
+}
+
+
+int pvr2_hdw_is_hsm(struct pvr2_hdw *hdw)
+{
+ int result;
+ LOCK_TAKE(hdw->ctl_lock); do {
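+		/* Command 0x0b asks the device whether it negotiated a
+		   high-speed USB link; the one-byte reply is nonzero if
+		   so. */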
+ hdw->cmd_buffer[0] = 0x0b;
+ result = pvr2_send_request(hdw,
+ hdw->cmd_buffer,1,
+ hdw->cmd_buffer,1);
+ if (result < 0) break;
+ result = (hdw->cmd_buffer[0] != 0);
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+ return result;
+}
+
+
+/* Return bit mask indicating signal status */
+unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *hdw)
+{
+ unsigned int msk = 0;
+ LOCK_TAKE(hdw->big_lock); do {
+ msk = pvr2_hdw_get_signal_status_internal(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return msk;
+}
+
+
+/* Get handle to video output stream */
+struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *hp)
+{
+ return hp->vid_stream;
+}
+
+
+void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw)
+{
+ int nr = pvr2_hdw_get_unit_number(hdw);
+ LOCK_TAKE(hdw->big_lock); do {
+ hdw->log_requested = !0;
+ printk(KERN_INFO "pvrusb2: ================= START STATUS CARD #%d =================\n", nr);
+ pvr2_i2c_core_check_stale(hdw);
+ hdw->log_requested = 0;
+ pvr2_i2c_core_sync(hdw);
+ pvr2_trace(PVR2_TRACE_INFO,"cx2341x config:");
+ cx2341x_log_status(&hdw->enc_ctl_state, "pvrusb2");
+ printk(KERN_INFO "pvrusb2: ================== END STATUS CARD #%d ==================\n", nr);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, int enable_flag)
+{
+ int ret;
+ u16 address;
+ unsigned int pipe;
+ LOCK_TAKE(hdw->big_lock); do {
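+		/* Bail out if we're already in the requested state: the
+		   firmware buffer exists exactly when retrieval is
+		   enabled. */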
+ if ((hdw->fw_buffer == 0) == !enable_flag) break;
+
+ if (!enable_flag) {
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Cleaning up after CPU firmware fetch");
+ kfree(hdw->fw_buffer);
+ hdw->fw_buffer = 0;
+ hdw->fw_size = 0;
+ /* Now release the CPU. It will disconnect and
+ reconnect later. */
+ pvr2_hdw_cpureset_assert(hdw,0);
+ break;
+ }
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Preparing to suck out CPU firmware");
+ hdw->fw_size = 0x2000;
+ hdw->fw_buffer = kmalloc(hdw->fw_size,GFP_KERNEL);
+ if (!hdw->fw_buffer) {
+ hdw->fw_size = 0;
+ break;
+ }
+
+ memset(hdw->fw_buffer,0,hdw->fw_size);
+
+ /* We have to hold the CPU during firmware upload. */
+ pvr2_hdw_cpureset_assert(hdw,1);
+
+		/* Download the firmware from address 0x0000-0x1fff in
+		   2048 (=0x800) byte chunks. */
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,"Grabbing CPU firmware");
+ pipe = usb_rcvctrlpipe(hdw->usb_dev, 0);
+ for(address = 0; address < hdw->fw_size; address += 0x800) {
+ ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0xc0,
+ address,0,
+ hdw->fw_buffer+address,0x800,HZ);
+ if (ret < 0) break;
+ }
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,"Done grabbing CPU firmware");
+
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+/* Return true if we're in a mode where CPU firmware can be retrieved */
+int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *hdw)
+{
+ return hdw->fw_buffer != 0;
+}
+
+
+int pvr2_hdw_cpufw_get(struct pvr2_hdw *hdw,unsigned int offs,
+ char *buf,unsigned int cnt)
+{
+ int ret = -EINVAL;
+ LOCK_TAKE(hdw->big_lock); do {
+ if (!buf) break;
+ if (!cnt) break;
+
+ if (!hdw->fw_buffer) {
+ ret = -EIO;
+ break;
+ }
+
+ if (offs >= hdw->fw_size) {
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Read firmware data offs=%d EOF",
+ offs);
+ ret = 0;
+ break;
+ }
+
+ if (offs + cnt > hdw->fw_size) cnt = hdw->fw_size - offs;
+
+ memcpy(buf,hdw->fw_buffer+offs,cnt);
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Read firmware data offs=%d cnt=%d",
+ offs,cnt);
+ ret = cnt;
+ } while (0); LOCK_GIVE(hdw->big_lock);
+
+ return ret;
+}
+
+
+int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *hdw)
+{
+ return hdw->v4l_minor_number;
+}
+
+
+/* Store the v4l minor device number */
+void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,int v)
+{
+ hdw->v4l_minor_number = v;
+}
+
+
+void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw)
+{
+ if (!hdw->usb_dev) return;
+ usb_settoggle(hdw->usb_dev, PVR2_CTL_WRITE_ENDPOINT & 0xf,
+ !(PVR2_CTL_WRITE_ENDPOINT & USB_DIR_IN), 0);
+ usb_settoggle(hdw->usb_dev, PVR2_CTL_READ_ENDPOINT & 0xf,
+ !(PVR2_CTL_READ_ENDPOINT & USB_DIR_IN), 0);
+ usb_clear_halt(hdw->usb_dev,
+ usb_rcvbulkpipe(hdw->usb_dev,
+ PVR2_CTL_READ_ENDPOINT & 0x7f));
+ usb_clear_halt(hdw->usb_dev,
+ usb_sndbulkpipe(hdw->usb_dev,
+ PVR2_CTL_WRITE_ENDPOINT & 0x7f));
+}
+
+
+static void pvr2_ctl_write_complete(struct urb *urb, struct pt_regs *regs)
+{
+ struct pvr2_hdw *hdw = urb->context;
+ hdw->ctl_write_pend_flag = 0;
+ if (hdw->ctl_read_pend_flag) return;
+ complete(&hdw->ctl_done);
+}
+
+
+static void pvr2_ctl_read_complete(struct urb *urb, struct pt_regs *regs)
+{
+ struct pvr2_hdw *hdw = urb->context;
+ hdw->ctl_read_pend_flag = 0;
+ if (hdw->ctl_write_pend_flag) return;
+ complete(&hdw->ctl_done);
+}
+
+
+static void pvr2_ctl_timeout(unsigned long data)
+{
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
+ hdw->ctl_timeout_flag = !0;
+ if (hdw->ctl_write_pend_flag && hdw->ctl_write_urb) {
+ usb_unlink_urb(hdw->ctl_write_urb);
+ }
+ if (hdw->ctl_read_pend_flag && hdw->ctl_read_urb) {
+ usb_unlink_urb(hdw->ctl_read_urb);
+ }
+ }
+}
+
+
+int pvr2_send_request_ex(struct pvr2_hdw *hdw,
+ unsigned int timeout,int probe_fl,
+ void *write_data,unsigned int write_len,
+ void *read_data,unsigned int read_len)
+{
+ unsigned int idx;
+ int status = 0;
+ struct timer_list timer;
+ if (!hdw->ctl_lock_held) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute control transfer"
+ " without lock!!");
+ return -EDEADLK;
+ }
+ if ((!hdw->flag_ok) && !probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute control transfer"
+ " when device not ok");
+ return -EIO;
+ }
+ if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) {
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute control transfer"
+ " when USB is disconnected");
+ }
+ return -ENOTTY;
+ }
+
+ /* Ensure that we have sane parameters */
+ if (!write_data) write_len = 0;
+ if (!read_data) read_len = 0;
+ if (write_len > PVR2_CTL_BUFFSIZE) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute %d byte"
+ " control-write transfer (limit=%d)",
+ write_len,PVR2_CTL_BUFFSIZE);
+ return -EINVAL;
+ }
+ if (read_len > PVR2_CTL_BUFFSIZE) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute %d byte"
+ " control-read transfer (limit=%d)",
+			read_len,PVR2_CTL_BUFFSIZE);
+ return -EINVAL;
+ }
+ if ((!write_len) && (!read_len)) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute null control transfer?");
+ return -EINVAL;
+ }
+
+
+ hdw->cmd_debug_state = 1;
+ if (write_len) {
+ hdw->cmd_debug_code = ((unsigned char *)write_data)[0];
+ } else {
+ hdw->cmd_debug_code = 0;
+ }
+ hdw->cmd_debug_write_len = write_len;
+ hdw->cmd_debug_read_len = read_len;
+
+ /* Initialize common stuff */
+ init_completion(&hdw->ctl_done);
+ hdw->ctl_timeout_flag = 0;
+ hdw->ctl_write_pend_flag = 0;
+ hdw->ctl_read_pend_flag = 0;
+ init_timer(&timer);
+ timer.expires = jiffies + timeout;
+ timer.data = (unsigned long)hdw;
+ timer.function = pvr2_ctl_timeout;
+
+ if (write_len) {
+ hdw->cmd_debug_state = 2;
+ /* Transfer write data to internal buffer */
+ for (idx = 0; idx < write_len; idx++) {
+ hdw->ctl_write_buffer[idx] =
+ ((unsigned char *)write_data)[idx];
+ }
+ /* Initiate a write request */
+ usb_fill_bulk_urb(hdw->ctl_write_urb,
+ hdw->usb_dev,
+ usb_sndbulkpipe(hdw->usb_dev,
+ PVR2_CTL_WRITE_ENDPOINT),
+ hdw->ctl_write_buffer,
+ write_len,
+ pvr2_ctl_write_complete,
+ hdw);
+ hdw->ctl_write_urb->actual_length = 0;
+ hdw->ctl_write_pend_flag = !0;
+ status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
+ if (status < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to submit write-control"
+ " URB status=%d",status);
+ hdw->ctl_write_pend_flag = 0;
+ goto done;
+ }
+ }
+
+ if (read_len) {
+ hdw->cmd_debug_state = 3;
+ memset(hdw->ctl_read_buffer,0x43,read_len);
+ /* Initiate a read request */
+ usb_fill_bulk_urb(hdw->ctl_read_urb,
+ hdw->usb_dev,
+ usb_rcvbulkpipe(hdw->usb_dev,
+ PVR2_CTL_READ_ENDPOINT),
+ hdw->ctl_read_buffer,
+ read_len,
+ pvr2_ctl_read_complete,
+ hdw);
+ hdw->ctl_read_urb->actual_length = 0;
+ hdw->ctl_read_pend_flag = !0;
+ status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
+ if (status < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to submit read-control"
+ " URB status=%d",status);
+ hdw->ctl_read_pend_flag = 0;
+ goto done;
+ }
+ }
+
+ /* Start timer */
+ add_timer(&timer);
+
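+	/* Each completion callback clears its own pend flag and signals
+	   ctl_done only when no transfer remains outstanding; on timeout
+	   the timer callback unlinks any pending URBs, which in turn
+	   forces those completions to fire. */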
+ /* Now wait for all I/O to complete */
+ hdw->cmd_debug_state = 4;
+ while (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
+ wait_for_completion(&hdw->ctl_done);
+ }
+ hdw->cmd_debug_state = 5;
+
+ /* Stop timer */
+ del_timer_sync(&timer);
+
+ hdw->cmd_debug_state = 6;
+ status = 0;
+
+ if (hdw->ctl_timeout_flag) {
+ status = -ETIMEDOUT;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+				   "Timed out control transfer");
+ }
+ goto done;
+ }
+
+ if (write_len) {
+ /* Validate results of write request */
+ if ((hdw->ctl_write_urb->status != 0) &&
+ (hdw->ctl_write_urb->status != -ENOENT) &&
+ (hdw->ctl_write_urb->status != -ESHUTDOWN) &&
+ (hdw->ctl_write_urb->status != -ECONNRESET)) {
+ /* USB subsystem is reporting some kind of failure
+ on the write */
+ status = hdw->ctl_write_urb->status;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-write URB failure,"
+ " status=%d",
+ status);
+ }
+ goto done;
+ }
+ if (hdw->ctl_write_urb->actual_length < write_len) {
+ /* Failed to write enough data */
+ status = -EIO;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-write URB short,"
+ " expected=%d got=%d",
+ write_len,
+ hdw->ctl_write_urb->actual_length);
+ }
+ goto done;
+ }
+ }
+ if (read_len) {
+ /* Validate results of read request */
+ if ((hdw->ctl_read_urb->status != 0) &&
+ (hdw->ctl_read_urb->status != -ENOENT) &&
+ (hdw->ctl_read_urb->status != -ESHUTDOWN) &&
+ (hdw->ctl_read_urb->status != -ECONNRESET)) {
+ /* USB subsystem is reporting some kind of failure
+ on the read */
+ status = hdw->ctl_read_urb->status;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-read URB failure,"
+ " status=%d",
+ status);
+ }
+ goto done;
+ }
+ if (hdw->ctl_read_urb->actual_length < read_len) {
+ /* Failed to read enough data */
+ status = -EIO;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-read URB short,"
+ " expected=%d got=%d",
+ read_len,
+ hdw->ctl_read_urb->actual_length);
+ }
+ goto done;
+ }
+ /* Transfer retrieved data out from internal buffer */
+ for (idx = 0; idx < read_len; idx++) {
+ ((unsigned char *)read_data)[idx] =
+ hdw->ctl_read_buffer[idx];
+ }
+ }
+
+ done:
+
+ hdw->cmd_debug_state = 0;
+ if ((status < 0) && (!probe_fl)) {
+ pvr2_hdw_render_useless_unlocked(hdw);
+ }
+ return status;
+}
+
+
+int pvr2_send_request(struct pvr2_hdw *hdw,
+ void *write_data,unsigned int write_len,
+ void *read_data,unsigned int read_len)
+{
+ return pvr2_send_request_ex(hdw,HZ*4,0,
+ write_data,write_len,
+ read_data,read_len);
+}
+
+int pvr2_write_register(struct pvr2_hdw *hdw, u16 reg, u32 data)
+{
+ int ret;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
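+	/* Request layout: byte 0 is the write-register opcode, bytes 1-4
+	   hold the 32-bit value little-endian, byte 5 is unused, and
+	   bytes 6-7 hold the register address MSB-first. */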
+ hdw->cmd_buffer[0] = 0x04; /* write register prefix */
+ PVR2_DECOMPOSE_LE(hdw->cmd_buffer,1,data);
+ hdw->cmd_buffer[5] = 0;
+ hdw->cmd_buffer[6] = (reg >> 8) & 0xff;
+ hdw->cmd_buffer[7] = reg & 0xff;
+
+
+ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 0);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+int pvr2_read_register(struct pvr2_hdw *hdw, u16 reg, u32 *data)
+{
+ int ret = 0;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
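+	/* Request layout mirrors the write case: byte 0 is the
+	   read-register opcode and bytes 6-7 hold the register address;
+	   the 32-bit result comes back little-endian in the reply. */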
+ hdw->cmd_buffer[0] = 0x05; /* read register prefix */
+ hdw->cmd_buffer[1] = 0;
+ hdw->cmd_buffer[2] = 0;
+ hdw->cmd_buffer[3] = 0;
+ hdw->cmd_buffer[4] = 0;
+ hdw->cmd_buffer[5] = 0;
+ hdw->cmd_buffer[6] = (reg >> 8) & 0xff;
+ hdw->cmd_buffer[7] = reg & 0xff;
+
+ ret |= pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 4);
+ *data = PVR2_COMPOSE_LE(hdw->cmd_buffer,0);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+int pvr2_write_u16(struct pvr2_hdw *hdw, u16 data, int res)
+{
+ int ret;
+		hdw->std_info_cur.def.type_bitmask.valid_bits =
+ LOCK_TAKE(hdw->ctl_lock);
+
+ hdw->cmd_buffer[0] = (data >> 8) & 0xff;
+ hdw->cmd_buffer[1] = data & 0xff;
+
+ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 2, hdw->cmd_buffer, res);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+int pvr2_write_u8(struct pvr2_hdw *hdw, u8 data, int res)
+{
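+	/* Grab the first free unit number; pvr2_unit_sem serializes
+	   access to the shared unit_pointers[] table across probes. */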
+ int ret;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ hdw->cmd_buffer[0] = data;
+
+ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 1, hdw->cmd_buffer, res);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *hdw)
+{
+ if (!hdw->flag_ok) return;
+ pvr2_trace(PVR2_TRACE_INIT,"render_useless");
+ hdw->flag_ok = 0;
+ if (hdw->vid_stream) {
+ pvr2_stream_setup(hdw->vid_stream,0,0,0);
+ }
+ hdw->flag_streaming_enabled = 0;
+ hdw->subsys_enabled_mask = 0;
+}
+
+
+void pvr2_hdw_render_useless(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->ctl_lock);
+ pvr2_hdw_render_useless_unlocked(hdw);
+ LOCK_GIVE(hdw->ctl_lock);
+}
+
+
+void pvr2_hdw_device_reset(struct pvr2_hdw *hdw)
+{
+ int ret;
+ pvr2_trace(PVR2_TRACE_INIT,"Performing a device reset...");
+ ret = usb_lock_device_for_reset(hdw->usb_dev,0);
+ if (ret == 1) {
+ ret = usb_reset_device(hdw->usb_dev);
+ usb_unlock_device(hdw->usb_dev);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to lock USB device ret=%d",ret);
+ }
+ if (init_pause_msec) {
+ pvr2_trace(PVR2_TRACE_INFO,
+ "Waiting %u msec for hardware to settle",
+ init_pause_msec);
+ msleep(init_pause_msec);
+ }
+
+}
+
+
+void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val)
+{
+ char da[1];
+ unsigned int pipe;
+ int ret;
+
+ if (!hdw->usb_dev) return;
+
+ pvr2_trace(PVR2_TRACE_INIT,"cpureset_assert(%d)",val);
+
+ da[0] = val ? 0x01 : 0x00;
+
+ /* Write the CPUCS register on the 8051. The lsb of the register
+ is the reset bit; a 1 asserts reset while a 0 clears it. */
+ pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
+ ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ);
+ if (ret < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "cpureset_assert(%d) error=%d",val,ret);
+ pvr2_hdw_render_useless(hdw);
+ }
+}
+
+
+int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *hdw)
+{
+ int status;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ pvr2_trace(PVR2_TRACE_INIT,"Requesting uproc hard reset");
+ hdw->flag_ok = !0;
+ hdw->cmd_buffer[0] = 0xdd;
+ status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
+ } while (0); LOCK_GIVE(hdw->ctl_lock);
+ return status;
+}
+
+
+int pvr2_hdw_cmd_powerup(struct pvr2_hdw *hdw)
+{
+ int status;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ pvr2_trace(PVR2_TRACE_INIT,"Requesting powerup");
+ hdw->cmd_buffer[0] = 0xde;
+ status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
+ } while (0); LOCK_GIVE(hdw->ctl_lock);
+ return status;
+}
+
+
+int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
+{
+ if (!hdw->decoder_ctrl) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Unable to reset decoder: nothing attached");
+ return -ENOTTY;
+ }
+
+ if (!hdw->decoder_ctrl->force_reset) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Unable to reset decoder: not implemented");
+ return -ENOTTY;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Requesting decoder reset");
+ hdw->decoder_ctrl->force_reset(hdw->decoder_ctrl->ctxt);
+ return 0;
+}
+
+
+int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl)
+{
+ int status;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = (runFl ? 0x36 : 0x37);
+ status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
+ } while (0); LOCK_GIVE(hdw->ctl_lock);
+ if (!status) {
+ hdw->subsys_enabled_mask =
+ ((hdw->subsys_enabled_mask &
+ ~(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) |
+ (runFl ? (1<<PVR2_SUBSYS_B_USBSTREAM_RUN) : 0));
+ }
+ return status;
+}
+
+
+void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw,
+ struct pvr2_hdw_debug_info *ptr)
+{
+ ptr->big_lock_held = hdw->big_lock_held;
+ ptr->ctl_lock_held = hdw->ctl_lock_held;
+ ptr->flag_ok = hdw->flag_ok;
+ ptr->flag_disconnected = hdw->flag_disconnected;
+ ptr->flag_init_ok = hdw->flag_init_ok;
+ ptr->flag_streaming_enabled = hdw->flag_streaming_enabled;
+ ptr->subsys_flags = hdw->subsys_enabled_mask;
+ ptr->cmd_debug_state = hdw->cmd_debug_state;
+ ptr->cmd_code = hdw->cmd_debug_code;
+ ptr->cmd_debug_write_len = hdw->cmd_debug_write_len;
+ ptr->cmd_debug_read_len = hdw->cmd_debug_read_len;
+ ptr->cmd_debug_timeout = hdw->ctl_timeout_flag;
+ ptr->cmd_debug_write_pend = hdw->ctl_write_pend_flag;
+ ptr->cmd_debug_read_pend = hdw->ctl_read_pend_flag;
+ ptr->cmd_debug_rstatus = hdw->ctl_read_urb->status;
+	ptr->cmd_debug_wstatus = hdw->ctl_write_urb->status;
+}
+
+
+int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *dp)
+{
+ return pvr2_read_register(hdw,PVR2_GPIO_DIR,dp);
+}
+
+
+int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *dp)
+{
+ return pvr2_read_register(hdw,PVR2_GPIO_OUT,dp);
+}
+
+
+int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *dp)
+{
+ return pvr2_read_register(hdw,PVR2_GPIO_IN,dp);
+}
+
+
+int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val)
+{
+ u32 cval,nval;
+ int ret;
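+	/* A partial mask requires a read-modify-write; when every bit is
+	   being set (~msk == 0) the read can be skipped entirely. */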
+ if (~msk) {
+ ret = pvr2_read_register(hdw,PVR2_GPIO_DIR,&cval);
+ if (ret) return ret;
+ nval = (cval & ~msk) | (val & msk);
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO direction changing 0x%x:0x%x"
+ " from 0x%x to 0x%x",
+ msk,val,cval,nval);
+ } else {
+ nval = val;
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO direction changing to 0x%x",nval);
+ }
+ return pvr2_write_register(hdw,PVR2_GPIO_DIR,nval);
+}
+
+
+int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val)
+{
+ u32 cval,nval;
+ int ret;
+ if (~msk) {
+ ret = pvr2_read_register(hdw,PVR2_GPIO_OUT,&cval);
+ if (ret) return ret;
+ nval = (cval & ~msk) | (val & msk);
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO output changing 0x%x:0x%x from 0x%x to 0x%x",
+ msk,val,cval,nval);
+ } else {
+ nval = val;
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO output changing to 0x%x",nval);
+ }
+ return pvr2_write_register(hdw,PVR2_GPIO_OUT,nval);
+}
+
+
+int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw)
+{
+ int result;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = 0xeb;
+ result = pvr2_send_request(hdw,
+ hdw->cmd_buffer,1,
+ hdw->cmd_buffer,1);
+ if (result < 0) break;
+ result = hdw->cmd_buffer[0];
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+ return result;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
new file mode 100644
index 000000000000..63f529154431
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -0,0 +1,335 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_HDW_H
+#define __PVRUSB2_HDW_H
+
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include "pvrusb2-io.h"
+#include "pvrusb2-ctrl.h"
+
+
+/* Private internal control ids, look these up with
+ pvr2_hdw_get_ctrl_by_id() - these are NOT visible in V4L */
+#define PVR2_CID_STDENUM 1
+#define PVR2_CID_STDCUR 2
+#define PVR2_CID_STDAVAIL 3
+#define PVR2_CID_INPUT 4
+#define PVR2_CID_AUDIOMODE 5
+#define PVR2_CID_FREQUENCY 6
+#define PVR2_CID_HRES 7
+#define PVR2_CID_VRES 8
+
+/* Legal values for the INPUT state variable */
+#define PVR2_CVAL_INPUT_TV 0
+#define PVR2_CVAL_INPUT_SVIDEO 1
+#define PVR2_CVAL_INPUT_COMPOSITE 2
+#define PVR2_CVAL_INPUT_RADIO 3
+
+/* Values that pvr2_hdw_get_signal_status() returns */
+#define PVR2_SIGNAL_OK 0x0001
+#define PVR2_SIGNAL_STEREO 0x0002
+#define PVR2_SIGNAL_SAP 0x0004
+
+
+/* Subsystem definitions - these are various pieces that can be
+ independently stopped / started. Usually you don't want to mess with
+ this directly (let the driver handle things itself), but it is useful
+ for debugging. */
+#define PVR2_SUBSYS_B_ENC_FIRMWARE 0
+#define PVR2_SUBSYS_B_ENC_CFG 1
+#define PVR2_SUBSYS_B_DIGITIZER_RUN 2
+#define PVR2_SUBSYS_B_USBSTREAM_RUN 3
+#define PVR2_SUBSYS_B_ENC_RUN 4
+
+#define PVR2_SUBSYS_CFG_ALL ( \
+ (1 << PVR2_SUBSYS_B_ENC_FIRMWARE) | \
+ (1 << PVR2_SUBSYS_B_ENC_CFG) )
+#define PVR2_SUBSYS_RUN_ALL ( \
+ (1 << PVR2_SUBSYS_B_DIGITIZER_RUN) | \
+ (1 << PVR2_SUBSYS_B_USBSTREAM_RUN) | \
+ (1 << PVR2_SUBSYS_B_ENC_RUN) )
+#define PVR2_SUBSYS_ALL ( \
+ PVR2_SUBSYS_CFG_ALL | \
+ PVR2_SUBSYS_RUN_ALL )
+
+enum pvr2_config {
+ pvr2_config_empty,
+ pvr2_config_mpeg,
+ pvr2_config_vbi,
+ pvr2_config_radio,
+};
+
+const char *pvr2_config_get_name(enum pvr2_config);
+
+struct pvr2_hdw;
+
+/* Create and return a structure for interacting with the underlying
+ hardware */
+struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+ const struct usb_device_id *devid);
+
+/* Poll for background activity (if any) */
+void pvr2_hdw_poll(struct pvr2_hdw *);
+
+/* Trigger a poll to take place later at a convenient time */
+void pvr2_hdw_poll_trigger(struct pvr2_hdw *);
+void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *);
+
+/* Register a callback used to trigger a future poll */
+void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *,
+ void (*func)(void *),
+ void *data);
+
+/* Get pointer to structure given unit number */
+struct pvr2_hdw *pvr2_hdw_find(int unit_number);
+
+/* Destroy hardware interaction structure */
+void pvr2_hdw_destroy(struct pvr2_hdw *);
+
+/* Set up the structure and attempt to put the device into a usable state.
+ This can be a time-consuming operation, which is why it is not done
+ internally as part of the create() step. Return value is exactly the
+ same as pvr2_hdw_init_ok(). */
+int pvr2_hdw_setup(struct pvr2_hdw *);
+
+/* Initialization succeeded */
+int pvr2_hdw_init_ok(struct pvr2_hdw *);
+
+/* Return true if in the ready (normal) state */
+int pvr2_hdw_dev_ok(struct pvr2_hdw *);
+
+/* Return small integer number [1..N] for logical instance number of this
+ device. This is useful for indexing array-valued module parameters. */
+int pvr2_hdw_get_unit_number(struct pvr2_hdw *);
+
+/* Get pointer to underlying USB device */
+struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *);
+
+/* Retrieve serial number of device */
+unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *);
+
+/* Called when hardware has been unplugged */
+void pvr2_hdw_disconnect(struct pvr2_hdw *);
+
+/* Get the number of defined controls */
+unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *);
+
+/* Retrieve a control handle given its index (0..count-1) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *,unsigned int);
+
+/* Retrieve a control handle given its internal ID (if any) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *,unsigned int);
+
+/* Retrieve a control handle given its V4L ID (if any) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *,unsigned int ctl_id);
+
+/* Retrieve a control handle given its immediate predecessor V4L ID (if any) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *,
+ unsigned int ctl_id);
+
+/* Commit all control changes made up to this point */
+int pvr2_hdw_commit_ctl(struct pvr2_hdw *);
+
+/* Return name for this driver instance */
+const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *);
+
+/* Return PVR2_SIGNAL_XXXX bit mask indicating signal status */
+unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *);
+
+/* Query device and see if it thinks it is on a high-speed USB link */
+int pvr2_hdw_is_hsm(struct pvr2_hdw *);
+
+/* Turn streaming on/off */
+int pvr2_hdw_set_streaming(struct pvr2_hdw *,int);
+
+/* Find out if streaming is on */
+int pvr2_hdw_get_streaming(struct pvr2_hdw *);
+
+/* Configure the type of stream to generate */
+int pvr2_hdw_set_stream_type(struct pvr2_hdw *, enum pvr2_config);
+
+/* Get handle to video output stream */
+struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *);
+
+/* Emit a video standard struct */
+int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std,
+ unsigned int idx);
+
+/* Enable / disable various pieces of hardware. Items to change are
+ identified by bit positions within msk, and new state for each item is
+ identified by corresponding bit positions within val. */
+void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val);
+
+/* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,msk) */
+void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk);
+
+/* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,0) */
+void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk);
+
+/* Retrieve mask indicating which pieces of hardware are currently enabled
+ / configured. */
+unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *);
+
+/* Adjust mask of what gets shut down when streaming is stopped. This is
+   a debugging aid. */
+void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val);
+
+/* Retrieve mask indicating which pieces of hardware are disabled when
+ streaming is turned off. */
+unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *);
+
+
+/* Enable / disable retrieval of CPU firmware. This must be enabled before
+ pvr2_hdw_cpufw_get() will function. Note that doing this may prevent
+ the device from running (and leaving this mode may imply a device
+ reset). */
+void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *, int enable_flag);
+
+/* Return true if we're in a mode where CPU firmware can be retrieved */
+int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *);
+
+/* Retrieve a piece of the CPU's firmware at the given offset.  Return
+   value is the number of bytes retrieved, zero if we're past the end, or
+   a negative error code otherwise (e.g. if firmware retrieval is not
+   enabled). */
+int pvr2_hdw_cpufw_get(struct pvr2_hdw *,unsigned int offs,
+ char *buf,unsigned int cnt);
+
+/* Retrieve previously stored v4l minor device number */
+int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *);
+
+/* Store the v4l minor device number */
+void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *,int);
+
+
+/* The following entry points are all lower level things you normally don't
+ want to worry about. */
+
+/* Attempt to recover from a USB foul-up (in practice I find that if you
+ have to do this, then it's already too late). */
+void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw);
+
+/* Issue a command and get a response from the device. LOTS of higher
+ level stuff is built on this. */
+int pvr2_send_request(struct pvr2_hdw *,
+ void *write_ptr,unsigned int write_len,
+ void *read_ptr,unsigned int read_len);
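+
+/* A minimal usage sketch (modeled on pvr2_hdw_get_eeprom_addr() in
+   pvrusb2-hdw.c): hold ctl_lock, drop an opcode into the internal
+   cmd_buffer, and read back the device's one-byte reply:
+
+	LOCK_TAKE(hdw->ctl_lock); do {
+		hdw->cmd_buffer[0] = 0xeb;
+		result = pvr2_send_request(hdw, hdw->cmd_buffer, 1,
+					   hdw->cmd_buffer, 1);
+	} while (0); LOCK_GIVE(hdw->ctl_lock);
+*/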
+
+/* Issue a command and get a response from the device. This extended
+ version includes a probe flag (which if set means that device errors
+ should not be logged or treated as fatal) and a timeout in jiffies.
+ This can be used to non-lethally probe the health of endpoint 1. */
+int pvr2_send_request_ex(struct pvr2_hdw *,unsigned int timeout,int probe_fl,
+ void *write_ptr,unsigned int write_len,
+ void *read_ptr,unsigned int read_len);
+
+/* Slightly higher level device communication functions. */
+int pvr2_write_register(struct pvr2_hdw *, u16, u32);
+int pvr2_read_register(struct pvr2_hdw *, u16, u32 *);
+int pvr2_write_u16(struct pvr2_hdw *, u16, int);
+int pvr2_write_u8(struct pvr2_hdw *, u8, int);
+
+/* Call if for any reason we can't talk to the hardware anymore - this will
+ cause the driver to stop flailing on the device. */
+void pvr2_hdw_render_useless(struct pvr2_hdw *);
+void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *);
+
+/* Set / clear 8051's reset bit */
+void pvr2_hdw_cpureset_assert(struct pvr2_hdw *,int);
+
+/* Execute a USB-commanded device reset */
+void pvr2_hdw_device_reset(struct pvr2_hdw *);
+
+/* Execute hard reset command (after this point it's likely that the
+ encoder will have to be reconfigured). This also clears the "useless"
+ state. */
+int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *);
+
+/* Execute simple reset command */
+int pvr2_hdw_cmd_powerup(struct pvr2_hdw *);
+
+/* Order decoder to reset */
+int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *);
+
+/* Stop / start video stream transport */
+int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl);
+
+/* Find I2C address of eeprom */
+int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *);
+
+/* Direct manipulation of GPIO bits */
+int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *);
+int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *);
+int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *);
+int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val);
+int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val);
+
+/* This data structure is specifically for the next function... */
+struct pvr2_hdw_debug_info {
+ int big_lock_held;
+ int ctl_lock_held;
+ int flag_ok;
+ int flag_disconnected;
+ int flag_init_ok;
+ int flag_streaming_enabled;
+ unsigned long subsys_flags;
+ int cmd_debug_state;
+ int cmd_debug_write_len;
+ int cmd_debug_read_len;
+ int cmd_debug_write_pend;
+ int cmd_debug_read_pend;
+ int cmd_debug_timeout;
+ int cmd_debug_rstatus;
+ int cmd_debug_wstatus;
+ unsigned char cmd_code;
+};
+
+/* Non-intrusively retrieve internal state info - this is useful for
+ diagnosing lockups. Note that this operation is completed without any
+ kind of locking and so it is not atomic and may yield inconsistent
+ results. This is *purely* a debugging aid. */
+void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw,
+ struct pvr2_hdw_debug_info *);
+
+/* Cause modules to log their state once */
+void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw);
+
+/* Cause encoder firmware to be uploaded into the device. This is normally
+ done autonomously, but the interface is exported here because it is also
+ a debugging aid. */
+int pvr2_upload_firmware2(struct pvr2_hdw *hdw);
+
+/* List of device types that we can match */
+extern struct usb_device_id pvr2_device_table[];
+
+#endif /* __PVRUSB2_HDW_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c
new file mode 100644
index 000000000000..1dd4f6249b99
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c
@@ -0,0 +1,115 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-i2c-core.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+#include "pvrusb2-audio.h"
+#include "pvrusb2-tuner.h"
+#include "pvrusb2-demod.h"
+#include "pvrusb2-video-v4l.h"
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+#include "pvrusb2-cx2584x-v4l.h"
+#include "pvrusb2-wm8775.h"
+#endif
+
+#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
+
+#define OP_STANDARD 0
+#define OP_BCSH 1
+#define OP_VOLUME 2
+#define OP_FREQ 3
+#define OP_AUDIORATE 4
+#define OP_SIZE 5
+#define OP_LOG 6
+
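+/* Handler table indexed by the OP_xxx values above.  OP_AUDIORATE
+   currently has no handler, so its slot is left null and
+   pvr2_i2c_get_op() returns nothing for it. */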
+static const struct pvr2_i2c_op * const ops[] = {
+ [OP_STANDARD] = &pvr2_i2c_op_v4l2_standard,
+ [OP_BCSH] = &pvr2_i2c_op_v4l2_bcsh,
+ [OP_VOLUME] = &pvr2_i2c_op_v4l2_volume,
+ [OP_FREQ] = &pvr2_i2c_op_v4l2_frequency,
+ [OP_SIZE] = &pvr2_i2c_op_v4l2_size,
+ [OP_LOG] = &pvr2_i2c_op_v4l2_log,
+};
+
+void pvr2_i2c_probe(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ int id;
+ id = cp->client->driver->id;
+ cp->ctl_mask = ((1 << OP_STANDARD) |
+ (1 << OP_BCSH) |
+ (1 << OP_VOLUME) |
+ (1 << OP_FREQ) |
+ (1 << OP_SIZE) |
+ (1 << OP_LOG));
+
+ if (id == I2C_DRIVERID_MSP3400) {
+ if (pvr2_i2c_msp3400_setup(hdw,cp)) {
+ return;
+ }
+ }
+ if (id == I2C_DRIVERID_TUNER) {
+ if (pvr2_i2c_tuner_setup(hdw,cp)) {
+ return;
+ }
+ }
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ if (id == I2C_DRIVERID_CX25840) {
+ if (pvr2_i2c_cx2584x_v4l_setup(hdw,cp)) {
+ return;
+ }
+ }
+ if (id == I2C_DRIVERID_WM8775) {
+ if (pvr2_i2c_wm8775_setup(hdw,cp)) {
+ return;
+ }
+ }
+#endif
+ if (id == I2C_DRIVERID_SAA711X) {
+ if (pvr2_i2c_decoder_v4l_setup(hdw,cp)) {
+ return;
+ }
+ }
+ if (id == I2C_DRIVERID_TDA9887) {
+ if (pvr2_i2c_demod_setup(hdw,cp)) {
+ return;
+ }
+ }
+}
+
+
+const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx)
+{
+ if (idx >= sizeof(ops)/sizeof(ops[0])) return 0;
+ return ops[idx];
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c
new file mode 100644
index 000000000000..9f81aff2b38a
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c
@@ -0,0 +1,232 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-i2c-cmd-v4l2.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+
+
+static void set_standard(struct pvr2_hdw *hdw)
+{
+ v4l2_std_id vs;
+ vs = hdw->std_mask_cur;
+ pvr2_trace(PVR2_TRACE_CHIPS,
+ "i2c v4l2 set_standard(0x%llx)",(__u64)vs);
+
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_STD,&vs);
+}
+
+
+static int check_standard(struct pvr2_hdw *hdw)
+{
+ return hdw->std_dirty != 0;
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard = {
+ .check = check_standard,
+ .update = set_standard,
+ .name = "v4l2_standard",
+};
+
+
+static void set_bcsh(struct pvr2_hdw *hdw)
+{
+ struct v4l2_control ctrl;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_bcsh"
+ " b=%d c=%d s=%d h=%d",
+ hdw->brightness_val,hdw->contrast_val,
+ hdw->saturation_val,hdw->hue_val);
+ memset(&ctrl,0,sizeof(ctrl));
+ ctrl.id = V4L2_CID_BRIGHTNESS;
+ ctrl.value = hdw->brightness_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_CONTRAST;
+ ctrl.value = hdw->contrast_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_SATURATION;
+ ctrl.value = hdw->saturation_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_HUE;
+ ctrl.value = hdw->hue_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+}
+
+
+static int check_bcsh(struct pvr2_hdw *hdw)
+{
+ return (hdw->brightness_dirty ||
+ hdw->contrast_dirty ||
+ hdw->saturation_dirty ||
+ hdw->hue_dirty);
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh = {
+ .check = check_bcsh,
+ .update = set_bcsh,
+ .name = "v4l2_bcsh",
+};
+
+
+static void set_volume(struct pvr2_hdw *hdw)
+{
+ struct v4l2_control ctrl;
+ pvr2_trace(PVR2_TRACE_CHIPS,
+ "i2c v4l2 set_volume"
+ "(vol=%d bal=%d bas=%d treb=%d mute=%d)",
+ hdw->volume_val,
+ hdw->balance_val,
+ hdw->bass_val,
+ hdw->treble_val,
+ hdw->mute_val);
+ memset(&ctrl,0,sizeof(ctrl));
+ ctrl.id = V4L2_CID_AUDIO_MUTE;
+ ctrl.value = hdw->mute_val ? 1 : 0;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_VOLUME;
+ ctrl.value = hdw->volume_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_BALANCE;
+ ctrl.value = hdw->balance_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_BASS;
+ ctrl.value = hdw->bass_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_TREBLE;
+ ctrl.value = hdw->treble_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+}
+
+
+static int check_volume(struct pvr2_hdw *hdw)
+{
+ return (hdw->volume_dirty ||
+ hdw->balance_dirty ||
+ hdw->bass_dirty ||
+ hdw->treble_dirty ||
+ hdw->mute_dirty);
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume = {
+ .check = check_volume,
+ .update = set_volume,
+ .name = "v4l2_volume",
+};
+
+
+static void set_frequency(struct pvr2_hdw *hdw)
+{
+ unsigned long fv;
+ struct v4l2_frequency freq;
+ fv = hdw->freqVal;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_freq(%lu)",fv);
+ memset(&freq,0,sizeof(freq));
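+	/* V4L2 expresses analog TV tuner frequencies in units of
+	   62.5 kHz, so convert from Hz here. */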
+ freq.frequency = fv / 62500;
+ freq.tuner = 0;
+ freq.type = V4L2_TUNER_ANALOG_TV;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_FREQUENCY,&freq);
+}
+
+
+static int check_frequency(struct pvr2_hdw *hdw)
+{
+ return hdw->freqDirty != 0;
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency = {
+ .check = check_frequency,
+ .update = set_frequency,
+ .name = "v4l2_freq",
+};
+
+
+static void set_size(struct pvr2_hdw *hdw)
+{
+ struct v4l2_format fmt;
+
+ memset(&fmt,0,sizeof(fmt));
+
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmt.fmt.pix.width = hdw->res_hor_val;
+ fmt.fmt.pix.height = hdw->res_ver_val;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_size(%dx%d)",
+ fmt.fmt.pix.width,fmt.fmt.pix.height);
+
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_FMT,&fmt);
+}
+
+
+static int check_size(struct pvr2_hdw *hdw)
+{
+ return (hdw->res_hor_dirty || hdw->res_ver_dirty);
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size = {
+ .check = check_size,
+ .update = set_size,
+ .name = "v4l2_size",
+};
+
+
+static void do_log(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 do_log()");
+ pvr2_i2c_core_cmd(hdw,VIDIOC_LOG_STATUS,0);
+
+}
+
+
+static int check_log(struct pvr2_hdw *hdw)
+{
+ return hdw->log_requested != 0;
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log = {
+ .check = check_log,
+ .update = do_log,
+ .name = "v4l2_log",
+};
+
+
+void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *cp,int fl)
+{
+ pvr2_i2c_client_cmd(cp,
+ (fl ? VIDIOC_STREAMON : VIDIOC_STREAMOFF),0);
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h
new file mode 100644
index 000000000000..ecabddba1ec5
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_CMD_V4L2_H
+#define __PVRUSB2_CMD_V4L2_H
+
+#include "pvrusb2-i2c-core.h"
+
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log;
+
+void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *,int);
+
+#endif /* __PVRUSB2_CMD_V4L2_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
new file mode 100644
index 000000000000..c8d0bdee3ff1
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -0,0 +1,937 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-i2c-core.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+
+#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
+
+/*
+
+ This module attempts to implement a compliant I2C adapter for the pvrusb2
+ device. By doing this we can then make use of existing functionality in
+ V4L (e.g. tuner.c) rather than rolling our own.
+
+*/
+
+static unsigned int i2c_scan = 0;
+module_param(i2c_scan, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
+
+static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
+ u8 i2c_addr, /* I2C address we're talking to */
+ u8 *data, /* Data to write */
+ u16 length) /* Size of data to write */
+{
+ /* Return value - default 0 means success */
+ int ret;
+
+
+ if (!data) length = 0;
+ if (length > (sizeof(hdw->cmd_buffer) - 3)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Killing an I2C write to %u that is too large"
+ " (desired=%u limit=%u)",
+ i2c_addr,
+ length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
+ return -ENOTSUPP;
+ }
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ /* Clear the command buffer (likely to be paranoia) */
+ memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));
+
+ /* Set up command buffer for an I2C write */
+ hdw->cmd_buffer[0] = 0x08; /* write prefix */
+ hdw->cmd_buffer[1] = i2c_addr; /* i2c addr of chip */
+ hdw->cmd_buffer[2] = length; /* length of what follows */
+ if (length) memcpy(hdw->cmd_buffer + 3, data, length);
+
+ /* Do the operation */
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,
+ length + 3,
+ hdw->cmd_buffer,
+ 1);
+ if (!ret) {
+ if (hdw->cmd_buffer[0] != 8) {
+ ret = -EIO;
+ if (hdw->cmd_buffer[0] != 7) {
+ trace_i2c("unexpected status"
+					  " from i2c_write[%d]: %d",
+ i2c_addr,hdw->cmd_buffer[0]);
+ }
+ }
+ }
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
+ u8 i2c_addr, /* I2C address we're talking to */
+ u8 *data, /* Data to write */
+ u16 dlen, /* Size of data to write */
+ u8 *res, /* Where to put data we read */
+ u16 rlen) /* Amount of data to read */
+{
+ /* Return value - default 0 means success */
+ int ret;
+
+
+ if (!data) dlen = 0;
+ if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Killing an I2C read to %u that has wlen too large"
+ " (desired=%u limit=%u)",
+ i2c_addr,
+ dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
+ return -ENOTSUPP;
+ }
+ if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Killing an I2C read to %u that has rlen too large"
+ " (desired=%u limit=%u)",
+ i2c_addr,
+ rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
+ return -ENOTSUPP;
+ }
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ /* Clear the command buffer (likely to be paranoia) */
+ memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));
+
+ /* Set up command buffer for an I2C write followed by a read */
+ hdw->cmd_buffer[0] = 0x09; /* read prefix */
+ hdw->cmd_buffer[1] = dlen; /* arg length */
+ hdw->cmd_buffer[2] = rlen; /* answer length. Device will send one
+ more byte (status). */
+ hdw->cmd_buffer[3] = i2c_addr; /* i2c addr of chip */
+ if (dlen) memcpy(hdw->cmd_buffer + 4, data, dlen);
+
+ /* Do the operation */
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,
+ 4 + dlen,
+ hdw->cmd_buffer,
+ rlen + 1);
+ if (!ret) {
+ if (hdw->cmd_buffer[0] != 8) {
+ ret = -EIO;
+ if (hdw->cmd_buffer[0] != 7) {
+				trace_i2c("unexpected status"
+					  " from i2c_read[%d]: %d",
+					  i2c_addr,hdw->cmd_buffer[0]);
+ }
+ }
+ }
+
+ /* Copy back the result */
+ if (res && rlen) {
+ if (ret) {
+ /* Error, just blank out the return buffer */
+ memset(res, 0, rlen);
+ } else {
+ memcpy(res, hdw->cmd_buffer + 1, rlen);
+ }
+ }
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+/* This is the common low level entry point for doing I2C operations to the
+ hardware. */
+int pvr2_i2c_basic_op(struct pvr2_hdw *hdw,
+ u8 i2c_addr,
+ u8 *wdata,
+ u16 wlen,
+ u8 *rdata,
+ u16 rlen)
+{
+ if (!rdata) rlen = 0;
+ if (!wdata) wlen = 0;
+ if (rlen || !wlen) {
+ return pvr2_i2c_read(hdw,i2c_addr,wdata,wlen,rdata,rlen);
+ } else {
+ return pvr2_i2c_write(hdw,i2c_addr,wdata,wlen);
+ }
+}
+
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+
+/* This is a special entry point that is entered if an I2C operation is
+ attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this
+ part doesn't work, but we know it is really there. So let's look for
+ the autodetect attempt and just return success if we see that. */
+static int i2c_hack_wm8775(struct pvr2_hdw *hdw,
+ u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
+{
+ if (!(rlen || wlen)) {
+ // This is a probe attempt. Just let it succeed.
+ return 0;
+ }
+ return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
+}
+
+/* This is a special entry point that is entered if an I2C operation is
+ attempted to a cx25840 chip on model 24xxx hardware. This chip can
+ sometimes wedge itself. Worse still, when this happens msp3400 can
+ falsely detect this part and then the system gets hosed up after msp3400
+ gets confused and dies. What we want to do here is try to keep msp3400
+ away and also try to notice if the chip is wedged and send a warning to
+ the system log. */
+static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
+ u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
+{
+ int ret;
+ unsigned int subaddr;
+ u8 wbuf[2];
+ int state = hdw->i2c_cx25840_hack_state;
+
+ if (!(rlen || wlen)) {
+ // Probe attempt - always just succeed and don't bother the
+ // hardware (this helps to make the state machine further
+ // down somewhat easier).
+ return 0;
+ }
+
+ if (state == 3) {
+ return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
+ }
+
+ /* We're looking for the exact pattern where the revision register
+ is being read. The cx25840 module will always look at the
+ revision register first. Any other pattern of access therefore
+ has to be a probe attempt from somebody else so we'll reject it.
+ Normally we could just let each client just probe the part
+ anyway, but when the cx25840 is wedged, msp3400 will get a false
+ positive and that just screws things up... */
+
+ if (wlen == 0) {
+ switch (state) {
+ case 1: subaddr = 0x0100; break;
+ case 2: subaddr = 0x0101; break;
+ default: goto fail;
+ }
+ } else if (wlen == 2) {
+ subaddr = (wdata[0] << 8) | wdata[1];
+ switch (subaddr) {
+ case 0x0100: state = 1; break;
+ case 0x0101: state = 2; break;
+ default: goto fail;
+ }
+ } else {
+ goto fail;
+ }
+ if (!rlen) goto success;
+ state = 0;
+ if (rlen != 1) goto fail;
+
+ /* If we get to here then we have a legitimate read for one of the
+ two revision bytes, so pass it through. */
+ wbuf[0] = subaddr >> 8;
+ wbuf[1] = subaddr;
+ ret = pvr2_i2c_basic_op(hdw,i2c_addr,wbuf,2,rdata,rlen);
+
+ if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Detected a wedged cx25840 chip;"
+ " the device will not work.");
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Try power cycling the pvrusb2 device.");
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Disabling further access to the device"
+ " to prevent other foul-ups.");
+ // This blocks all further communication with the part.
+ hdw->i2c_func[0x44] = 0;
+ pvr2_hdw_render_useless(hdw);
+ goto fail;
+ }
+
+ /* Success! */
+ pvr2_trace(PVR2_TRACE_CHIPS,"cx25840 appears to be OK.");
+ state = 3;
+
+ success:
+ hdw->i2c_cx25840_hack_state = state;
+ return 0;
+
+ fail:
+ hdw->i2c_cx25840_hack_state = state;
+ return -EIO;
+}
+
+#endif /* CONFIG_VIDEO_PVRUSB2_24XXX */
+
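+/* On 24xxx hardware the two hacks above are installed into the per-address
+   hdw->i2c_func[] dispatch table by pvr2_i2c_core_init() near the bottom
+   of this file; all other addresses continue to use pvr2_i2c_basic_op
+   directly. */
+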
+/* This is a very, very limited I2C adapter implementation. We can only
+ support what we actually know will work on the device... */
+static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ int ret = -ENOTSUPP;
+ pvr2_i2c_func funcp = 0;
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)(i2c_adap->algo_data);
+
+ if (!num) {
+ ret = -EINVAL;
+ goto done;
+ }
+ if ((msgs[0].flags & I2C_M_NOSTART)) {
+ trace_i2c("i2c refusing I2C_M_NOSTART");
+ goto done;
+ }
+ if (msgs[0].addr < PVR2_I2C_FUNC_CNT) {
+ funcp = hdw->i2c_func[msgs[0].addr];
+ }
+ if (!funcp) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (num == 1) {
+ if (msgs[0].flags & I2C_M_RD) {
+ /* Simple read */
+ u16 tcnt,bcnt,offs;
+ if (!msgs[0].len) {
+ /* Length == 0 read. This is a probe. */
+ if (funcp(hdw,msgs[0].addr,0,0,0,0)) {
+ ret = -EIO;
+ goto done;
+ }
+ ret = 1;
+ goto done;
+ }
+ /* If the read is short enough we'll do the whole
+ thing atomically. Otherwise we have no choice
+ but to break apart the reads. */
+ tcnt = msgs[0].len;
+ offs = 0;
+ while (tcnt) {
+ bcnt = tcnt;
+ if (bcnt > sizeof(hdw->cmd_buffer)-1) {
+ bcnt = sizeof(hdw->cmd_buffer)-1;
+ }
+ if (funcp(hdw,msgs[0].addr,0,0,
+ msgs[0].buf+offs,bcnt)) {
+ ret = -EIO;
+ goto done;
+ }
+ offs += bcnt;
+ tcnt -= bcnt;
+ }
+ ret = 1;
+ goto done;
+ } else {
+ /* Simple write */
+ ret = 1;
+ if (funcp(hdw,msgs[0].addr,
+ msgs[0].buf,msgs[0].len,0,0)) {
+ ret = -EIO;
+ }
+ goto done;
+ }
+ } else if (num == 2) {
+ if (msgs[0].addr != msgs[1].addr) {
+ trace_i2c("i2c refusing 2 phase transfer with"
+ " conflicting target addresses");
+ ret = -ENOTSUPP;
+ goto done;
+ }
+ if ((!((msgs[0].flags & I2C_M_RD))) &&
+ (msgs[1].flags & I2C_M_RD)) {
+ u16 tcnt,bcnt,wcnt,offs;
+ /* Write followed by atomic read. If the read
+ portion is short enough we'll do the whole thing
+ atomically. Otherwise we have no choice but to
+ break apart the reads. */
+ tcnt = msgs[1].len;
+ wcnt = msgs[0].len;
+ offs = 0;
+ while (tcnt || wcnt) {
+ bcnt = tcnt;
+ if (bcnt > sizeof(hdw->cmd_buffer)-1) {
+ bcnt = sizeof(hdw->cmd_buffer)-1;
+ }
+ if (funcp(hdw,msgs[0].addr,
+ msgs[0].buf,wcnt,
+ msgs[1].buf+offs,bcnt)) {
+ ret = -EIO;
+ goto done;
+ }
+ offs += bcnt;
+ tcnt -= bcnt;
+ wcnt = 0;
+ }
+ ret = 2;
+ goto done;
+ } else {
+ trace_i2c("i2c refusing complex transfer"
+ " read0=%d read1=%d",
+ (msgs[0].flags & I2C_M_RD),
+ (msgs[1].flags & I2C_M_RD));
+ }
+ } else {
+ trace_i2c("i2c refusing %d phase transfer",num);
+ }
+
+ done:
+ if (pvrusb2_debug & PVR2_TRACE_I2C_TRAF) {
+ unsigned int idx,offs,cnt;
+ for (idx = 0; idx < num; idx++) {
+ cnt = msgs[idx].len;
+ printk(KERN_INFO
+ "pvrusb2 i2c xfer %u/%u:"
+ " addr=0x%x len=%d %s%s",
+ idx+1,num,
+ msgs[idx].addr,
+ cnt,
+ (msgs[idx].flags & I2C_M_RD ?
+ "read" : "write"),
+ (msgs[idx].flags & I2C_M_NOSTART ?
+ " nostart" : ""));
+ if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
+				/* Dump at most 8 data bytes; mark longer
+				   messages with a trailing ellipsis. */
+				printk(" [");
+				for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
+					if (offs) printk(" ");
+					printk("%02x",msgs[idx].buf[offs]);
+				}
+				if (offs < cnt) printk(" ...");
+ printk("]");
+ }
+ if (idx+1 == num) {
+ printk(" result=%d",ret);
+ }
+ printk("\n");
+ }
+ if (!num) {
+ printk(KERN_INFO
+ "pvrusb2 i2c xfer null transfer result=%d\n",
+ ret);
+ }
+ }
+ return ret;
+}
+
+static int pvr2_i2c_control(struct i2c_adapter *adapter,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+
+static u32 pvr2_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int pvr2_i2c_core_singleton(struct i2c_client *cp,
+ unsigned int cmd,void *arg)
+{
+ int stat;
+ if (!cp) return -EINVAL;
+ if (!(cp->driver)) return -EINVAL;
+ if (!(cp->driver->command)) return -EINVAL;
+ if (!try_module_get(cp->driver->driver.owner)) return -EAGAIN;
+ stat = cp->driver->command(cp,cmd,arg);
+ module_put(cp->driver->driver.owner);
+ return stat;
+}
+
+int pvr2_i2c_client_cmd(struct pvr2_i2c_client *cp,unsigned int cmd,void *arg)
+{
+ int stat;
+ if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) {
+ char buf[100];
+ unsigned int cnt;
+ cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG,
+ buf,sizeof(buf));
+ pvr2_trace(PVR2_TRACE_I2C_CMD,
+ "i2c COMMAND (code=%u 0x%x) to %.*s",
+ cmd,cmd,cnt,buf);
+ }
+ stat = pvr2_i2c_core_singleton(cp->client,cmd,arg);
+ if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) {
+ char buf[100];
+ unsigned int cnt;
+ cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG,
+ buf,sizeof(buf));
+ pvr2_trace(PVR2_TRACE_I2C_CMD,
+ "i2c COMMAND to %.*s (ret=%d)",cnt,buf,stat);
+ }
+ return stat;
+}
+
+int pvr2_i2c_core_cmd(struct pvr2_hdw *hdw,unsigned int cmd,void *arg)
+{
+ struct list_head *item,*nc;
+ struct pvr2_i2c_client *cp;
+ int stat = -EINVAL;
+
+ if (!hdw) return stat;
+
+ mutex_lock(&hdw->i2c_list_lock);
+ list_for_each_safe(item,nc,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ if (!cp->recv_enable) continue;
+ mutex_unlock(&hdw->i2c_list_lock);
+ stat = pvr2_i2c_client_cmd(cp,cmd,arg);
+ mutex_lock(&hdw->i2c_list_lock);
+ }
+ mutex_unlock(&hdw->i2c_list_lock);
+ return stat;
+}
+
+
+static int handler_check(struct pvr2_i2c_client *cp)
+{
+ struct pvr2_i2c_handler *hp = cp->handler;
+ if (!hp) return 0;
+ if (!hp->func_table->check) return 0;
+ return hp->func_table->check(hp->func_data) != 0;
+}
+
+#define BUFSIZE 500
+
+void pvr2_i2c_core_sync(struct pvr2_hdw *hdw)
+{
+ unsigned long msk;
+ unsigned int idx;
+ struct list_head *item,*nc;
+ struct pvr2_i2c_client *cp;
+
+ if (!hdw->i2c_linked) return;
+ if (!(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL)) {
+ return;
+ }
+ mutex_lock(&hdw->i2c_list_lock); do {
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync BEGIN");
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_DETECT) {
+ /* One or more I2C clients have attached since we
+ last synced. So scan the list and identify the
+ new clients. */
+ char *buf;
+ unsigned int cnt;
+ unsigned long amask = 0;
+ buf = kmalloc(BUFSIZE,GFP_KERNEL);
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_DETECT");
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_DETECT;
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,
+ list);
+ if (!cp->detected_flag) {
+ cp->ctl_mask = 0;
+ pvr2_i2c_probe(hdw,cp);
+ cp->detected_flag = !0;
+ msk = cp->ctl_mask;
+ cnt = 0;
+ if (buf) {
+ cnt = pvr2_i2c_client_describe(
+ cp,
+ PVR2_I2C_DETAIL_ALL,
+ buf,BUFSIZE);
+ }
+ trace_i2c("Probed: %.*s",cnt,buf);
+ if (handler_check(cp)) {
+ hdw->i2c_pend_types |=
+ PVR2_I2C_PEND_CLIENT;
+ }
+ cp->pend_mask = msk;
+ hdw->i2c_pend_mask |= msk;
+ hdw->i2c_pend_types |=
+ PVR2_I2C_PEND_REFRESH;
+ }
+ amask |= cp->ctl_mask;
+ }
+ hdw->i2c_active_mask = amask;
+ if (buf) kfree(buf);
+ }
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_STALE) {
+ /* Need to do one or more global updates. Arrange
+ for this to happen. */
+ unsigned long m2;
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: PEND_STALE (0x%lx)",
+ hdw->i2c_stale_mask);
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_STALE;
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,
+ list);
+ m2 = hdw->i2c_stale_mask;
+ m2 &= cp->ctl_mask;
+ m2 &= ~cp->pend_mask;
+ if (m2) {
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: cp=%p setting 0x%lx",
+ cp,m2);
+ cp->pend_mask |= m2;
+ }
+ }
+ hdw->i2c_pend_mask |= hdw->i2c_stale_mask;
+ hdw->i2c_stale_mask = 0;
+ hdw->i2c_pend_types |= PVR2_I2C_PEND_REFRESH;
+ }
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_CLIENT) {
+ /* One or more client handlers are asking for an
+ update. Run through the list of known clients
+ and update each one. */
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_CLIENT");
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_CLIENT;
+ list_for_each_safe(item,nc,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,
+ list);
+ if (!cp->handler) continue;
+ if (!cp->handler->func_table->update) continue;
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: cp=%p update",cp);
+ mutex_unlock(&hdw->i2c_list_lock);
+ cp->handler->func_table->update(
+ cp->handler->func_data);
+ mutex_lock(&hdw->i2c_list_lock);
+ /* If client's update function set some
+ additional pending bits, account for that
+ here. */
+ if (cp->pend_mask & ~hdw->i2c_pend_mask) {
+ hdw->i2c_pend_mask |= cp->pend_mask;
+ hdw->i2c_pend_types |=
+ PVR2_I2C_PEND_REFRESH;
+ }
+ }
+ }
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_REFRESH) {
+ const struct pvr2_i2c_op *opf;
+ unsigned long pm;
+ /* Some actual updates are pending. Walk through
+ each update type and perform it. */
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_REFRESH"
+ " (0x%lx)",hdw->i2c_pend_mask);
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_REFRESH;
+ pm = hdw->i2c_pend_mask;
+ hdw->i2c_pend_mask = 0;
+ for (idx = 0, msk = 1; pm; idx++, msk <<= 1) {
+ if (!(pm & msk)) continue;
+ pm &= ~msk;
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,
+ struct pvr2_i2c_client,
+ list);
+ if (cp->pend_mask & msk) {
+ cp->pend_mask &= ~msk;
+ cp->recv_enable = !0;
+ } else {
+ cp->recv_enable = 0;
+ }
+ }
+ opf = pvr2_i2c_get_op(idx);
+ if (!opf) continue;
+ mutex_unlock(&hdw->i2c_list_lock);
+ opf->update(hdw);
+ mutex_lock(&hdw->i2c_list_lock);
+ }
+ }
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync END");
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+}
+
+int pvr2_i2c_core_check_stale(struct pvr2_hdw *hdw)
+{
+ unsigned long msk,sm,pm;
+ unsigned int idx;
+ const struct pvr2_i2c_op *opf;
+ struct list_head *item;
+ struct pvr2_i2c_client *cp;
+ unsigned int pt = 0;
+
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale BEGIN");
+
+ pm = hdw->i2c_active_mask;
+ sm = 0;
+ for (idx = 0, msk = 1; pm; idx++, msk <<= 1) {
+ if (!(msk & pm)) continue;
+ pm &= ~msk;
+ opf = pvr2_i2c_get_op(idx);
+ if (!opf) continue;
+ if (opf->check(hdw)) {
+ sm |= msk;
+ }
+ }
+ if (sm) pt |= PVR2_I2C_PEND_STALE;
+
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ if (!handler_check(cp)) continue;
+ pt |= PVR2_I2C_PEND_CLIENT;
+ }
+
+ if (pt) {
+ mutex_lock(&hdw->i2c_list_lock); do {
+ hdw->i2c_pend_types |= pt;
+ hdw->i2c_stale_mask |= sm;
+ hdw->i2c_pend_mask |= hdw->i2c_stale_mask;
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ }
+
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: types=0x%x stale=0x%lx pend=0x%lx",
+ hdw->i2c_pend_types,
+ hdw->i2c_stale_mask,
+ hdw->i2c_pend_mask);
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale END");
+
+ return (hdw->i2c_pend_types & PVR2_I2C_PEND_ALL) != 0;
+}
+
+unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *cp,
+ unsigned int detail,
+ char *buf,unsigned int maxlen)
+{
+ unsigned int ccnt,bcnt;
+ int spcfl = 0;
+ const struct pvr2_i2c_op *opf;
+
+ ccnt = 0;
+ if (detail & PVR2_I2C_DETAIL_DEBUG) {
+ bcnt = scnprintf(buf,maxlen,
+ "ctxt=%p ctl_mask=0x%lx",
+ cp,cp->ctl_mask);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ spcfl = !0;
+ }
+ bcnt = scnprintf(buf,maxlen,
+ "%s%s @ 0x%x",
+ (spcfl ? " " : ""),
+ cp->client->name,
+ cp->client->addr);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ if ((detail & PVR2_I2C_DETAIL_HANDLER) &&
+ cp->handler && cp->handler->func_table->describe) {
+ bcnt = scnprintf(buf,maxlen," (");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ bcnt = cp->handler->func_table->describe(
+ cp->handler->func_data,buf,maxlen);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ bcnt = scnprintf(buf,maxlen,")");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ if ((detail & PVR2_I2C_DETAIL_CTLMASK) && cp->ctl_mask) {
+ unsigned int idx;
+ unsigned long msk,sm;
+ int spcfl;
+ bcnt = scnprintf(buf,maxlen," [");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ sm = 0;
+ spcfl = 0;
+ for (idx = 0, msk = 1; msk; idx++, msk <<= 1) {
+ if (!(cp->ctl_mask & msk)) continue;
+ opf = pvr2_i2c_get_op(idx);
+ if (opf) {
+ bcnt = scnprintf(buf,maxlen,"%s%s",
+ spcfl ? " " : "",
+ opf->name);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ spcfl = !0;
+ } else {
+ sm |= msk;
+ }
+ }
+ if (sm) {
+ bcnt = scnprintf(buf,maxlen,"%s%lx",
+ idx != 0 ? " " : "",sm);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ bcnt = scnprintf(buf,maxlen,"]");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ return ccnt;
+}
+
+unsigned int pvr2_i2c_report(struct pvr2_hdw *hdw,
+ char *buf,unsigned int maxlen)
+{
+ unsigned int ccnt,bcnt;
+ struct list_head *item;
+ struct pvr2_i2c_client *cp;
+ ccnt = 0;
+ mutex_lock(&hdw->i2c_list_lock); do {
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ bcnt = pvr2_i2c_client_describe(
+ cp,
+ (PVR2_I2C_DETAIL_HANDLER|
+ PVR2_I2C_DETAIL_CTLMASK),
+ buf,maxlen);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ bcnt = scnprintf(buf,maxlen,"\n");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ return ccnt;
+}
+
+static int pvr2_i2c_attach_inform(struct i2c_client *client)
+{
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data);
+ struct pvr2_i2c_client *cp;
+ int fl = !(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL);
+ cp = kmalloc(sizeof(*cp),GFP_KERNEL);
+ trace_i2c("i2c_attach [client=%s @ 0x%x ctxt=%p]",
+ client->name,
+ client->addr,cp);
+ if (!cp) return -ENOMEM;
+ memset(cp,0,sizeof(*cp));
+ INIT_LIST_HEAD(&cp->list);
+ cp->client = client;
+ mutex_lock(&hdw->i2c_list_lock); do {
+ list_add_tail(&cp->list,&hdw->i2c_clients);
+ hdw->i2c_pend_types |= PVR2_I2C_PEND_DETECT;
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ if (fl) pvr2_hdw_poll_trigger_unlocked(hdw);
+ return 0;
+}
+
+static int pvr2_i2c_detach_inform(struct i2c_client *client)
+{
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data);
+ struct pvr2_i2c_client *cp;
+ struct list_head *item,*nc;
+ unsigned long amask = 0;
+ int foundfl = 0;
+ mutex_lock(&hdw->i2c_list_lock); do {
+ list_for_each_safe(item,nc,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ if (cp->client == client) {
+ trace_i2c("pvr2_i2c_detach"
+ " [client=%s @ 0x%x ctxt=%p]",
+ client->name,
+ client->addr,cp);
+ if (cp->handler &&
+ cp->handler->func_table->detach) {
+ cp->handler->func_table->detach(
+ cp->handler->func_data);
+ }
+ list_del(&cp->list);
+ kfree(cp);
+ foundfl = !0;
+ continue;
+ }
+ amask |= cp->ctl_mask;
+ }
+ hdw->i2c_active_mask = amask;
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ if (!foundfl) {
+ trace_i2c("pvr2_i2c_detach [client=%s @ 0x%x ctxt=<unknown>]",
+ client->name,
+ client->addr);
+ }
+ return 0;
+}
+
+static struct i2c_algorithm pvr2_i2c_algo_template = {
+ .master_xfer = pvr2_i2c_xfer,
+ .algo_control = pvr2_i2c_control,
+ .functionality = pvr2_i2c_functionality,
+};
+
+static struct i2c_adapter pvr2_i2c_adap_template = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_TV_ANALOG,
+ .id = I2C_HW_B_BT848,
+ .client_register = pvr2_i2c_attach_inform,
+ .client_unregister = pvr2_i2c_detach_inform,
+};
+
+static void do_i2c_scan(struct pvr2_hdw *hdw)
+{
+ struct i2c_msg msg[1];
+ int i,rc;
+ msg[0].addr = 0;
+ msg[0].flags = I2C_M_RD;
+ msg[0].len = 0;
+ msg[0].buf = 0;
+ printk("%s: i2c scan beginning\n",hdw->name);
+ for (i = 0; i < 128; i++) {
+ msg[0].addr = i;
+ rc = i2c_transfer(&hdw->i2c_adap,msg,
+ sizeof(msg)/sizeof(msg[0]));
+ if (rc != 1) continue;
+ printk("%s: i2c scan: found device @ 0x%x\n",hdw->name,i);
+ }
+ printk("%s: i2c scan done.\n",hdw->name);
+}
+
+void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
+{
+ unsigned int idx;
+
+ // The default action for all possible I2C addresses is just to do
+ // the transfer normally.
+ for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) {
+ hdw->i2c_func[idx] = pvr2_i2c_basic_op;
+ }
+
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ // If however we're dealing with new hardware, insert some hacks in
+ // the I2C transfer stack to let things work better.
+ if (hdw->hdw_type == PVR2_HDW_TYPE_24XXX) {
+ hdw->i2c_func[0x1b] = i2c_hack_wm8775;
+ hdw->i2c_func[0x44] = i2c_hack_cx25840;
+ }
+#endif
+
+ // Configure the adapter and set up everything else related to it.
+ memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap));
+ memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo));
+ strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name));
+ hdw->i2c_adap.algo = &hdw->i2c_algo;
+ hdw->i2c_adap.algo_data = hdw;
+ hdw->i2c_pend_mask = 0;
+ hdw->i2c_stale_mask = 0;
+ hdw->i2c_active_mask = 0;
+ INIT_LIST_HEAD(&hdw->i2c_clients);
+ mutex_init(&hdw->i2c_list_lock);
+ hdw->i2c_linked = !0;
+ i2c_add_adapter(&hdw->i2c_adap);
+ if (i2c_scan) do_i2c_scan(hdw);
+}
+
+void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
+{
+ if (hdw->i2c_linked) {
+ i2c_del_adapter(&hdw->i2c_adap);
+ hdw->i2c_linked = 0;
+ }
+}
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h
new file mode 100644
index 000000000000..e8af5b0ed3ce
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h
@@ -0,0 +1,96 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_I2C_CORE_H
+#define __PVRUSB2_I2C_CORE_H
+
+#include <linux/list.h>
+#include <linux/i2c.h>
+
+struct pvr2_hdw;
+struct pvr2_i2c_client;
+struct pvr2_i2c_handler;
+struct pvr2_i2c_handler_functions;
+struct pvr2_i2c_op;
+struct pvr2_i2c_op_functions;
+
+struct pvr2_i2c_client {
+ struct i2c_client *client;
+ struct pvr2_i2c_handler *handler;
+ struct list_head list;
+ int detected_flag;
+ int recv_enable;
+ unsigned long pend_mask;
+ unsigned long ctl_mask;
+};
+
+struct pvr2_i2c_handler {
+ void *func_data;
+ const struct pvr2_i2c_handler_functions *func_table;
+};
+
+struct pvr2_i2c_handler_functions {
+ void (*detach)(void *);
+ int (*check)(void *);
+ void (*update)(void *);
+ unsigned int (*describe)(void *,char *,unsigned int);
+};
+
+struct pvr2_i2c_op {
+ int (*check)(struct pvr2_hdw *);
+ void (*update)(struct pvr2_hdw *);
+ const char *name;
+};
+
+void pvr2_i2c_core_init(struct pvr2_hdw *);
+void pvr2_i2c_core_done(struct pvr2_hdw *);
+
+int pvr2_i2c_client_cmd(struct pvr2_i2c_client *,unsigned int cmd,void *arg);
+int pvr2_i2c_core_cmd(struct pvr2_hdw *,unsigned int cmd,void *arg);
+
+int pvr2_i2c_core_check_stale(struct pvr2_hdw *);
+void pvr2_i2c_core_sync(struct pvr2_hdw *);
+unsigned int pvr2_i2c_report(struct pvr2_hdw *,char *buf,unsigned int maxlen);
+#define PVR2_I2C_DETAIL_DEBUG 0x0001
+#define PVR2_I2C_DETAIL_HANDLER 0x0002
+#define PVR2_I2C_DETAIL_CTLMASK 0x0004
+#define PVR2_I2C_DETAIL_ALL (\
+ PVR2_I2C_DETAIL_DEBUG |\
+ PVR2_I2C_DETAIL_HANDLER |\
+ PVR2_I2C_DETAIL_CTLMASK)
+unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *,
+ unsigned int detail_mask,
+ char *buf,unsigned int maxlen);
+
+void pvr2_i2c_probe(struct pvr2_hdw *,struct pvr2_i2c_client *);
+const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx);
+
+#endif /* __PVRUSB2_I2C_CORE_H */
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.c b/drivers/media/video/pvrusb2/pvrusb2-io.c
new file mode 100644
index 000000000000..a984c91f571c
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.c
@@ -0,0 +1,695 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-io.h"
+#include "pvrusb2-debug.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#define BUFFER_SIG 0x47653271
+
+// #define SANITY_CHECK_BUFFERS
+
+
+#ifdef SANITY_CHECK_BUFFERS
+#define BUFFER_CHECK(bp) do { \
+ if ((bp)->signature != BUFFER_SIG) { \
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
+ "Buffer %p is bad at %s:%d", \
+ (bp),__FILE__,__LINE__); \
+ pvr2_buffer_describe(bp,"BadSig"); \
+ BUG(); \
+ } \
+} while (0)
+#else
+#define BUFFER_CHECK(bp) do {} while(0)
+#endif
+
+struct pvr2_stream {
+ /* Buffers queued for reading */
+ struct list_head queued_list;
+ unsigned int q_count;
+ unsigned int q_bcount;
+ /* Buffers with retrieved data */
+ struct list_head ready_list;
+ unsigned int r_count;
+ unsigned int r_bcount;
+ /* Buffers available for use */
+ struct list_head idle_list;
+ unsigned int i_count;
+ unsigned int i_bcount;
+ /* Pointers to all buffers */
+ struct pvr2_buffer **buffers;
+ /* Array size of buffers */
+ unsigned int buffer_slot_count;
+ /* Total buffers actually in circulation */
+ unsigned int buffer_total_count;
+ /* Designed number of buffers to be in circulation */
+ unsigned int buffer_target_count;
+ /* Executed when ready list become non-empty */
+ pvr2_stream_callback callback_func;
+ void *callback_data;
+ /* Context for transfer endpoint */
+ struct usb_device *dev;
+ int endpoint;
+ /* Overhead for mutex enforcement */
+ spinlock_t list_lock;
+ struct mutex mutex;
+ /* Tracking state for tolerating errors */
+ unsigned int fail_count;
+ unsigned int fail_tolerance;
+};
+
+struct pvr2_buffer {
+ int id;
+ int signature;
+ enum pvr2_buffer_state state;
+ void *ptr; /* Pointer to storage area */
+ unsigned int max_count; /* Size of storage area */
+ unsigned int used_count; /* Amount of valid data in storage area */
+ int status; /* Transfer result status */
+ struct pvr2_stream *stream;
+ struct list_head list_overhead;
+ struct urb *purb;
+};
+
+const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
+{
+ switch (st) {
+ case pvr2_buffer_state_none: return "none";
+ case pvr2_buffer_state_idle: return "idle";
+ case pvr2_buffer_state_queued: return "queued";
+ case pvr2_buffer_state_ready: return "ready";
+ }
+ return "unknown";
+}
+
+void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
+{
+ pvr2_trace(PVR2_TRACE_INFO,
+ "buffer%s%s %p state=%s id=%d status=%d"
+ " stream=%p purb=%p sig=0x%x",
+ (msg ? " " : ""),
+ (msg ? msg : ""),
+ bp,
+ (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
+ (bp ? bp->id : 0),
+ (bp ? bp->status : 0),
+ (bp ? bp->stream : 0),
+ (bp ? bp->purb : 0),
+ (bp ? bp->signature : 0));
+}
+
+static void pvr2_buffer_remove(struct pvr2_buffer *bp)
+{
+ unsigned int *cnt;
+ unsigned int *bcnt;
+ unsigned int ccnt;
+ struct pvr2_stream *sp = bp->stream;
+ switch (bp->state) {
+ case pvr2_buffer_state_idle:
+ cnt = &sp->i_count;
+ bcnt = &sp->i_bcount;
+ ccnt = bp->max_count;
+ break;
+ case pvr2_buffer_state_queued:
+ cnt = &sp->q_count;
+ bcnt = &sp->q_bcount;
+ ccnt = bp->max_count;
+ break;
+ case pvr2_buffer_state_ready:
+ cnt = &sp->r_count;
+ bcnt = &sp->r_bcount;
+ ccnt = bp->used_count;
+ break;
+ default:
+ return;
+ }
+ list_del_init(&bp->list_overhead);
+ (*cnt)--;
+ (*bcnt) -= ccnt;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s dec cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
+ bp->state = pvr2_buffer_state_none;
+}
+
+static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
+{
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_none));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ pvr2_buffer_remove(bp);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+}
+
+static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
+{
+ int fl;
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_ready));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ fl = (sp->r_count == 0);
+ pvr2_buffer_remove(bp);
+ list_add_tail(&bp->list_overhead,&sp->ready_list);
+ bp->state = pvr2_buffer_state_ready;
+ (sp->r_count)++;
+ sp->r_bcount += bp->used_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s inc cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),
+ sp->r_bcount,sp->r_count);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ return fl;
+}
+
+static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
+{
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_idle));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ pvr2_buffer_remove(bp);
+ list_add_tail(&bp->list_overhead,&sp->idle_list);
+ bp->state = pvr2_buffer_state_idle;
+ (sp->i_count)++;
+ sp->i_bcount += bp->max_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s inc cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),
+ sp->i_bcount,sp->i_count);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+}
+
+static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
+{
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_queued));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ pvr2_buffer_remove(bp);
+ list_add_tail(&bp->list_overhead,&sp->queued_list);
+ bp->state = pvr2_buffer_state_queued;
+ (sp->q_count)++;
+ sp->q_bcount += bp->max_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s inc cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),
+ sp->q_bcount,sp->q_count);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+}
+
+static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
+{
+ if (bp->state == pvr2_buffer_state_queued) {
+ usb_kill_urb(bp->purb);
+ }
+}
+
+static int pvr2_buffer_init(struct pvr2_buffer *bp,
+ struct pvr2_stream *sp,
+ unsigned int id)
+{
+ memset(bp,0,sizeof(*bp));
+ bp->signature = BUFFER_SIG;
+ bp->id = id;
+ pvr2_trace(PVR2_TRACE_BUF_POOL,
+ "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
+ bp->stream = sp;
+ bp->state = pvr2_buffer_state_none;
+ INIT_LIST_HEAD(&bp->list_overhead);
+ bp->purb = usb_alloc_urb(0,GFP_KERNEL);
+ if (! bp->purb) return -ENOMEM;
+#ifdef SANITY_CHECK_BUFFERS
+ pvr2_buffer_describe(bp,"create");
+#endif
+ return 0;
+}
+
+static void pvr2_buffer_done(struct pvr2_buffer *bp)
+{
+#ifdef SANITY_CHECK_BUFFERS
+ pvr2_buffer_describe(bp,"delete");
+#endif
+ pvr2_buffer_wipe(bp);
+ pvr2_buffer_set_none(bp);
+ bp->signature = 0;
+ bp->stream = 0;
+ if (bp->purb) usb_free_urb(bp->purb);
+ pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
+ " bufferDone %p",bp);
+}
+
+static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
+{
+ int ret;
+ unsigned int scnt;
+
+ /* Allocate buffers pointer array in multiples of 32 entries */
+ if (cnt == sp->buffer_total_count) return 0;
+
+ pvr2_trace(PVR2_TRACE_BUF_POOL,
+ "/*---TRACE_FLOW---*/ poolResize "
+ " stream=%p cur=%d adj=%+d",
+ sp,
+ sp->buffer_total_count,
+ cnt-sp->buffer_total_count);
+
+ scnt = cnt & ~0x1f;
+ if (cnt > scnt) scnt += 0x20;
+
+ if (cnt > sp->buffer_total_count) {
+ if (scnt > sp->buffer_slot_count) {
+ struct pvr2_buffer **nb;
+ nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
+ if (!nb) return -ENOMEM;
+ if (sp->buffer_slot_count) {
+ memcpy(nb,sp->buffers,
+ sp->buffer_slot_count * sizeof(*nb));
+ kfree(sp->buffers);
+ }
+ sp->buffers = nb;
+ sp->buffer_slot_count = scnt;
+ }
+ while (sp->buffer_total_count < cnt) {
+ struct pvr2_buffer *bp;
+ bp = kmalloc(sizeof(*bp),GFP_KERNEL);
+ if (!bp) return -ENOMEM;
+ ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
+ if (ret) {
+ kfree(bp);
+ return -ENOMEM;
+ }
+ sp->buffers[sp->buffer_total_count] = bp;
+ (sp->buffer_total_count)++;
+ pvr2_buffer_set_idle(bp);
+ }
+ } else {
+ while (sp->buffer_total_count > cnt) {
+ struct pvr2_buffer *bp;
+ bp = sp->buffers[sp->buffer_total_count - 1];
+ /* Paranoia */
+ sp->buffers[sp->buffer_total_count - 1] = 0;
+ (sp->buffer_total_count)--;
+ pvr2_buffer_done(bp);
+ kfree(bp);
+ }
+ if (scnt < sp->buffer_slot_count) {
+ struct pvr2_buffer **nb = 0;
+ if (scnt) {
+ nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
+ if (!nb) return -ENOMEM;
+ memcpy(nb,sp->buffers,scnt * sizeof(*nb));
+ }
+ kfree(sp->buffers);
+ sp->buffers = nb;
+ sp->buffer_slot_count = scnt;
+ }
+ }
+ return 0;
+}
+
+static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
+{
+ struct pvr2_buffer *bp;
+ unsigned int cnt;
+
+ if (sp->buffer_total_count == sp->buffer_target_count) return 0;
+
+ pvr2_trace(PVR2_TRACE_BUF_POOL,
+ "/*---TRACE_FLOW---*/"
+ " poolCheck stream=%p cur=%d tgt=%d",
+ sp,sp->buffer_total_count,sp->buffer_target_count);
+
+ if (sp->buffer_total_count < sp->buffer_target_count) {
+ return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
+ }
+
+ cnt = 0;
+ while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
+ bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
+ if (bp->state != pvr2_buffer_state_idle) break;
+ cnt++;
+ }
+ if (cnt) {
+ pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
+ }
+
+ return 0;
+}
+
+static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
+{
+ struct list_head *lp;
+ struct pvr2_buffer *bp1;
+ while ((lp = sp->queued_list.next) != &sp->queued_list) {
+ bp1 = list_entry(lp,struct pvr2_buffer,list_overhead);
+ pvr2_buffer_wipe(bp1);
+ /* At this point, we should be guaranteed that no
+ completion callback may happen on this buffer. But it's
+ possible that it might have completed after we noticed
+ it but before we wiped it. So double check its status
+ here first. */
+ if (bp1->state != pvr2_buffer_state_queued) continue;
+ pvr2_buffer_set_idle(bp1);
+ }
+ if (sp->buffer_total_count != sp->buffer_target_count) {
+ pvr2_stream_achieve_buffer_count(sp);
+ }
+}
+
+static void pvr2_stream_init(struct pvr2_stream *sp)
+{
+ spin_lock_init(&sp->list_lock);
+ mutex_init(&sp->mutex);
+ INIT_LIST_HEAD(&sp->queued_list);
+ INIT_LIST_HEAD(&sp->ready_list);
+ INIT_LIST_HEAD(&sp->idle_list);
+}
+
+static void pvr2_stream_done(struct pvr2_stream *sp)
+{
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ pvr2_stream_buffer_count(sp,0);
+ } while (0); mutex_unlock(&sp->mutex);
+}
+
+static void buffer_complete(struct urb *urb, struct pt_regs *regs)
+{
+ struct pvr2_buffer *bp = urb->context;
+ struct pvr2_stream *sp;
+ unsigned long irq_flags;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ bp->used_count = 0;
+ bp->status = 0;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
+ bp,urb->status,urb->actual_length);
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ if ((!(urb->status)) ||
+ (urb->status == -ENOENT) ||
+ (urb->status == -ECONNRESET) ||
+ (urb->status == -ESHUTDOWN)) {
+ bp->used_count = urb->actual_length;
+ if (sp->fail_count) {
+ pvr2_trace(PVR2_TRACE_TOLERANCE,
+ "stream %p transfer ok"
+ " - fail count reset",sp);
+ sp->fail_count = 0;
+ }
+ } else if (sp->fail_count < sp->fail_tolerance) {
+ // We can tolerate this error, because we're below the
+ // threshold...
+ (sp->fail_count)++;
+ pvr2_trace(PVR2_TRACE_TOLERANCE,
+ "stream %p ignoring error %d"
+ " - fail count increased to %u",
+ sp,urb->status,sp->fail_count);
+ } else {
+ bp->status = urb->status;
+ }
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ pvr2_buffer_set_ready(bp);
+ if (sp && sp->callback_func) {
+ sp->callback_func(sp->callback_data);
+ }
+}
+
+struct pvr2_stream *pvr2_stream_create(void)
+{
+ struct pvr2_stream *sp;
+ sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+ if (!sp) return sp;
+ memset(sp,0,sizeof(*sp));
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
+ pvr2_stream_init(sp);
+ return sp;
+}
+
+void pvr2_stream_destroy(struct pvr2_stream *sp)
+{
+ if (!sp) return;
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
+ pvr2_stream_done(sp);
+ kfree(sp);
+}
+
+void pvr2_stream_setup(struct pvr2_stream *sp,
+ struct usb_device *dev,
+ int endpoint,
+ unsigned int tolerance)
+{
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ sp->dev = dev;
+ sp->endpoint = endpoint;
+ sp->fail_tolerance = tolerance;
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+void pvr2_stream_set_callback(struct pvr2_stream *sp,
+ pvr2_stream_callback func,
+ void *data)
+{
+ unsigned long irq_flags;
+ mutex_lock(&sp->mutex); do {
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ sp->callback_data = data;
+ sp->callback_func = func;
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+/* Query / set the nominal buffer count */
+int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
+{
+ return sp->buffer_target_count;
+}
+
+int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
+{
+ int ret;
+ if (sp->buffer_target_count == cnt) return 0;
+ mutex_lock(&sp->mutex); do {
+ sp->buffer_target_count = cnt;
+ ret = pvr2_stream_achieve_buffer_count(sp);
+ } while(0); mutex_unlock(&sp->mutex);
+ return ret;
+}
+
+struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
+{
+ struct list_head *lp = sp->idle_list.next;
+ if (lp == &sp->idle_list) return 0;
+ return list_entry(lp,struct pvr2_buffer,list_overhead);
+}
+
+struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
+{
+ struct list_head *lp = sp->ready_list.next;
+ if (lp == &sp->ready_list) return 0;
+ return list_entry(lp,struct pvr2_buffer,list_overhead);
+}
+
+struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
+{
+ if (id < 0) return 0;
+ if (id >= sp->buffer_total_count) return 0;
+ return sp->buffers[id];
+}
+
+int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
+{
+ return sp->r_count;
+}
+
+int pvr2_stream_get_idle_count(struct pvr2_stream *sp)
+{
+ return sp->i_count;
+}
+
+void pvr2_stream_flush(struct pvr2_stream *sp)
+{
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+void pvr2_stream_kill(struct pvr2_stream *sp)
+{
+ struct pvr2_buffer *bp;
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ while ((bp = pvr2_stream_get_ready_buffer(sp)) != 0) {
+ pvr2_buffer_set_idle(bp);
+ }
+ if (sp->buffer_total_count != sp->buffer_target_count) {
+ pvr2_stream_achieve_buffer_count(sp);
+ }
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+int pvr2_buffer_queue(struct pvr2_buffer *bp)
+{
+#undef SEED_BUFFER
+#ifdef SEED_BUFFER
+ unsigned int idx;
+ unsigned int val;
+#endif
+ int ret = 0;
+ struct pvr2_stream *sp;
+ if (!bp) return -EINVAL;
+ sp = bp->stream;
+ mutex_lock(&sp->mutex); do {
+ pvr2_buffer_wipe(bp);
+ if (!sp->dev) {
+ ret = -EIO;
+ break;
+ }
+ pvr2_buffer_set_queued(bp);
+#ifdef SEED_BUFFER
+ for (idx = 0; idx < (bp->max_count) / 4; idx++) {
+ val = bp->id << 24;
+ val |= idx;
+ ((unsigned int *)(bp->ptr))[idx] = val;
+ }
+#endif
+ bp->status = -EINPROGRESS;
+ usb_fill_bulk_urb(bp->purb, // struct urb *urb
+ sp->dev, // struct usb_device *dev
+ // endpoint (below)
+ usb_rcvbulkpipe(sp->dev,sp->endpoint),
+ bp->ptr, // void *transfer_buffer
+ bp->max_count, // int buffer_length
+ buffer_complete,
+ bp);
+ usb_submit_urb(bp->purb,GFP_KERNEL);
+ } while(0); mutex_unlock(&sp->mutex);
+ return ret;
+}
+
+int pvr2_buffer_idle(struct pvr2_buffer *bp)
+{
+ struct pvr2_stream *sp;
+ if (!bp) return -EINVAL;
+ sp = bp->stream;
+ mutex_lock(&sp->mutex); do {
+ pvr2_buffer_wipe(bp);
+ pvr2_buffer_set_idle(bp);
+ if (sp->buffer_total_count != sp->buffer_target_count) {
+ pvr2_stream_achieve_buffer_count(sp);
+ }
+ } while(0); mutex_unlock(&sp->mutex);
+ return 0;
+}
+
+int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
+{
+ int ret = 0;
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ if (!bp) return -EINVAL;
+ sp = bp->stream;
+ mutex_lock(&sp->mutex); do {
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ if (bp->state != pvr2_buffer_state_idle) {
+ ret = -EPERM;
+ } else {
+ bp->ptr = ptr;
+ bp->stream->i_bcount -= bp->max_count;
+ bp->max_count = cnt;
+ bp->stream->i_bcount += bp->max_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferPool "
+ " %8s cap cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(
+ pvr2_buffer_state_idle),
+ bp->stream->i_bcount,bp->stream->i_count);
+ }
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ } while(0); mutex_unlock(&sp->mutex);
+ return ret;
+}
+
+unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
+{
+ return bp->used_count;
+}
+
+int pvr2_buffer_get_status(struct pvr2_buffer *bp)
+{
+ return bp->status;
+}
+
+enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *bp)
+{
+ return bp->state;
+}
+
+int pvr2_buffer_get_id(struct pvr2_buffer *bp)
+{
+ return bp->id;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.h b/drivers/media/video/pvrusb2/pvrusb2-io.h
new file mode 100644
index 000000000000..65e11385b2b3
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.h
@@ -0,0 +1,102 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_IO_H
+#define __PVRUSB2_IO_H
+
+#include <linux/usb.h>
+#include <linux/list.h>
+
+typedef void (*pvr2_stream_callback)(void *);
+
+enum pvr2_buffer_state {
+ pvr2_buffer_state_none = 0, // Not on any list
+ pvr2_buffer_state_idle = 1, // Buffer is ready to be used again
+ pvr2_buffer_state_queued = 2, // Buffer has been queued for filling
+ pvr2_buffer_state_ready = 3, // Buffer has data available
+};
+
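+/* Typical buffer lifecycle, as implemented in pvrusb2-io.c: a buffer is
+   created and immediately placed on the idle list, becomes queued when it
+   is handed to the USB core by pvr2_buffer_queue(), becomes ready when its
+   URB completes, and is returned to idle by pvr2_buffer_idle() once the
+   consumer is done with it. */
+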
+struct pvr2_stream;
+struct pvr2_buffer;
+
+const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);
+
+/* Initialize / tear down stream structure */
+struct pvr2_stream *pvr2_stream_create(void);
+void pvr2_stream_destroy(struct pvr2_stream *);
+void pvr2_stream_setup(struct pvr2_stream *,
+ struct usb_device *dev,int endpoint,
+ unsigned int tolerance);
+void pvr2_stream_set_callback(struct pvr2_stream *,
+ pvr2_stream_callback func,
+ void *data);
+
+/* Query / set the nominal buffer count */
+int pvr2_stream_get_buffer_count(struct pvr2_stream *);
+int pvr2_stream_set_buffer_count(struct pvr2_stream *,unsigned int);
+
+/* Get a pointer to a buffer that is either idle, ready, or specified by
+   its id. */
+struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *);
+struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *);
+struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id);
+
+/* Find out how many buffers are idle or ready */
+int pvr2_stream_get_idle_count(struct pvr2_stream *);
+int pvr2_stream_get_ready_count(struct pvr2_stream *);
+
+/* Kill all pending operations */
+void pvr2_stream_flush(struct pvr2_stream *);
+
+/* Kill all pending buffers and throw away any ready buffers as well */
+void pvr2_stream_kill(struct pvr2_stream *);
+
+/* Set up the actual storage for a buffer */
+int pvr2_buffer_set_buffer(struct pvr2_buffer *,void *ptr,unsigned int cnt);
+
+/* Find out size of data in the given ready buffer */
+unsigned int pvr2_buffer_get_count(struct pvr2_buffer *);
+
+/* Retrieve completion code for given ready buffer */
+int pvr2_buffer_get_status(struct pvr2_buffer *);
+
+/* Retrieve state of given buffer */
+enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *);
+
+/* Retrieve ID of given buffer */
+int pvr2_buffer_get_id(struct pvr2_buffer *);
+
+/* Start reading into given buffer (kill it if needed) */
+int pvr2_buffer_queue(struct pvr2_buffer *);
+
+/* Move buffer back to idle pool (kill it if needed) */
+int pvr2_buffer_idle(struct pvr2_buffer *);
+
+#endif /* __PVRUSB2_IO_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
new file mode 100644
index 000000000000..49da062e3271
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
@@ -0,0 +1,513 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-ioread.h"
+#include "pvrusb2-debug.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
+#define BUFFER_COUNT 32
+#define BUFFER_SIZE PAGE_ALIGN(0x4000)
+
+struct pvr2_ioread {
+ struct pvr2_stream *stream;
+ char *buffer_storage[BUFFER_COUNT];
+ char *sync_key_ptr;
+ unsigned int sync_key_len;
+ unsigned int sync_buf_offs;
+ unsigned int sync_state;
+ unsigned int sync_trashed_count;
+ int enabled; // Streaming is on
+ int spigot_open; // OK to pass data to client
+ int stream_running; // Passing data to client now
+
+ /* State relevant to current buffer being read */
+ struct pvr2_buffer *c_buf;
+ char *c_data_ptr;
+ unsigned int c_data_len;
+ unsigned int c_data_offs;
+ struct mutex mutex;
+};
+
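+/* Meaning of sync_state above, as used by the functions that follow:
+   0 means data is passed straight through, 1 means we are still scanning
+   the stream for the sync key (pvr2_ioread_filter), and 2 means the key
+   has been found and is being replayed into the output before normal
+   copying resumes (pvr2_ioread_read). */
+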
+static int pvr2_ioread_init(struct pvr2_ioread *cp)
+{
+ unsigned int idx;
+
+ cp->stream = 0;
+ mutex_init(&cp->mutex);
+
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL);
+ if (!(cp->buffer_storage[idx])) break;
+ }
+
+ if (idx < BUFFER_COUNT) {
+ // An allocation appears to have failed
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ if (!(cp->buffer_storage[idx])) continue;
+ kfree(cp->buffer_storage[idx]);
+ }
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void pvr2_ioread_done(struct pvr2_ioread *cp)
+{
+ unsigned int idx;
+
+ pvr2_ioread_setup(cp,0);
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ if (!(cp->buffer_storage[idx])) continue;
+ kfree(cp->buffer_storage[idx]);
+ }
+}
+
+struct pvr2_ioread *pvr2_ioread_create(void)
+{
+ struct pvr2_ioread *cp;
+ cp = kmalloc(sizeof(*cp),GFP_KERNEL);
+ if (!cp) return 0;
+ pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp);
+ memset(cp,0,sizeof(*cp));
+ if (pvr2_ioread_init(cp) < 0) {
+ kfree(cp);
+ return 0;
+ }
+ return cp;
+}
+
+void pvr2_ioread_destroy(struct pvr2_ioread *cp)
+{
+ if (!cp) return;
+ pvr2_ioread_done(cp);
+ pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp);
+ if (cp->sync_key_ptr) {
+ kfree(cp->sync_key_ptr);
+ cp->sync_key_ptr = 0;
+ }
+ kfree(cp);
+}
+
+void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp,
+ const char *sync_key_ptr,
+ unsigned int sync_key_len)
+{
+ if (!cp) return;
+
+ if (!sync_key_ptr) sync_key_len = 0;
+ if ((sync_key_len == cp->sync_key_len) &&
+ ((!sync_key_len) ||
+ (!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return;
+
+ if (sync_key_len != cp->sync_key_len) {
+ if (cp->sync_key_ptr) {
+ kfree(cp->sync_key_ptr);
+ cp->sync_key_ptr = 0;
+ }
+ cp->sync_key_len = 0;
+ if (sync_key_len) {
+ cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL);
+ if (cp->sync_key_ptr) {
+ cp->sync_key_len = sync_key_len;
+ }
+ }
+ }
+ if (!cp->sync_key_len) return;
+ memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len);
+}
+
+static void pvr2_ioread_stop(struct pvr2_ioread *cp)
+{
+ if (!(cp->enabled)) return;
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp);
+ pvr2_stream_kill(cp->stream);
+ cp->c_buf = 0;
+ cp->c_data_ptr = 0;
+ cp->c_data_len = 0;
+ cp->c_data_offs = 0;
+ cp->enabled = 0;
+ cp->stream_running = 0;
+ cp->spigot_open = 0;
+ if (cp->sync_state) {
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ sync_state <== 0");
+ cp->sync_state = 0;
+ }
+}
+
+static int pvr2_ioread_start(struct pvr2_ioread *cp)
+{
+ int stat;
+ struct pvr2_buffer *bp;
+ if (cp->enabled) return 0;
+ if (!(cp->stream)) return 0;
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp);
+ while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != 0) {
+ stat = pvr2_buffer_queue(bp);
+ if (stat < 0) {
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_start id=%p"
+ " error=%d",
+ cp,stat);
+ pvr2_ioread_stop(cp);
+ return stat;
+ }
+ }
+ cp->enabled = !0;
+ cp->c_buf = 0;
+ cp->c_data_ptr = 0;
+ cp->c_data_len = 0;
+ cp->c_data_offs = 0;
+ cp->stream_running = 0;
+ if (cp->sync_key_len) {
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ sync_state <== 1");
+ cp->sync_state = 1;
+ cp->sync_trashed_count = 0;
+ cp->sync_buf_offs = 0;
+ }
+ cp->spigot_open = 0;
+ return 0;
+}
+
+struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp)
+{
+ return cp->stream;
+}
+
+int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
+{
+ int ret;
+ unsigned int idx;
+ struct pvr2_buffer *bp;
+
+ mutex_lock(&cp->mutex); do {
+ if (cp->stream) {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_setup (tear-down) id=%p",cp);
+ pvr2_ioread_stop(cp);
+ pvr2_stream_kill(cp->stream);
+ pvr2_stream_set_buffer_count(cp->stream,0);
+ cp->stream = 0;
+ }
+ if (sp) {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_setup (setup) id=%p",cp);
+ pvr2_stream_kill(sp);
+ ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
+ if (ret < 0) return ret;
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ bp = pvr2_stream_get_buffer(sp,idx);
+ pvr2_buffer_set_buffer(bp,
+ cp->buffer_storage[idx],
+ BUFFER_SIZE);
+ }
+ cp->stream = sp;
+ }
+ } while (0); mutex_unlock(&cp->mutex);
+
+ return 0;
+}
+
+int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl)
+{
+ int ret = 0;
+ if ((!fl) == (!(cp->enabled))) return ret;
+
+ mutex_lock(&cp->mutex); do {
+ if (fl) {
+ ret = pvr2_ioread_start(cp);
+ } else {
+ pvr2_ioread_stop(cp);
+ }
+ } while (0); mutex_unlock(&cp->mutex);
+ return ret;
+}
+
+int pvr2_ioread_get_enabled(struct pvr2_ioread *cp)
+{
+ return cp->enabled != 0;
+}
+
+int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
+{
+ int stat;
+
+ while (cp->c_data_len <= cp->c_data_offs) {
+ if (cp->c_buf) {
+ // Flush out current buffer first.
+ stat = pvr2_buffer_queue(cp->c_buf);
+ if (stat < 0) {
+ // Streaming error...
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_read id=%p"
+ " queue_error=%d",
+ cp,stat);
+ pvr2_ioread_stop(cp);
+ return 0;
+ }
+ cp->c_buf = 0;
+ cp->c_data_ptr = 0;
+ cp->c_data_len = 0;
+ cp->c_data_offs = 0;
+ }
+ // Now get a freshly filled buffer.
+ cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream);
+ if (!cp->c_buf) break; // Nothing ready; done.
+ cp->c_data_len = pvr2_buffer_get_count(cp->c_buf);
+ if (!cp->c_data_len) {
+ // Nothing transferred. Was there an error?
+ stat = pvr2_buffer_get_status(cp->c_buf);
+ if (stat < 0) {
+ // Streaming error...
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_read id=%p"
+ " buffer_error=%d",
+ cp,stat);
+ pvr2_ioread_stop(cp);
+ // Give up.
+ return 0;
+ }
+ // Start over...
+ continue;
+ }
+ cp->c_data_offs = 0;
+ cp->c_data_ptr = cp->buffer_storage[
+ pvr2_buffer_get_id(cp->c_buf)];
+ }
+ return !0;
+}
+
+void pvr2_ioread_filter(struct pvr2_ioread *cp)
+{
+ unsigned int idx;
+ if (!cp->enabled) return;
+ if (cp->sync_state != 1) return;
+
+ // Search the stream for our synchronization key. This is made
+ // complicated by the fact that in order to be honest with
+ // ourselves here we must search across buffer boundaries...
+ mutex_lock(&cp->mutex); while (1) {
+ // Ensure we have a buffer
+ if (!pvr2_ioread_get_buffer(cp)) break;
+ if (!cp->c_data_len) break;
+
+ // Now walk the buffer contents until we match the key or
+ // run out of buffer data.
+ for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) {
+ if (cp->sync_buf_offs >= cp->sync_key_len) break;
+ if (cp->c_data_ptr[idx] ==
+ cp->sync_key_ptr[cp->sync_buf_offs]) {
+ // Found the next key byte
+ (cp->sync_buf_offs)++;
+ } else {
+ // Whoops, mismatched. Start key over...
+ cp->sync_buf_offs = 0;
+ }
+ }
+
+ // Consume what we've walked through
+ cp->c_data_offs += idx;
+ cp->sync_trashed_count += idx;
+
+ // If we've found the key, then update state and get out.
+ if (cp->sync_buf_offs >= cp->sync_key_len) {
+ cp->sync_trashed_count -= cp->sync_key_len;
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " sync_state <== 2 (skipped %u bytes)",
+ cp->sync_trashed_count);
+ cp->sync_state = 2;
+ cp->sync_buf_offs = 0;
+ break;
+ }
+
+ if (cp->c_data_offs < cp->c_data_len) {
+ // Sanity check - should NEVER get here
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "ERROR: pvr2_ioread filter sync problem"
+ " len=%u offs=%u",
+ cp->c_data_len,cp->c_data_offs);
+ // Get out so we don't get stuck in an infinite
+ // loop.
+ break;
+ }
+
+ continue; // (for clarity)
+ } mutex_unlock(&cp->mutex);
+}
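+
+/* Illustrative sketch (not taken from this patch): stripped of the driver
+   bookkeeping, the matching loop above is an incremental prefix match whose
+   state survives chunk boundaries.  With hypothetical names:
+
+       // Returns the key offset reached so far; call once per data chunk.
+       static unsigned int match_across_chunks(const char *key,
+                                               unsigned int key_len,
+                                               unsigned int key_offs,
+                                               const char *chunk,
+                                               unsigned int chunk_len)
+       {
+               unsigned int idx;
+               for (idx = 0; idx < chunk_len; idx++) {
+                       if (key_offs >= key_len) break; // whole key matched
+                       if (chunk[idx] == key[key_offs]) {
+                               key_offs++;             // next key byte
+                       } else {
+                               key_offs = 0;           // mismatch: restart key
+                       }
+               }
+               return key_offs;
+       }
+
+   pvr2_ioread_filter() keeps the equivalent of key_offs in
+   cp->sync_buf_offs, which is why the search can span buffers. */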
+
+int pvr2_ioread_avail(struct pvr2_ioread *cp)
+{
+ int ret;
+ if (!(cp->enabled)) {
+ // Stream is not enabled; so this is an I/O error
+ return -EIO;
+ }
+
+ if (cp->sync_state == 1) {
+ pvr2_ioread_filter(cp);
+ if (cp->sync_state == 1) return -EAGAIN;
+ }
+
+ ret = 0;
+ if (cp->stream_running) {
+ if (!pvr2_stream_get_ready_count(cp->stream)) {
+ // No data available at all right now.
+ ret = -EAGAIN;
+ }
+ } else {
+ if (pvr2_stream_get_ready_count(cp->stream) < BUFFER_COUNT/2) {
+ // Haven't buffered up enough yet; try again later
+ ret = -EAGAIN;
+ }
+ }
+
+ if ((!(cp->spigot_open)) != (!(ret == 0))) {
+ cp->spigot_open = (ret == 0);
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ data is %s",
+ cp->spigot_open ? "available" : "pending");
+ }
+
+ return ret;
+}
+
+int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
+{
+ unsigned int copied_cnt;
+ unsigned int bcnt;
+ const char *src;
+ int stat;
+ int ret = 0;
+ unsigned int req_cnt = cnt;
+
+ if (!cnt) {
+ pvr2_trace(PVR2_TRACE_TRAP,
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
+ " ZERO Request? Returning zero.",cp);
+ return 0;
+ }
+
+ stat = pvr2_ioread_avail(cp);
+ if (stat < 0) return stat;
+
+ cp->stream_running = !0;
+
+ mutex_lock(&cp->mutex); do {
+
+ // Suck data out of the buffers and copy to the user
+ copied_cnt = 0;
+ if (!buf) cnt = 0;
+ while (1) {
+ if (!pvr2_ioread_get_buffer(cp)) {
+ ret = -EIO;
+ break;
+ }
+
+ if (!cnt) break;
+
+ if (cp->sync_state == 2) {
+ // We're repeating the sync key data into
+ // the stream.
+ src = cp->sync_key_ptr + cp->sync_buf_offs;
+ bcnt = cp->sync_key_len - cp->sync_buf_offs;
+ } else {
+ // Normal buffer copy
+ src = cp->c_data_ptr + cp->c_data_offs;
+ bcnt = cp->c_data_len - cp->c_data_offs;
+ }
+
+ if (!bcnt) break;
+
+ // Don't run past user's buffer
+ if (bcnt > cnt) bcnt = cnt;
+
+ if (copy_to_user(buf,src,bcnt)) {
+ // User supplied a bad pointer?
+ // Give up - this *will* cause data
+ // to be lost.
+ ret = -EFAULT;
+ break;
+ }
+ cnt -= bcnt;
+ buf += bcnt;
+ copied_cnt += bcnt;
+
+ if (cp->sync_state == 2) {
+ // Update offset inside sync key that we're
+ // repeating back out.
+ cp->sync_buf_offs += bcnt;
+ if (cp->sync_buf_offs >= cp->sync_key_len) {
+ // Consumed entire key; switch mode
+ // to normal.
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " sync_state <== 0");
+ cp->sync_state = 0;
+ }
+ } else {
+ // Update buffer offset.
+ cp->c_data_offs += bcnt;
+ }
+ }
+
+ } while (0); mutex_unlock(&cp->mutex);
+
+ if (!ret) {
+ if (copied_cnt) {
+ // If anything was copied, return that count
+ ret = copied_cnt;
+ } else {
+ // Nothing copied; suggest to the caller that
+ // another attempt be made later
+ ret = -EAGAIN;
+ }
+ }
+
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ pvr2_ioread_read"
+ " id=%p request=%d result=%d",
+ cp,req_cnt,ret);
+ return ret;
+}
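+
+/* Return value summary for the routine above: a positive value is the
+   number of bytes copied to the user, -EAGAIN means nothing is buffered
+   yet (retry later), -EIO means the stream is not enabled or was stopped
+   after an error, and -EFAULT means the user buffer could not be written.
+   An illustrative caller (hypothetical, not part of this patch) might do:
+
+       int ret = pvr2_ioread_read(cp, ubuf, cnt);
+       while (ret == -EAGAIN) {
+               // A real caller would sleep or poll here rather than spin.
+               ret = pvr2_ioread_read(cp, ubuf, cnt);
+       }
+*/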
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.h b/drivers/media/video/pvrusb2/pvrusb2-ioread.h
new file mode 100644
index 000000000000..6b002597f5de
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_IOREAD_H
+#define __PVRUSB2_IOREAD_H
+
+#include "pvrusb2-io.h"
+
+struct pvr2_ioread;
+
+struct pvr2_ioread *pvr2_ioread_create(void);
+void pvr2_ioread_destroy(struct pvr2_ioread *);
+int pvr2_ioread_setup(struct pvr2_ioread *,struct pvr2_stream *);
+struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *);
+void pvr2_ioread_set_sync_key(struct pvr2_ioread *,
+ const char *sync_key_ptr,
+ unsigned int sync_key_len);
+int pvr2_ioread_set_enabled(struct pvr2_ioread *,int fl);
+int pvr2_ioread_get_enabled(struct pvr2_ioread *);
+int pvr2_ioread_read(struct pvr2_ioread *,void __user *buf,unsigned int cnt);
+int pvr2_ioread_avail(struct pvr2_ioread *);
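+
+/* Illustrative lifecycle sketch (hypothetical, based only on the
+   prototypes above; sp, key and key_len are placeholders):
+
+       struct pvr2_ioread *iop = pvr2_ioread_create();
+       if (iop) {
+               pvr2_ioread_setup(iop, sp);            // attach a pvr2_stream
+               pvr2_ioread_set_sync_key(iop, key, key_len);
+               pvr2_ioread_set_enabled(iop, !0);
+               // ... pvr2_ioread_read(iop, ubuf, cnt) as data is wanted ...
+               pvr2_ioread_set_enabled(iop, 0);
+               pvr2_ioread_destroy(iop);
+       }
+*/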
+
+#endif /* __PVRUSB2_IOREAD_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c
new file mode 100644
index 000000000000..b95248274ed0
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-main.c
@@ -0,0 +1,172 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/smp_lock.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-context.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-v4l2.h"
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+#include "pvrusb2-sysfs.h"
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+#define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>"
+#define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner"
+#define DRIVER_VERSION "V4L in-tree version"
+
+#define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \
+ PVR2_TRACE_INFO| \
+ PVR2_TRACE_TOLERANCE| \
+ PVR2_TRACE_TRAP| \
+ 0)
+
+int pvrusb2_debug = DEFAULT_DEBUG_MASK;
+
+module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug trace mask");
+
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+static struct pvr2_sysfs_class *class_ptr = 0;
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+static void pvr_setup_attach(struct pvr2_context *pvr)
+{
+ /* Create association with v4l layer */
+ pvr2_v4l2_create(pvr);
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+ pvr2_sysfs_create(pvr,class_ptr);
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+}
+
+static int pvr_probe(struct usb_interface *intf,
+ const struct usb_device_id *devid)
+{
+ struct pvr2_context *pvr;
+
+ /* Create underlying hardware interface */
+ pvr = pvr2_context_create(intf,devid,pvr_setup_attach);
+ if (!pvr) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to create hdw handler");
+ return -ENOMEM;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr);
+
+ usb_set_intfdata(intf, pvr);
+
+ return 0;
+}
+
+/*
+ * pvr_disconnect()
+ *
+ */
+static void pvr_disconnect(struct usb_interface *intf)
+{
+ struct pvr2_context *pvr = usb_get_intfdata(intf);
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr);
+
+ usb_set_intfdata (intf, NULL);
+ pvr2_context_disconnect(pvr);
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr);
+
+}
+
+static struct usb_driver pvr_driver = {
+ .name       = "pvrusb2",
+ .id_table   = pvr2_device_table,
+ .probe      = pvr_probe,
+ .disconnect = pvr_disconnect
+};
+
+/*
+ * pvr_init() / pvr_exit()
+ *
+ * This code is run to initialize/exit the driver.
+ *
+ */
+static int __init pvr_init(void)
+{
+ int ret;
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_init");
+
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+ class_ptr = pvr2_sysfs_class_create();
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+ ret = usb_register(&pvr_driver);
+
+ if (ret == 0)
+ info(DRIVER_DESC " : " DRIVER_VERSION);
+ if (pvrusb2_debug) info("Debug mask is %d (0x%x)",
+ pvrusb2_debug,pvrusb2_debug);
+
+ return ret;
+}
+
+static void __exit pvr_exit(void)
+{
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_exit");
+
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+ pvr2_sysfs_class_destroy(class_ptr);
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+ usb_deregister(&pvr_driver);
+}
+
+module_init(pvr_init);
+module_exit(pvr_exit);
+
+/* Mike Isely <mcisely@pobox.com> 11-Mar-2006: See pvrusb2-hdw.c for
+ MODULE_DEVICE_TABLE(). We have to declare that attribute there
+ because that's where the device table actually is now and it seems
+ that certain gcc configurations get angry if MODULE_DEVICE_TABLE()
+ is used on what ends up being an external symbol. */
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.c b/drivers/media/video/pvrusb2/pvrusb2-std.c
new file mode 100644
index 000000000000..134063693643
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-std.c
@@ -0,0 +1,408 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-std.h"
+#include "pvrusb2-debug.h"
+#include <asm/string.h>
+#include <linux/slab.h>
+
+struct std_name {
+ const char *name;
+ v4l2_std_id id;
+};
+
+
+#define CSTD_PAL \
+ (V4L2_STD_PAL_B| \
+ V4L2_STD_PAL_B1| \
+ V4L2_STD_PAL_G| \
+ V4L2_STD_PAL_H| \
+ V4L2_STD_PAL_I| \
+ V4L2_STD_PAL_D| \
+ V4L2_STD_PAL_D1| \
+ V4L2_STD_PAL_K| \
+ V4L2_STD_PAL_M| \
+ V4L2_STD_PAL_N| \
+ V4L2_STD_PAL_Nc| \
+ V4L2_STD_PAL_60)
+
+#define CSTD_NTSC \
+ (V4L2_STD_NTSC_M| \
+ V4L2_STD_NTSC_M_JP| \
+ V4L2_STD_NTSC_M_KR| \
+ V4L2_STD_NTSC_443)
+
+#define CSTD_SECAM \
+ (V4L2_STD_SECAM_B| \
+ V4L2_STD_SECAM_D| \
+ V4L2_STD_SECAM_G| \
+ V4L2_STD_SECAM_H| \
+ V4L2_STD_SECAM_K| \
+ V4L2_STD_SECAM_K1| \
+ V4L2_STD_SECAM_L| \
+ V4L2_STD_SECAM_LC)
+
+#define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B)
+#define TSTD_B1 (V4L2_STD_PAL_B1)
+#define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D)
+#define TSTD_D1 (V4L2_STD_PAL_D1)
+#define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G)
+#define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H)
+#define TSTD_I (V4L2_STD_PAL_I)
+#define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K)
+#define TSTD_K1 (V4L2_STD_SECAM_K1)
+#define TSTD_L (V4L2_STD_SECAM_L)
+#define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M)
+#define TSTD_N (V4L2_STD_PAL_N)
+#define TSTD_Nc (V4L2_STD_PAL_Nc)
+#define TSTD_60 (V4L2_STD_PAL_60)
+
+#define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_SECAM)
+
+/* Mapping of standard bits to color system */
+const static struct std_name std_groups[] = {
+ {"PAL",CSTD_PAL},
+ {"NTSC",CSTD_NTSC},
+ {"SECAM",CSTD_SECAM},
+};
+
+/* Mapping of standard bits to modulation system */
+const static struct std_name std_items[] = {
+ {"B",TSTD_B},
+ {"B1",TSTD_B1},
+ {"D",TSTD_D},
+ {"D1",TSTD_D1},
+ {"G",TSTD_G},
+ {"H",TSTD_H},
+ {"I",TSTD_I},
+ {"K",TSTD_K},
+ {"K1",TSTD_K1},
+ {"L",TSTD_L},
+ {"LC",V4L2_STD_SECAM_LC},
+ {"M",TSTD_M},
+ {"Mj",V4L2_STD_NTSC_M_JP},
+ {"443",V4L2_STD_NTSC_443},
+ {"Mk",V4L2_STD_NTSC_M_KR},
+ {"N",TSTD_N},
+ {"Nc",TSTD_Nc},
+ {"60",TSTD_60},
+};
+
+
+// Search an array of std_name structures and return a pointer to the
+// element with the matching name.
+static const struct std_name *find_std_name(const struct std_name *arrPtr,
+ unsigned int arrSize,
+ const char *bufPtr,
+ unsigned int bufSize)
+{
+ unsigned int idx;
+ const struct std_name *p;
+ for (idx = 0; idx < arrSize; idx++) {
+ p = arrPtr + idx;
+ if (strlen(p->name) != bufSize) continue;
+ if (!memcmp(bufPtr,p->name,bufSize)) return p;
+ }
+ return 0;
+}
+
+
+int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
+ unsigned int bufSize)
+{
+ v4l2_std_id id = 0;
+ v4l2_std_id cmsk = 0;
+ v4l2_std_id t;
+ int mMode = 0;
+ unsigned int cnt;
+ char ch;
+ const struct std_name *sp;
+
+ while (bufSize) {
+ if (!mMode) {
+ cnt = 0;
+ while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++;
+ if (cnt >= bufSize) return 0; // No more characters
+ sp = find_std_name(
+ std_groups,
+ sizeof(std_groups)/sizeof(std_groups[0]),
+ bufPtr,cnt);
+ if (!sp) return 0; // Illegal color system name
+ cnt++;
+ bufPtr += cnt;
+ bufSize -= cnt;
+ mMode = !0;
+ cmsk = sp->id;
+ continue;
+ }
+ cnt = 0;
+ while (cnt < bufSize) {
+ ch = bufPtr[cnt];
+ if (ch == ';') {
+ mMode = 0;
+ break;
+ }
+ if (ch == '/') break;
+ cnt++;
+ }
+ sp = find_std_name(std_items,
+ sizeof(std_items)/sizeof(std_items[0]),
+ bufPtr,cnt);
+ if (!sp) return 0; // Illegal modulation system ID
+ t = sp->id & cmsk;
+ if (!t) return 0; // Specific color + modulation system illegal
+ id |= t;
+ if (cnt < bufSize) cnt++;
+ bufPtr += cnt;
+ bufSize -= cnt;
+ }
+
+ if (idPtr) *idPtr = id;
+ return !0;
+}
+
+
+unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
+ v4l2_std_id id)
+{
+ unsigned int idx1,idx2;
+ const struct std_name *ip,*gp;
+ int gfl,cfl;
+ unsigned int c1,c2;
+ cfl = 0;
+ c1 = 0;
+ for (idx1 = 0;
+ idx1 < sizeof(std_groups)/sizeof(std_groups[0]);
+ idx1++) {
+ gp = std_groups + idx1;
+ gfl = 0;
+ for (idx2 = 0;
+ idx2 < sizeof(std_items)/sizeof(std_items[0]);
+ idx2++) {
+ ip = std_items + idx2;
+ if (!(gp->id & ip->id & id)) continue;
+ if (!gfl) {
+ if (cfl) {
+ c2 = scnprintf(bufPtr,bufSize,";");
+ c1 += c2;
+ bufSize -= c2;
+ bufPtr += c2;
+ }
+ cfl = !0;
+ c2 = scnprintf(bufPtr,bufSize,
+ "%s-",gp->name);
+ gfl = !0;
+ } else {
+ c2 = scnprintf(bufPtr,bufSize,"/");
+ }
+ c1 += c2;
+ bufSize -= c2;
+ bufPtr += c2;
+ c2 = scnprintf(bufPtr,bufSize,
+ "%s",ip->name);
+ c1 += c2;
+ bufSize -= c2;
+ bufPtr += c2;
+ }
+ }
+ return c1;
+}
+
+
+// Template data for possible enumerated video standards. Here we group
+// standards which share common frame rates and resolution.
+static struct v4l2_standard generic_standards[] = {
+ {
+ .id = (TSTD_B|TSTD_B1|
+ TSTD_D|TSTD_D1|
+ TSTD_G|
+ TSTD_H|
+ TSTD_I|
+ TSTD_K|TSTD_K1|
+ TSTD_L|
+ V4L2_STD_SECAM_LC |
+ TSTD_N|TSTD_Nc),
+ .frameperiod =
+ {
+ .numerator = 1,
+ .denominator= 25
+ },
+ .framelines = 625,
+ .reserved = {0,0,0,0}
+ }, {
+ .id = (TSTD_M|
+ V4L2_STD_NTSC_M_JP|
+ V4L2_STD_NTSC_M_KR),
+ .frameperiod =
+ {
+ .numerator = 1001,
+ .denominator= 30000
+ },
+ .framelines = 525,
+ .reserved = {0,0,0,0}
+ }, { // This is a total wild guess
+ .id = (TSTD_60),
+ .frameperiod =
+ {
+ .numerator = 1001,
+ .denominator= 30000
+ },
+ .framelines = 525,
+ .reserved = {0,0,0,0}
+ }, { // This is a total wild guess
+ .id = V4L2_STD_NTSC_443,
+ .frameperiod =
+ {
+ .numerator = 1001,
+ .denominator= 30000
+ },
+ .framelines = 525,
+ .reserved = {0,0,0,0}
+ }
+};
+
+#define generic_standards_cnt (sizeof(generic_standards)/sizeof(generic_standards[0]))
+
+static struct v4l2_standard *match_std(v4l2_std_id id)
+{
+ unsigned int idx;
+ for (idx = 0; idx < generic_standards_cnt; idx++) {
+ if (generic_standards[idx].id & id) {
+ return generic_standards + idx;
+ }
+ }
+ return 0;
+}
+
+static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id)
+{
+ struct v4l2_standard *template;
+ int idx;
+ unsigned int bcnt;
+ template = match_std(id);
+ if (!template) return 0;
+ idx = std->index;
+ memcpy(std,template,sizeof(*template));
+ std->index = idx;
+ std->id = id;
+ bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id);
+ std->name[bcnt] = 0;
+ pvr2_trace(PVR2_TRACE_INIT,"Set up standard idx=%u name=%s",
+ std->index,std->name);
+ return !0;
+}
+
+/* These are special cases of combined standards that we should enumerate
+ separately if the component pieces are present. */
+static v4l2_std_id std_mixes[] = {
+ V4L2_STD_PAL_B | V4L2_STD_PAL_G,
+ V4L2_STD_PAL_D | V4L2_STD_PAL_K,
+ V4L2_STD_SECAM_B | V4L2_STD_SECAM_G,
+ V4L2_STD_SECAM_D | V4L2_STD_SECAM_K,
+};
+
+struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
+ v4l2_std_id id)
+{
+ unsigned int std_cnt = 0;
+ unsigned int idx,bcnt,idx2;
+ v4l2_std_id idmsk,cmsk,fmsk;
+ struct v4l2_standard *stddefs;
+
+ if (pvrusb2_debug & PVR2_TRACE_INIT) {
+ char buf[50];
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id);
+ pvr2_trace(
+ PVR2_TRACE_INIT,"Mapping standards mask=0x%x (%.*s)",
+ (int)id,bcnt,buf);
+ }
+
+ *countptr = 0;
+ std_cnt = 0;
+ fmsk = 0;
+ for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) {
+ if (!(idmsk & cmsk)) continue;
+ cmsk &= ~idmsk;
+ if (match_std(idmsk)) {
+ std_cnt++;
+ continue;
+ }
+ fmsk |= idmsk;
+ }
+
+ for (idx2 = 0; idx2 < sizeof(std_mixes)/sizeof(std_mixes[0]); idx2++) {
+ if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++;
+ }
+
+ if (fmsk) {
+ char buf[50];
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "WARNING:"
+ " Failed to classify the following standard(s): %.*s",
+ bcnt,buf);
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,"Setting up %u unique standard(s)",
+ std_cnt);
+ if (!std_cnt) return 0; // paranoia
+
+ stddefs = kmalloc(sizeof(struct v4l2_standard) * std_cnt,
+ GFP_KERNEL);
+ if (!stddefs) return 0; // Allocation failure; give up
+ memset(stddefs,0,sizeof(struct v4l2_standard) * std_cnt);
+ for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx;
+
+ idx = 0;
+
+ /* Enumerate potential special cases */
+ for (idx2 = 0; ((idx2 < sizeof(std_mixes)/sizeof(std_mixes[0])) &&
+ (idx < std_cnt)); idx2++) {
+ if (!(id & std_mixes[idx2])) continue;
+ if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++;
+ }
+ /* Now enumerate individual pieces */
+ for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) {
+ if (!(idmsk & cmsk)) continue;
+ cmsk &= ~idmsk;
+ if (!pvr2_std_fill(stddefs+idx,idmsk)) continue;
+ idx++;
+ }
+
+ *countptr = std_cnt;
+ return stddefs;
+}
+
+v4l2_std_id pvr2_std_get_usable(void)
+{
+ return CSTD_ALL;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.h b/drivers/media/video/pvrusb2/pvrusb2-std.h
new file mode 100644
index 000000000000..07c399375341
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-std.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_STD_H
+#define __PVRUSB2_STD_H
+
+#include <linux/videodev2.h>
+
+// Convert string describing one or more video standards into a mask of V4L
+// standard bits.  Return true if the conversion succeeds, otherwise return
+// false. String is expected to be of the form: C1-x/y;C2-a/b where C1 and
+// C2 are color system names (e.g. "PAL", "NTSC") and x, y, a, and b are
+// modulation schemes (e.g. "M", "B", "G", etc).
+int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
+ unsigned int bufSize);
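+
+// Example (illustrative only): the string "PAL-B/G/H;SECAM-L" yields
+// V4L2_STD_PAL_B|V4L2_STD_PAL_G|V4L2_STD_PAL_H|V4L2_STD_SECAM_L, e.g.:
+//
+//     v4l2_std_id id;
+//     if (pvr2_std_str_to_id(&id,buf,strlen(buf))) { /* id is now valid */ }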
+
+// Convert any arbitrary set of video standard bits into an unambiguous
+// readable string. Return value is the number of bytes consumed in the
+// buffer. The formatted string is of a form that can be parsed by our
+// sibling pvr2_std_str_to_id() function.
+unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
+ v4l2_std_id id);
+
+// Create an array of suitable v4l2_standard structures given a bit mask of
+// video standards to support. The array is allocated from the heap, and
+// the number of elements is returned in the first argument.
+struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
+ v4l2_std_id id);
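+
+// Illustrative usage (hypothetical caller): the array comes from kmalloc(),
+// so the caller owns it and must kfree() it when finished, e.g.:
+//
+//     unsigned int cnt;
+//     struct v4l2_standard *stds = pvr2_std_create_enum(&cnt,id_mask);
+//     if (stds) {
+//             /* ... use stds[0..cnt-1] ... */
+//             kfree(stds);
+//     }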
+
+// Return mask of which video standard bits are valid
+v4l2_std_id pvr2_std_get_usable(void);
+
+#endif /* __PVRUSB2_STD_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
new file mode 100644
index 000000000000..c6e6523d74b4
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -0,0 +1,865 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+#include "pvrusb2-sysfs.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-debug.h"
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+#include "pvrusb2-debugifc.h"
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+#define pvr2_sysfs_trace(...) pvr2_trace(PVR2_TRACE_SYSFS,__VA_ARGS__)
+
+struct pvr2_sysfs {
+ struct pvr2_channel channel;
+ struct class_device *class_dev;
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+ struct pvr2_sysfs_debugifc *debugifc;
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+ struct pvr2_sysfs_ctl_item *item_first;
+ struct pvr2_sysfs_ctl_item *item_last;
+ struct sysfs_ops kops;
+ struct kobj_type ktype;
+ struct class_device_attribute attr_v4l_minor_number;
+ struct class_device_attribute attr_unit_number;
+};
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+struct pvr2_sysfs_debugifc {
+ struct class_device_attribute attr_debugcmd;
+ struct class_device_attribute attr_debuginfo;
+};
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+struct pvr2_sysfs_ctl_item {
+ struct class_device_attribute attr_name;
+ struct class_device_attribute attr_type;
+ struct class_device_attribute attr_min;
+ struct class_device_attribute attr_max;
+ struct class_device_attribute attr_enum;
+ struct class_device_attribute attr_bits;
+ struct class_device_attribute attr_val;
+ struct class_device_attribute attr_custom;
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *chptr;
+ struct pvr2_sysfs_ctl_item *item_next;
+ struct attribute *attr_gen[7];
+ struct attribute_group grp;
+ char name[80];
+};
+
+struct pvr2_sysfs_class {
+ struct class class;
+};
+
+static ssize_t show_name(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ const char *name;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ name = pvr2_ctrl_get_desc(cptr);
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_name(cid=%d) is %s",sfp,id,name);
+
+ if (!name) return -EINVAL;
+
+ return scnprintf(buf,PAGE_SIZE,"%s\n",name);
+}
+
+static ssize_t show_type(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ const char *name;
+ enum pvr2_ctl_type tp;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ tp = pvr2_ctrl_get_type(cptr);
+ switch (tp) {
+ case pvr2_ctl_int: name = "integer"; break;
+ case pvr2_ctl_enum: name = "enum"; break;
+ case pvr2_ctl_bitmask: name = "bitmask"; break;
+ case pvr2_ctl_bool: name = "boolean"; break;
+ default: name = "?"; break;
+ }
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",sfp,id,name);
+
+ if (!name) return -EINVAL;
+
+ return scnprintf(buf,PAGE_SIZE,"%s\n",name);
+}
+
+static ssize_t show_min(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ long val;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ val = pvr2_ctrl_get_min(cptr);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_min(cid=%d) is %ld",sfp,id,val);
+
+ return scnprintf(buf,PAGE_SIZE,"%ld\n",val);
+}
+
+static ssize_t show_max(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ long val;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ val = pvr2_ctrl_get_max(cptr);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_max(cid=%d) is %ld",sfp,id,val);
+
+ return scnprintf(buf,PAGE_SIZE,"%ld\n",val);
+}
+
+static ssize_t show_val_norm(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ int val,ret;
+ unsigned int cnt = 0;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ ret = pvr2_ctrl_get_value(cptr,&val);
+ if (ret < 0) return ret;
+
+ ret = pvr2_ctrl_value_to_sym(cptr,~0,val,
+ buf,PAGE_SIZE-1,&cnt);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_norm(cid=%d) is %.*s (%d)",
+ sfp,id,cnt,buf,val);
+ buf[cnt] = '\n';
+ return cnt+1;
+}
+
+static ssize_t show_val_custom(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ int val,ret;
+ unsigned int cnt = 0;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ ret = pvr2_ctrl_get_value(cptr,&val);
+ if (ret < 0) return ret;
+
+ ret = pvr2_ctrl_custom_value_to_sym(cptr,~0,val,
+ buf,PAGE_SIZE-1,&cnt);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_custom(cid=%d) is %.*s (%d)",
+ sfp,id,cnt,buf,val);
+ buf[cnt] = '\n';
+ return cnt+1;
+}
+
+static ssize_t show_enum(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ long val;
+ unsigned int bcnt,ccnt,ecnt;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ ecnt = pvr2_ctrl_get_cnt(cptr);
+ bcnt = 0;
+ for (val = 0; val < ecnt; val++) {
+ pvr2_ctrl_get_valname(cptr,val,buf+bcnt,PAGE_SIZE-bcnt,&ccnt);
+ if (!ccnt) continue;
+ bcnt += ccnt;
+ if (bcnt >= PAGE_SIZE) break;
+ buf[bcnt] = '\n';
+ bcnt++;
+ }
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_enum(cid=%d)",sfp,id);
+ return bcnt;
+}
+
+static ssize_t show_bits(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ int valid_bits,msk;
+ unsigned int bcnt,ccnt;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ valid_bits = pvr2_ctrl_get_mask(cptr);
+ bcnt = 0;
+ for (msk = 1; valid_bits; msk <<= 1) {
+ if (!(msk & valid_bits)) continue;
+ valid_bits &= ~msk;
+ pvr2_ctrl_get_valname(cptr,msk,buf+bcnt,PAGE_SIZE-bcnt,&ccnt);
+ bcnt += ccnt;
+ if (bcnt >= PAGE_SIZE) break;
+ buf[bcnt] = '\n';
+ bcnt++;
+ }
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_bits(cid=%d)",sfp,id);
+ return bcnt;
+}
+
+static int store_val_any(int id,int customfl,struct pvr2_sysfs *sfp,
+ const char *buf,unsigned int count)
+{
+ struct pvr2_ctrl *cptr;
+ int ret;
+ int mask,val;
+
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (customfl) {
+ ret = pvr2_ctrl_custom_sym_to_value(cptr,buf,count,&mask,&val);
+ } else {
+ ret = pvr2_ctrl_sym_to_value(cptr,buf,count,&mask,&val);
+ }
+ if (ret < 0) return ret;
+ ret = pvr2_ctrl_set_mask_value(cptr,mask,val);
+ pvr2_hdw_commit_ctl(sfp->channel.hdw);
+ return ret;
+}
+
+static ssize_t store_val_norm(int id,struct class_device *class_dev,
+ const char *buf,size_t count)
+{
+ struct pvr2_sysfs *sfp;
+ int ret;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ ret = store_val_any(id,0,sfp,buf,count);
+ if (!ret) ret = count;
+ return ret;
+}
+
+static ssize_t store_val_custom(int id,struct class_device *class_dev,
+ const char *buf,size_t count)
+{
+ struct pvr2_sysfs *sfp;
+ int ret;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ ret = store_val_any(id,1,sfp,buf,count);
+ if (!ret) ret = count;
+ return ret;
+}
+
+/*
+ Mike Isely <isely@pobox.com> 30-April-2005
+
+ This next batch of horrible preprocessor hackery is needed because the
+ kernel's class_device_attribute mechanism fails to pass the actual
+ attribute through to the show / store functions, which means we have no
+ way to package up any attribute-specific parameters, like for example the
+ control id. So we work around this brain-damage by encoding the control
+ id into the show / store functions themselves and pick the function based
+ on the control id we're setting up. These macros try to ease the pain.
+ Yuck.
+*/
+
+#define CREATE_SHOW_INSTANCE(sf_name,ctl_id) \
+static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,char *buf) \
+{ return sf_name(ctl_id,class_dev,buf); }
+
+#define CREATE_STORE_INSTANCE(sf_name,ctl_id) \
+static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,const char *buf,size_t count) \
+{ return sf_name(ctl_id,class_dev,buf,count); }
+
+#define CREATE_BATCH(ctl_id) \
+CREATE_SHOW_INSTANCE(show_name,ctl_id) \
+CREATE_SHOW_INSTANCE(show_type,ctl_id) \
+CREATE_SHOW_INSTANCE(show_min,ctl_id) \
+CREATE_SHOW_INSTANCE(show_max,ctl_id) \
+CREATE_SHOW_INSTANCE(show_val_norm,ctl_id) \
+CREATE_SHOW_INSTANCE(show_val_custom,ctl_id) \
+CREATE_SHOW_INSTANCE(show_enum,ctl_id) \
+CREATE_SHOW_INSTANCE(show_bits,ctl_id) \
+CREATE_STORE_INSTANCE(store_val_norm,ctl_id) \
+CREATE_STORE_INSTANCE(store_val_custom,ctl_id) \
+
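+
+/* For illustration, CREATE_BATCH(0) below expands (among other wrappers) to:
+
+       static ssize_t show_name_0(struct class_device *class_dev,char *buf)
+       { return show_name(0,class_dev,buf); }
+
+   i.e. one thin wrapper per control id per show / store function, so the
+   control id can be recovered even though the sysfs callback never sees
+   which attribute it was invoked for. */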
+CREATE_BATCH(0)
+CREATE_BATCH(1)
+CREATE_BATCH(2)
+CREATE_BATCH(3)
+CREATE_BATCH(4)
+CREATE_BATCH(5)
+CREATE_BATCH(6)
+CREATE_BATCH(7)
+CREATE_BATCH(8)
+CREATE_BATCH(9)
+CREATE_BATCH(10)
+CREATE_BATCH(11)
+CREATE_BATCH(12)
+CREATE_BATCH(13)
+CREATE_BATCH(14)
+CREATE_BATCH(15)
+CREATE_BATCH(16)
+CREATE_BATCH(17)
+CREATE_BATCH(18)
+CREATE_BATCH(19)
+CREATE_BATCH(20)
+CREATE_BATCH(21)
+CREATE_BATCH(22)
+CREATE_BATCH(23)
+CREATE_BATCH(24)
+CREATE_BATCH(25)
+CREATE_BATCH(26)
+CREATE_BATCH(27)
+CREATE_BATCH(28)
+CREATE_BATCH(29)
+CREATE_BATCH(30)
+CREATE_BATCH(31)
+CREATE_BATCH(32)
+CREATE_BATCH(33)
+CREATE_BATCH(34)
+CREATE_BATCH(35)
+CREATE_BATCH(36)
+CREATE_BATCH(37)
+CREATE_BATCH(38)
+CREATE_BATCH(39)
+CREATE_BATCH(40)
+CREATE_BATCH(41)
+CREATE_BATCH(42)
+CREATE_BATCH(43)
+CREATE_BATCH(44)
+CREATE_BATCH(45)
+CREATE_BATCH(46)
+CREATE_BATCH(47)
+CREATE_BATCH(48)
+CREATE_BATCH(49)
+CREATE_BATCH(50)
+CREATE_BATCH(51)
+CREATE_BATCH(52)
+CREATE_BATCH(53)
+CREATE_BATCH(54)
+CREATE_BATCH(55)
+CREATE_BATCH(56)
+CREATE_BATCH(57)
+CREATE_BATCH(58)
+CREATE_BATCH(59)
+
+struct pvr2_sysfs_func_set {
+ ssize_t (*show_name)(struct class_device *,char *);
+ ssize_t (*show_type)(struct class_device *,char *);
+ ssize_t (*show_min)(struct class_device *,char *);
+ ssize_t (*show_max)(struct class_device *,char *);
+ ssize_t (*show_enum)(struct class_device *,char *);
+ ssize_t (*show_bits)(struct class_device *,char *);
+ ssize_t (*show_val_norm)(struct class_device *,char *);
+ ssize_t (*store_val_norm)(struct class_device *,
+ const char *,size_t);
+ ssize_t (*show_val_custom)(struct class_device *,char *);
+ ssize_t (*store_val_custom)(struct class_device *,
+ const char *,size_t);
+};
+
+#define INIT_BATCH(ctl_id) \
+[ctl_id] = { \
+ .show_name = show_name_##ctl_id, \
+ .show_type = show_type_##ctl_id, \
+ .show_min = show_min_##ctl_id, \
+ .show_max = show_max_##ctl_id, \
+ .show_enum = show_enum_##ctl_id, \
+ .show_bits = show_bits_##ctl_id, \
+ .show_val_norm = show_val_norm_##ctl_id, \
+ .store_val_norm = store_val_norm_##ctl_id, \
+ .show_val_custom = show_val_custom_##ctl_id, \
+ .store_val_custom = store_val_custom_##ctl_id, \
+} \
+
+static struct pvr2_sysfs_func_set funcs[] = {
+ INIT_BATCH(0),
+ INIT_BATCH(1),
+ INIT_BATCH(2),
+ INIT_BATCH(3),
+ INIT_BATCH(4),
+ INIT_BATCH(5),
+ INIT_BATCH(6),
+ INIT_BATCH(7),
+ INIT_BATCH(8),
+ INIT_BATCH(9),
+ INIT_BATCH(10),
+ INIT_BATCH(11),
+ INIT_BATCH(12),
+ INIT_BATCH(13),
+ INIT_BATCH(14),
+ INIT_BATCH(15),
+ INIT_BATCH(16),
+ INIT_BATCH(17),
+ INIT_BATCH(18),
+ INIT_BATCH(19),
+ INIT_BATCH(20),
+ INIT_BATCH(21),
+ INIT_BATCH(22),
+ INIT_BATCH(23),
+ INIT_BATCH(24),
+ INIT_BATCH(25),
+ INIT_BATCH(26),
+ INIT_BATCH(27),
+ INIT_BATCH(28),
+ INIT_BATCH(29),
+ INIT_BATCH(30),
+ INIT_BATCH(31),
+ INIT_BATCH(32),
+ INIT_BATCH(33),
+ INIT_BATCH(34),
+ INIT_BATCH(35),
+ INIT_BATCH(36),
+ INIT_BATCH(37),
+ INIT_BATCH(38),
+ INIT_BATCH(39),
+ INIT_BATCH(40),
+ INIT_BATCH(41),
+ INIT_BATCH(42),
+ INIT_BATCH(43),
+ INIT_BATCH(44),
+ INIT_BATCH(45),
+ INIT_BATCH(46),
+ INIT_BATCH(47),
+ INIT_BATCH(48),
+ INIT_BATCH(49),
+ INIT_BATCH(50),
+ INIT_BATCH(51),
+ INIT_BATCH(52),
+ INIT_BATCH(53),
+ INIT_BATCH(54),
+ INIT_BATCH(55),
+ INIT_BATCH(56),
+ INIT_BATCH(57),
+ INIT_BATCH(58),
+ INIT_BATCH(59),
+};
+
+
+static void pvr2_sysfs_add_control(struct pvr2_sysfs *sfp,int ctl_id)
+{
+ struct pvr2_sysfs_ctl_item *cip;
+ struct pvr2_sysfs_func_set *fp;
+ struct pvr2_ctrl *cptr;
+ unsigned int cnt,acnt;
+
+ if ((ctl_id < 0) || (ctl_id >= (sizeof(funcs)/sizeof(funcs[0])))) {
+ return;
+ }
+
+ fp = funcs + ctl_id;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,ctl_id);
+ if (!cptr) return;
+
+ cip = kmalloc(sizeof(*cip),GFP_KERNEL);
+ if (!cip) return;
+ memset(cip,0,sizeof(*cip));
+ pvr2_sysfs_trace("Creating pvr2_sysfs_ctl_item id=%p",cip);
+
+ cip->cptr = cptr;
+
+ cip->chptr = sfp;
+ cip->item_next = 0;
+ if (sfp->item_last) {
+ sfp->item_last->item_next = cip;
+ } else {
+ sfp->item_first = cip;
+ }
+ sfp->item_last = cip;
+
+ cip->attr_name.attr.owner = THIS_MODULE;
+ cip->attr_name.attr.name = "name";
+ cip->attr_name.attr.mode = S_IRUGO;
+ cip->attr_name.show = fp->show_name;
+
+ cip->attr_type.attr.owner = THIS_MODULE;
+ cip->attr_type.attr.name = "type";
+ cip->attr_type.attr.mode = S_IRUGO;
+ cip->attr_type.show = fp->show_type;
+
+ cip->attr_min.attr.owner = THIS_MODULE;
+ cip->attr_min.attr.name = "min_val";
+ cip->attr_min.attr.mode = S_IRUGO;
+ cip->attr_min.show = fp->show_min;
+
+ cip->attr_max.attr.owner = THIS_MODULE;
+ cip->attr_max.attr.name = "max_val";
+ cip->attr_max.attr.mode = S_IRUGO;
+ cip->attr_max.show = fp->show_max;
+
+ cip->attr_val.attr.owner = THIS_MODULE;
+ cip->attr_val.attr.name = "cur_val";
+ cip->attr_val.attr.mode = S_IRUGO;
+
+ cip->attr_custom.attr.owner = THIS_MODULE;
+ cip->attr_custom.attr.name = "custom_val";
+ cip->attr_custom.attr.mode = S_IRUGO;
+
+ cip->attr_enum.attr.owner = THIS_MODULE;
+ cip->attr_enum.attr.name = "enum_val";
+ cip->attr_enum.attr.mode = S_IRUGO;
+ cip->attr_enum.show = fp->show_enum;
+
+ cip->attr_bits.attr.owner = THIS_MODULE;
+ cip->attr_bits.attr.name = "bit_val";
+ cip->attr_bits.attr.mode = S_IRUGO;
+ cip->attr_bits.show = fp->show_bits;
+
+ if (pvr2_ctrl_is_writable(cptr)) {
+ cip->attr_val.attr.mode |= S_IWUSR|S_IWGRP;
+ cip->attr_custom.attr.mode |= S_IWUSR|S_IWGRP;
+ }
+
+ acnt = 0;
+ cip->attr_gen[acnt++] = &cip->attr_name.attr;
+ cip->attr_gen[acnt++] = &cip->attr_type.attr;
+ cip->attr_gen[acnt++] = &cip->attr_val.attr;
+ cip->attr_val.show = fp->show_val_norm;
+ cip->attr_val.store = fp->store_val_norm;
+ if (pvr2_ctrl_has_custom_symbols(cptr)) {
+ cip->attr_gen[acnt++] = &cip->attr_custom.attr;
+ cip->attr_custom.show = fp->show_val_custom;
+ cip->attr_custom.store = fp->store_val_custom;
+ }
+ switch (pvr2_ctrl_get_type(cptr)) {
+ case pvr2_ctl_enum:
+ // Control is an enumeration
+ cip->attr_gen[acnt++] = &cip->attr_enum.attr;
+ break;
+ case pvr2_ctl_int:
+ // Control is an integer
+ cip->attr_gen[acnt++] = &cip->attr_min.attr;
+ cip->attr_gen[acnt++] = &cip->attr_max.attr;
+ break;
+ case pvr2_ctl_bitmask:
+ // Control is a bitmask
+ cip->attr_gen[acnt++] = &cip->attr_bits.attr;
+ break;
+ default: break;
+ }
+
+ cnt = scnprintf(cip->name,sizeof(cip->name)-1,"ctl_%s",
+ pvr2_ctrl_get_name(cptr));
+ cip->name[cnt] = 0;
+ cip->grp.name = cip->name;
+ cip->grp.attrs = cip->attr_gen;
+
+ sysfs_create_group(&sfp->class_dev->kobj,&cip->grp);
+}
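+
+/* For illustration (exact entries depend on which controls the hdw
+   exposes), each group created above appears under the device's class
+   entry roughly as:
+
+       /sys/class/pvrusb2/sn-<serial>/ctl_<control name>/
+               name  type  cur_val  min_val  max_val
+
+   with enum_val, bit_val and custom_val present only for controls of the
+   corresponding type or capability. */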
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+static ssize_t debuginfo_show(struct class_device *,char *);
+static ssize_t debugcmd_show(struct class_device *,char *);
+static ssize_t debugcmd_store(struct class_device *,const char *,size_t count);
+
+static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp)
+{
+ struct pvr2_sysfs_debugifc *dip;
+ dip = kmalloc(sizeof(*dip),GFP_KERNEL);
+ if (!dip) return;
+ memset(dip,0,sizeof(*dip));
+ dip->attr_debugcmd.attr.owner = THIS_MODULE;
+ dip->attr_debugcmd.attr.name = "debugcmd";
+ dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP;
+ dip->attr_debugcmd.show = debugcmd_show;
+ dip->attr_debugcmd.store = debugcmd_store;
+ dip->attr_debuginfo.attr.owner = THIS_MODULE;
+ dip->attr_debuginfo.attr.name = "debuginfo";
+ dip->attr_debuginfo.attr.mode = S_IRUGO;
+ dip->attr_debuginfo.show = debuginfo_show;
+ sfp->debugifc = dip;
+ class_device_create_file(sfp->class_dev,&dip->attr_debugcmd);
+ class_device_create_file(sfp->class_dev,&dip->attr_debuginfo);
+}
+
+
+static void pvr2_sysfs_tear_down_debugifc(struct pvr2_sysfs *sfp)
+{
+ if (!sfp->debugifc) return;
+ class_device_remove_file(sfp->class_dev,
+ &sfp->debugifc->attr_debuginfo);
+ class_device_remove_file(sfp->class_dev,&sfp->debugifc->attr_debugcmd);
+ kfree(sfp->debugifc);
+ sfp->debugifc = 0;
+}
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+
+static void pvr2_sysfs_add_controls(struct pvr2_sysfs *sfp)
+{
+ unsigned int idx,cnt;
+ cnt = pvr2_hdw_get_ctrl_count(sfp->channel.hdw);
+ for (idx = 0; idx < cnt; idx++) {
+ pvr2_sysfs_add_control(sfp,idx);
+ }
+}
+
+
+static void pvr2_sysfs_tear_down_controls(struct pvr2_sysfs *sfp)
+{
+ struct pvr2_sysfs_ctl_item *cip1,*cip2;
+ for (cip1 = sfp->item_first; cip1; cip1 = cip2) {
+ cip2 = cip1->item_next;
+ sysfs_remove_group(&sfp->class_dev->kobj,&cip1->grp);
+ pvr2_sysfs_trace("Destroying pvr2_sysfs_ctl_item id=%p",cip1);
+ kfree(cip1);
+ }
+}
+
+
+static void pvr2_sysfs_class_release(struct class *class)
+{
+ struct pvr2_sysfs_class *clp;
+ clp = container_of(class,struct pvr2_sysfs_class,class);
+ pvr2_sysfs_trace("Destroying pvr2_sysfs_class id=%p",clp);
+ kfree(clp);
+}
+
+
+static void pvr2_sysfs_release(struct class_device *class_dev)
+{
+ pvr2_sysfs_trace("Releasing class_dev id=%p",class_dev);
+ kfree(class_dev);
+}
+
+
+static void class_dev_destroy(struct pvr2_sysfs *sfp)
+{
+ if (!sfp->class_dev) return;
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+ pvr2_sysfs_tear_down_debugifc(sfp);
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+ pvr2_sysfs_tear_down_controls(sfp);
+ class_device_remove_file(sfp->class_dev,&sfp->attr_v4l_minor_number);
+ class_device_remove_file(sfp->class_dev,&sfp->attr_unit_number);
+ pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev);
+ sfp->class_dev->class_data = 0;
+ class_device_unregister(sfp->class_dev);
+ sfp->class_dev = 0;
+}
+
+
+static ssize_t v4l_minor_number_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ return scnprintf(buf,PAGE_SIZE,"%d\n",
+ pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw));
+}
+
+
+static ssize_t unit_number_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ return scnprintf(buf,PAGE_SIZE,"%d\n",
+ pvr2_hdw_get_unit_number(sfp->channel.hdw));
+}
+
+
+static void class_dev_create(struct pvr2_sysfs *sfp,
+ struct pvr2_sysfs_class *class_ptr)
+{
+ struct usb_device *usb_dev;
+ struct class_device *class_dev;
+ usb_dev = pvr2_hdw_get_dev(sfp->channel.hdw);
+ if (!usb_dev) return;
+ class_dev = kmalloc(sizeof(*class_dev),GFP_KERNEL);
+ if (!class_dev) return;
+ memset(class_dev,0,sizeof(*class_dev));
+
+ pvr2_sysfs_trace("Creating class_dev id=%p",class_dev);
+
+ class_dev->class = &class_ptr->class;
+ if (pvr2_hdw_get_sn(sfp->channel.hdw)) {
+ snprintf(class_dev->class_id,BUS_ID_SIZE,"sn-%lu",
+ pvr2_hdw_get_sn(sfp->channel.hdw));
+ } else if (pvr2_hdw_get_unit_number(sfp->channel.hdw) >= 0) {
+ snprintf(class_dev->class_id,BUS_ID_SIZE,"unit-%c",
+ pvr2_hdw_get_unit_number(sfp->channel.hdw) + 'a');
+ } else {
+ kfree(class_dev);
+ return;
+ }
+
+ class_dev->dev = &usb_dev->dev;
+
+ sfp->class_dev = class_dev;
+ class_dev->class_data = sfp;
+ class_device_register(class_dev);
+
+ sfp->attr_v4l_minor_number.attr.owner = THIS_MODULE;
+ sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number";
+ sfp->attr_v4l_minor_number.attr.mode = S_IRUGO;
+ sfp->attr_v4l_minor_number.show = v4l_minor_number_show;
+ sfp->attr_v4l_minor_number.store = 0;
+ class_device_create_file(sfp->class_dev,&sfp->attr_v4l_minor_number);
+ sfp->attr_unit_number.attr.owner = THIS_MODULE;
+ sfp->attr_unit_number.attr.name = "unit_number";
+ sfp->attr_unit_number.attr.mode = S_IRUGO;
+ sfp->attr_unit_number.show = unit_number_show;
+ sfp->attr_unit_number.store = 0;
+ class_device_create_file(sfp->class_dev,&sfp->attr_unit_number);
+
+ pvr2_sysfs_add_controls(sfp);
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+ pvr2_sysfs_add_debugifc(sfp);
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+}
+
+
+static void pvr2_sysfs_internal_check(struct pvr2_channel *chp)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = container_of(chp,struct pvr2_sysfs,channel);
+ if (!sfp->channel.mc_head->disconnect_flag) return;
+ pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_sysfs id=%p",sfp);
+ class_dev_destroy(sfp);
+ pvr2_channel_done(&sfp->channel);
+ kfree(sfp);
+}
+
+
+struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *mp,
+ struct pvr2_sysfs_class *class_ptr)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = kmalloc(sizeof(*sfp),GFP_KERNEL);
+ if (!sfp) return sfp;
+ memset(sfp,0,sizeof(*sfp));
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_sysfs id=%p",sfp);
+ pvr2_channel_init(&sfp->channel,mp);
+ sfp->channel.check_func = pvr2_sysfs_internal_check;
+
+ class_dev_create(sfp,class_ptr);
+ return sfp;
+}
+
+
+static int pvr2_sysfs_hotplug(struct class_device *cd,char **envp,
+ int numenvp,char *buf,int size)
+{
+ /* Even though we don't do anything here, we still need this function
+ because sysfs will still try to call it. */
+ return 0;
+}
+
+struct pvr2_sysfs_class *pvr2_sysfs_class_create(void)
+{
+ struct pvr2_sysfs_class *clp;
+ clp = kmalloc(sizeof(*clp),GFP_KERNEL);
+ if (!clp) return clp;
+ memset(clp,0,sizeof(*clp));
+ pvr2_sysfs_trace("Creating pvr2_sysfs_class id=%p",clp);
+ clp->class.name = "pvrusb2";
+ clp->class.class_release = pvr2_sysfs_class_release;
+ clp->class.release = pvr2_sysfs_release;
+ clp->class.uevent = pvr2_sysfs_hotplug;
+ if (class_register(&clp->class)) {
+ pvr2_sysfs_trace(
+ "Registration failed for pvr2_sysfs_class id=%p",clp);
+ kfree(clp);
+ clp = 0;
+ }
+ return clp;
+}
+
+
+void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *clp)
+{
+ class_unregister(&clp->class);
+}
+
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+static ssize_t debuginfo_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ pvr2_hdw_trigger_module_log(sfp->channel.hdw);
+ return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
+}
+
+
+static ssize_t debugcmd_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
+}
+
+
+static ssize_t debugcmd_store(struct class_device *class_dev,
+ const char *buf,size_t count)
+{
+ struct pvr2_sysfs *sfp;
+ int ret;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+
+ ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count);
+ if (ret < 0) return ret;
+ return count;
+}
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.h b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h
new file mode 100644
index 000000000000..ff9373b47f8f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_SYSFS_H
+#define __PVRUSB2_SYSFS_H
+
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include "pvrusb2-context.h"
+
+struct pvr2_sysfs;
+struct pvr2_sysfs_class;
+
+struct pvr2_sysfs_class *pvr2_sysfs_class_create(void);
+void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *);
+
+struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *,
+ struct pvr2_sysfs_class *);
+
+#endif /* __PVRUSB2_SYSFS_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.c b/drivers/media/video/pvrusb2/pvrusb2-tuner.c
new file mode 100644
index 000000000000..f4aba8144ce0
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.c
@@ -0,0 +1,122 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2.h"
+#include "pvrusb2-util.h"
+#include "pvrusb2-tuner.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/tuner.h>
+#include <media/v4l2-common.h>
+
+struct pvr2_tuner_handler {
+ struct pvr2_hdw *hdw;
+ struct pvr2_i2c_client *client;
+ struct pvr2_i2c_handler i2c_handler;
+ int type_update_fl;
+};
+
+
+static void set_type(struct pvr2_tuner_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct tuner_setup setup;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c tuner set_type(%d)",hdw->tuner_type);
+ if (((int)(hdw->tuner_type)) < 0) return;
+
+ setup.addr = ADDR_UNSET;
+ setup.type = hdw->tuner_type;
+ setup.mode_mask = T_RADIO | T_ANALOG_TV;
+ /* We may want to restrict mode_mask to just T_ANALOG_TV for now */
+ pvr2_i2c_client_cmd(ctxt->client,TUNER_SET_TYPE_ADDR,&setup);
+ ctxt->type_update_fl = 0;
+}
+
+
+static int tuner_check(struct pvr2_tuner_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ if (hdw->tuner_updated) ctxt->type_update_fl = !0;
+ return ctxt->type_update_fl != 0;
+}
+
+
+static void tuner_update(struct pvr2_tuner_handler *ctxt)
+{
+ if (ctxt->type_update_fl) set_type(ctxt);
+}
+
+
+static void pvr2_tuner_detach(struct pvr2_tuner_handler *ctxt)
+{
+ ctxt->client->handler = 0;
+ kfree(ctxt);
+}
+
+
+static unsigned int pvr2_tuner_describe(struct pvr2_tuner_handler *ctxt,char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-tuner");
+}
+
+
+const static struct pvr2_i2c_handler_functions tuner_funcs = {
+ .detach = (void (*)(void *))pvr2_tuner_detach,
+ .check = (int (*)(void *))tuner_check,
+ .update = (void (*)(void *))tuner_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_tuner_describe,
+};
+
+
+int pvr2_i2c_tuner_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_tuner_handler *ctxt;
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->i2c_handler.func_data = ctxt;
+ ctxt->i2c_handler.func_table = &tuner_funcs;
+ ctxt->type_update_fl = !0;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ cp->handler = &ctxt->i2c_handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tuner handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.h b/drivers/media/video/pvrusb2/pvrusb2-tuner.h
new file mode 100644
index 000000000000..556f12aa9160
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_TUNER_H
+#define __PVRUSB2_TUNER_H
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_tuner_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+#endif /* __PVRUSB2_TUNER_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-util.h b/drivers/media/video/pvrusb2/pvrusb2-util.h
new file mode 100644
index 000000000000..e53aee416f56
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-util.h
@@ -0,0 +1,63 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_UTIL_H
+#define __PVRUSB2_UTIL_H
+
+#define PVR2_DECOMPOSE_LE(t,i,d) \
+ do { \
+ (t)[i] = (d) & 0xff;\
+ (t)[i+1] = ((d) >> 8) & 0xff;\
+ (t)[i+2] = ((d) >> 16) & 0xff;\
+ (t)[i+3] = ((d) >> 24) & 0xff;\
+ } while(0)
+
+#define PVR2_DECOMPOSE_BE(t,i,d) \
+ do { \
+ (t)[i+3] = (d) & 0xff;\
+ (t)[i+2] = ((d) >> 8) & 0xff;\
+ (t)[i+1] = ((d) >> 16) & 0xff;\
+ (t)[i] = ((d) >> 24) & 0xff;\
+ } while(0)
+
+#define PVR2_COMPOSE_LE(t,i) \
+ ((((u32)((t)[i+3])) << 24) | \
+ (((u32)((t)[i+2])) << 16) | \
+ (((u32)((t)[i+1])) << 8) | \
+ ((u32)((t)[i])))
+
+#define PVR2_COMPOSE_BE(t,i) \
+ ((((u32)((t)[i])) << 24) | \
+ (((u32)((t)[i+1])) << 16) | \
+ (((u32)((t)[i+2])) << 8) | \
+ ((u32)((t)[i+3])))
+
+
+#endif /* __PVRUSB2_UTIL_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
new file mode 100644
index 000000000000..961951010c27
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -0,0 +1,1126 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include "pvrusb2-context.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-v4l2.h"
+#include "pvrusb2-ioread.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+
+struct pvr2_v4l2_dev;
+struct pvr2_v4l2_fh;
+struct pvr2_v4l2;
+
+/* V4L no longer provides the ability to set / get a private context pointer
+ (i.e. video_get_drvdata / video_set_drvdata), which means we have to
+ concoct our own context locating mechanism. Supposedly this is intended
+ to simplify driver implementation. It's not clear to me how that can
+ possibly be true. Our solution here is to maintain a lookup table of
+ our context instances, indexed by the minor device number of the V4L
+ device. See pvr2_v4l2_open() for some implications of this approach. */
+static struct pvr2_v4l2_dev *devices[256];
+static DEFINE_MUTEX(device_lock);
+
+struct pvr2_v4l2_dev {
+ struct pvr2_v4l2 *v4lp;
+ struct video_device *vdev;
+ struct pvr2_context_stream *stream;
+ int ctxt_idx;
+ enum pvr2_config config;
+};
+
+struct pvr2_v4l2_fh {
+ struct pvr2_channel channel;
+ struct pvr2_v4l2_dev *dev_info;
+ enum v4l2_priority prio;
+ struct pvr2_ioread *rhp;
+ struct file *file;
+ struct pvr2_v4l2 *vhead;
+ struct pvr2_v4l2_fh *vnext;
+ struct pvr2_v4l2_fh *vprev;
+ wait_queue_head_t wait_data;
+ int fw_mode_flag;
+};
+
+struct pvr2_v4l2 {
+ struct pvr2_channel channel;
+ struct pvr2_v4l2_fh *vfirst;
+ struct pvr2_v4l2_fh *vlast;
+
+ struct v4l2_prio_state prio;
+
+ /* streams */
+ struct pvr2_v4l2_dev video_dev;
+};
+
+static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
+module_param_array(video_nr, int, NULL, 0444);
+MODULE_PARM_DESC(video_nr, "Offset for device's minor");
+
+struct v4l2_capability pvr_capability ={
+ .driver = "pvrusb2",
+ .card = "Hauppauge WinTV pvr-usb2",
+ .bus_info = "usb",
+ .version = KERNEL_VERSION(0,8,0),
+ .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_TUNER | V4L2_CAP_AUDIO |
+ V4L2_CAP_READWRITE),
+ .reserved = {0,0,0,0}
+};
+
+static struct v4l2_tuner pvr_v4l2_tuners[]= {
+ {
+ .index = 0,
+ .name = "TV Tuner",
+ .type = V4L2_TUNER_ANALOG_TV,
+ .capability = (V4L2_TUNER_CAP_NORM |
+ V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 |
+ V4L2_TUNER_CAP_LANG2),
+ .rangelow = 0,
+ .rangehigh = 0,
+ .rxsubchans = V4L2_TUNER_SUB_STEREO,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .signal = 0,
+ .afc = 0,
+ .reserved = {0,0,0,0}
+ }
+};
+
+struct v4l2_fmtdesc pvr_fmtdesc [] = {
+ {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .description = "MPEG1/2",
+ // This should really be V4L2_PIX_FMT_MPEG, but xawtv
+ // breaks when I do that.
+ .pixelformat = 0, // V4L2_PIX_FMT_MPEG,
+ .reserved = { 0, 0, 0, 0 }
+ }
+};
+
+#define PVR_FORMAT_PIX 0
+#define PVR_FORMAT_VBI 1
+
+struct v4l2_format pvr_format [] = {
+ [PVR_FORMAT_PIX] = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt = {
+ .pix = {
+ .width = 720,
+ .height = 576,
+ // This should really be V4L2_PIX_FMT_MPEG,
+ // but xawtv breaks when I do that.
+ .pixelformat = 0, // V4L2_PIX_FMT_MPEG,
+ .field = V4L2_FIELD_INTERLACED,
+ .bytesperline = 0, // doesn't make sense
+ // here
+ //FIXME : Don't know what to put here...
+ .sizeimage = (32*1024),
+ .colorspace = 0, // doesn't make sense here
+ .priv = 0
+ }
+ }
+ },
+ [PVR_FORMAT_VBI] = {
+ .type = V4L2_BUF_TYPE_VBI_CAPTURE,
+ .fmt = {
+ .vbi = {
+ .sampling_rate = 27000000,
+ .offset = 248,
+ .samples_per_line = 1443,
+ .sample_format = V4L2_PIX_FMT_GREY,
+ .start = { 0, 0 },
+ .count = { 0, 0 },
+ .flags = 0,
+ .reserved = { 0, 0 }
+ }
+ }
+ }
+};
+
+/*
+ * pvr2_v4l2_do_ioctl()
+ *
+ * This is part of the Video4Linux API. This procedure handles ioctl() calls.
+ *
+ */
+static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
+ struct pvr2_v4l2_dev *dev_info = fh->dev_info;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int ret = -EINVAL;
+
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd);
+ }
+
+ if (!pvr2_hdw_dev_ok(hdw)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "ioctl failed - bad or no context");
+ return -EFAULT;
+ }
+
+ /* check priority */
+ switch (cmd) {
+ case VIDIOC_S_CTRL:
+ case VIDIOC_S_STD:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_TUNER:
+ case VIDIOC_S_FREQUENCY:
+ ret = v4l2_prio_check(&vp->prio, &fh->prio);
+ if (ret)
+ return ret;
+ }
+
+ switch (cmd) {
+ case VIDIOC_QUERYCAP:
+ {
+ struct v4l2_capability *cap = arg;
+
+ memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_G_PRIORITY:
+ {
+ enum v4l2_priority *p = arg;
+
+ *p = v4l2_prio_max(&vp->prio);
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_S_PRIORITY:
+ {
+ enum v4l2_priority *prio = arg;
+
+ ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio);
+ break;
+ }
+
+ case VIDIOC_ENUMSTD:
+ {
+ struct v4l2_standard *vs = (struct v4l2_standard *)arg;
+ int idx = vs->index;
+ ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1);
+ break;
+ }
+
+ case VIDIOC_G_STD:
+ {
+ int val = 0;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val);
+ *(v4l2_std_id *)arg = val;
+ break;
+ }
+
+ case VIDIOC_S_STD:
+ {
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),
+ *(v4l2_std_id *)arg);
+ break;
+ }
+
+ case VIDIOC_ENUMINPUT:
+ {
+ struct pvr2_ctrl *cptr;
+ struct v4l2_input *vi = (struct v4l2_input *)arg;
+ struct v4l2_input tmp;
+ unsigned int cnt;
+
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+
+ memset(&tmp,0,sizeof(tmp));
+ tmp.index = vi->index;
+ ret = 0;
+ switch (vi->index) {
+ case PVR2_CVAL_INPUT_TV:
+ case PVR2_CVAL_INPUT_RADIO:
+ tmp.type = V4L2_INPUT_TYPE_TUNER;
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ tmp.type = V4L2_INPUT_TYPE_CAMERA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret < 0) break;
+
+ cnt = 0;
+ pvr2_ctrl_get_valname(cptr,vi->index,
+ tmp.name,sizeof(tmp.name)-1,&cnt);
+ tmp.name[cnt] = 0;
+
+ /* Don't bother with audioset, since this driver currently
+ always switches the audio whenever the video is
+ switched. */
+
+ /* Handling std is a tougher problem. It doesn't make
+ sense in cases where a device might be multi-standard.
+ We could just copy out the current value for the
+ standard, but it can change over time. For now just
+ leave it zero. */
+
+ memcpy(vi, &tmp, sizeof(tmp));
+
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_G_INPUT:
+ {
+ struct pvr2_ctrl *cptr;
+ struct v4l2_input *vi = (struct v4l2_input *)arg;
+ int val;
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+ val = 0;
+ ret = pvr2_ctrl_get_value(cptr,&val);
+ vi->index = val;
+ break;
+ }
+
+ case VIDIOC_S_INPUT:
+ {
+ struct v4l2_input *vi = (struct v4l2_input *)arg;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
+ vi->index);
+ break;
+ }
+
+ case VIDIOC_ENUMAUDIO:
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ case VIDIOC_G_AUDIO:
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ case VIDIOC_S_AUDIO:
+ {
+ ret = -EINVAL;
+ break;
+ }
+ case VIDIOC_G_TUNER:
+ {
+ struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
+ unsigned int status_mask;
+ int val;
+ if (vt->index !=0) break;
+
+ status_mask = pvr2_hdw_get_signal_status(hdw);
+
+ memcpy(vt, &pvr_v4l2_tuners[vt->index],
+ sizeof(struct v4l2_tuner));
+
+ vt->signal = 0;
+ if (status_mask & PVR2_SIGNAL_OK) {
+ if (status_mask & PVR2_SIGNAL_STEREO) {
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ } else {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ }
+ if (status_mask & PVR2_SIGNAL_SAP) {
+ vt->rxsubchans |= (V4L2_TUNER_SUB_LANG1 |
+ V4L2_TUNER_SUB_LANG2);
+ }
+ vt->signal = 65535;
+ }
+
+ val = 0;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
+ &val);
+ vt->audmode = val;
+ break;
+ }
+
+ case VIDIOC_S_TUNER:
+ {
+ struct v4l2_tuner *vt=(struct v4l2_tuner *)arg;
+
+ if (vt->index != 0)
+ break;
+
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
+ vt->audmode);
+		break;
+	}
+
+ case VIDIOC_S_FREQUENCY:
+ {
+ const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
+ vf->frequency * 62500);
+ break;
+ }
+
+ case VIDIOC_G_FREQUENCY:
+ {
+ struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
+ int val = 0;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
+ &val);
+ val /= 62500;
+ vf->frequency = val;
+ break;
+ }
+
+ case VIDIOC_ENUM_FMT:
+ {
+ struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg;
+
+		/* Only one format is supported: MPEG. */
+ if (fd->index != 0)
+ break;
+
+ memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_G_FMT:
+ {
+ struct v4l2_format *vf = (struct v4l2_format *)arg;
+ int val;
+ switch(vf->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
+ sizeof(struct v4l2_format));
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES),
+ &val);
+ vf->fmt.pix.width = val;
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES),
+ &val);
+ vf->fmt.pix.height = val;
+ ret = 0;
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+			// ????? Still need to figure out how to do VBI correctly
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ case VIDIOC_TRY_FMT:
+ case VIDIOC_S_FMT:
+ {
+ struct v4l2_format *vf = (struct v4l2_format *)arg;
+
+ ret = 0;
+ switch(vf->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
+ int h = vf->fmt.pix.height;
+ int w = vf->fmt.pix.width;
+
+ if (h < 200) {
+ h = 200;
+ } else if (h > 625) {
+ h = 625;
+ }
+ if (w < 320) {
+ w = 320;
+ } else if (w > 720) {
+ w = 720;
+ }
+
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
+ sizeof(struct v4l2_format));
+ vf->fmt.pix.width = w;
+ vf->fmt.pix.height = h;
+
+ if (cmd == VIDIOC_S_FMT) {
+ pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,
+ PVR2_CID_HRES),
+ vf->fmt.pix.width);
+ pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,
+ PVR2_CID_VRES),
+ vf->fmt.pix.height);
+ }
+ } break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+			// ????? Still need to figure out how to do VBI correctly
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ case VIDIOC_STREAMON:
+ {
+ ret = pvr2_hdw_set_stream_type(hdw,dev_info->config);
+ if (ret < 0) return ret;
+ ret = pvr2_hdw_set_streaming(hdw,!0);
+ break;
+ }
+
+ case VIDIOC_STREAMOFF:
+ {
+ ret = pvr2_hdw_set_streaming(hdw,0);
+ break;
+ }
+
+ case VIDIOC_QUERYCTRL:
+ {
+ struct pvr2_ctrl *cptr;
+ struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg;
+ ret = 0;
+ if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ cptr = pvr2_hdw_get_ctrl_nextv4l(
+ hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
+ if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr);
+ } else {
+ cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id);
+ }
+ if (!cptr) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x not implemented here",
+ vc->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x mapping name=%s (%s)",
+ vc->id,pvr2_ctrl_get_name(cptr),
+ pvr2_ctrl_get_desc(cptr));
+ strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name));
+ vc->flags = pvr2_ctrl_get_v4lflags(cptr);
+ vc->default_value = pvr2_ctrl_get_def(cptr);
+ switch (pvr2_ctrl_get_type(cptr)) {
+ case pvr2_ctl_enum:
+ vc->type = V4L2_CTRL_TYPE_MENU;
+ vc->minimum = 0;
+ vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
+ vc->step = 1;
+ break;
+ case pvr2_ctl_bool:
+ vc->type = V4L2_CTRL_TYPE_BOOLEAN;
+ vc->minimum = 0;
+ vc->maximum = 1;
+ vc->step = 1;
+ break;
+ case pvr2_ctl_int:
+ vc->type = V4L2_CTRL_TYPE_INTEGER;
+ vc->minimum = pvr2_ctrl_get_min(cptr);
+ vc->maximum = pvr2_ctrl_get_max(cptr);
+ vc->step = 1;
+ break;
+ default:
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x name=%s not mappable",
+ vc->id,pvr2_ctrl_get_name(cptr));
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ case VIDIOC_QUERYMENU:
+ {
+ struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg;
+ unsigned int cnt = 0;
+ ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id),
+ vm->index,
+ vm->name,sizeof(vm->name)-1,
+ &cnt);
+ vm->name[cnt] = 0;
+ break;
+ }
+
+ case VIDIOC_G_CTRL:
+ {
+ struct v4l2_control *vc = (struct v4l2_control *)arg;
+ int val = 0;
+ ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
+ &val);
+ vc->value = val;
+ break;
+ }
+
+ case VIDIOC_S_CTRL:
+ {
+ struct v4l2_control *vc = (struct v4l2_control *)arg;
+ ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
+ vc->value);
+ break;
+ }
+
+ case VIDIOC_G_EXT_CTRLS:
+ {
+ struct v4l2_ext_controls *ctls =
+ (struct v4l2_ext_controls *)arg;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ int val;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val);
+ if (ret) {
+ ctls->error_idx = idx;
+ break;
+ }
+			/* Ensure that if this is read as a 64-bit value,
+			   the user will still get a sane value */
+ ctrl->value64 = 0;
+ ctrl->value = val;
+ }
+ break;
+ }
+
+ case VIDIOC_S_EXT_CTRLS:
+ {
+ struct v4l2_ext_controls *ctls =
+ (struct v4l2_ext_controls *)arg;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),
+ ctrl->value);
+ if (ret) {
+ ctls->error_idx = idx;
+ break;
+ }
+ }
+ break;
+ }
+
+ case VIDIOC_TRY_EXT_CTRLS:
+ {
+ struct v4l2_ext_controls *ctls =
+ (struct v4l2_ext_controls *)arg;
+ struct v4l2_ext_control *ctrl;
+ struct pvr2_ctrl *pctl;
+ unsigned int idx;
+ /* For the moment just validate that the requested control
+ actually exists. */
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id);
+ if (!pctl) {
+ ret = -EINVAL;
+ ctls->error_idx = idx;
+ break;
+ }
+ }
+ break;
+ }
+
+ case VIDIOC_LOG_STATUS:
+ {
+ pvr2_hdw_trigger_module_log(hdw);
+ ret = 0;
+ break;
+ }
+
+ default :
+ ret = v4l_compat_translate_ioctl(inode,file,cmd,
+ arg,pvr2_v4l2_do_ioctl);
+ }
+
+ pvr2_hdw_commit_ctl(hdw);
+
+ if (ret < 0) {
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl failure, ret=%d",ret);
+ } else {
+		if (pvrusb2_debug & PVR2_TRACE_ERROR_LEGS) {
+			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "pvr2_v4l2_do_ioctl failure, ret=%d"
+ " command was:",ret);
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
+ cmd);
+ }
+ }
+ } else {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl complete, ret=%d (0x%x)",
+ ret,ret);
+ }
+ return ret;
+}
+
+
+static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
+{
+ pvr2_trace(PVR2_TRACE_INIT,
+ "unregistering device video%d [%s]",
+ dip->vdev->minor,pvr2_config_get_name(dip->config));
+ if (dip->ctxt_idx >= 0) {
+ mutex_lock(&device_lock);
+ devices[dip->ctxt_idx] = NULL;
+ dip->ctxt_idx = -1;
+ mutex_unlock(&device_lock);
+ }
+ video_unregister_device(dip->vdev);
+}
+
+
+static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp)
+{
+ pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,-1);
+ pvr2_v4l2_dev_destroy(&vp->video_dev);
+
+ pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp);
+ pvr2_channel_done(&vp->channel);
+ kfree(vp);
+}
+
+
+void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
+{
+ struct pvr2_v4l2 *vp;
+ vp = container_of(chp,struct pvr2_v4l2,channel);
+ if (!vp->channel.mc_head->disconnect_flag) return;
+ if (vp->vfirst) return;
+ pvr2_v4l2_destroy_no_lock(vp);
+}
+
+
+int pvr2_v4l2_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+
+/* Temporary hack: use the ivtv API until a v4l2 one is available. */
+#define IVTV_IOC_G_CODEC 0xFFEE7703
+#define IVTV_IOC_S_CODEC 0xFFEE7704
+ if (cmd == IVTV_IOC_G_CODEC || cmd == IVTV_IOC_S_CODEC) return 0;
+ return video_usercopy(inode, file, cmd, arg, pvr2_v4l2_do_ioctl);
+}
+
+
+int pvr2_v4l2_release(struct inode *inode, struct file *file)
+{
+ struct pvr2_v4l2_fh *fhp = file->private_data;
+ struct pvr2_v4l2 *vp = fhp->vhead;
+ struct pvr2_context *mp = fhp->vhead->channel.mc_head;
+
+ pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release");
+
+ if (fhp->rhp) {
+ struct pvr2_stream *sp;
+ struct pvr2_hdw *hdw;
+ hdw = fhp->channel.mc_head->hdw;
+ pvr2_hdw_set_streaming(hdw,0);
+ sp = pvr2_ioread_get_stream(fhp->rhp);
+ if (sp) pvr2_stream_set_callback(sp,0,0);
+ pvr2_ioread_destroy(fhp->rhp);
+ fhp->rhp = 0;
+ }
+ v4l2_prio_close(&vp->prio, &fhp->prio);
+ file->private_data = NULL;
+
+ pvr2_context_enter(mp); do {
+ if (fhp->vnext) {
+ fhp->vnext->vprev = fhp->vprev;
+ } else {
+ vp->vlast = fhp->vprev;
+ }
+ if (fhp->vprev) {
+ fhp->vprev->vnext = fhp->vnext;
+ } else {
+ vp->vfirst = fhp->vnext;
+ }
+ fhp->vnext = 0;
+ fhp->vprev = 0;
+ fhp->vhead = 0;
+ pvr2_channel_done(&fhp->channel);
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "Destroying pvr_v4l2_fh id=%p",fhp);
+ kfree(fhp);
+ if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) {
+ pvr2_v4l2_destroy_no_lock(vp);
+ }
+ } while (0); pvr2_context_exit(mp);
+ return 0;
+}
+
+
+int pvr2_v4l2_open(struct inode *inode, struct file *file)
+{
+ struct pvr2_v4l2_dev *dip = 0; /* Our own context pointer */
+ struct pvr2_v4l2_fh *fhp;
+ struct pvr2_v4l2 *vp;
+ struct pvr2_hdw *hdw;
+
+ mutex_lock(&device_lock);
+ /* MCI 7-Jun-2006 Even though we're just doing what amounts to an
+ atomic read of the device mapping array here, we still need the
+ mutex. The problem is that there is a tiny race possible when
+ we register the device. We can't update the device mapping
+ array until after the device has been registered, owing to the
+ fact that we can't know the minor device number until after the
+ registration succeeds. And if another thread tries to open the
+ device in the window of time after registration but before the
+ map is updated, then it will get back an erroneous null pointer
+ and the open will result in a spurious failure. The only way to
+ prevent that is to (a) be inside the mutex here before we access
+ the array, and (b) cover the entire registration process later
+ on with this same mutex. Thus if we get inside the mutex here,
+ then we can be assured that the registration process actually
+ completed correctly. This is an unhappy complication from the
+ use of global data in a driver that lives in a preemptible
+ environment. It sure would be nice if the video device itself
+ had a means for storing and retrieving a local context pointer.
+ Oh wait. It did. But now it's gone. Silly me. */
+ {
+ unsigned int midx = iminor(file->f_dentry->d_inode);
+ if (midx < sizeof(devices)/sizeof(devices[0])) {
+ dip = devices[midx];
+ }
+ }
+ mutex_unlock(&device_lock);
+
+ if (!dip) return -ENODEV; /* Should be impossible but I'm paranoid */
+
+ vp = dip->v4lp;
+ hdw = vp->channel.hdw;
+
+ pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open");
+
+ if (!pvr2_hdw_dev_ok(hdw)) {
+ pvr2_trace(PVR2_TRACE_OPEN_CLOSE,
+ "pvr2_v4l2_open: hardware not ready");
+ return -EIO;
+ }
+
+ fhp = kmalloc(sizeof(*fhp),GFP_KERNEL);
+ if (!fhp) {
+ return -ENOMEM;
+ }
+ memset(fhp,0,sizeof(*fhp));
+
+ init_waitqueue_head(&fhp->wait_data);
+ fhp->dev_info = dip;
+
+ pvr2_context_enter(vp->channel.mc_head); do {
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp);
+ pvr2_channel_init(&fhp->channel,vp->channel.mc_head);
+ fhp->vnext = 0;
+ fhp->vprev = vp->vlast;
+ if (vp->vlast) {
+ vp->vlast->vnext = fhp;
+ } else {
+ vp->vfirst = fhp;
+ }
+ vp->vlast = fhp;
+ fhp->vhead = vp;
+ } while (0); pvr2_context_exit(vp->channel.mc_head);
+
+ fhp->file = file;
+ file->private_data = fhp;
+ v4l2_prio_open(&vp->prio,&fhp->prio);
+
+ fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
+
+ return 0;
+}
+
+
+static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
+{
+ wake_up(&fhp->wait_data);
+}
+
+static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
+{
+ int ret;
+ struct pvr2_stream *sp;
+ struct pvr2_hdw *hdw;
+ if (fh->rhp) return 0;
+
+ /* First read() attempt. Try to claim the stream and start
+ it... */
+ if ((ret = pvr2_channel_claim_stream(&fh->channel,
+ fh->dev_info->stream)) != 0) {
+ /* Someone else must already have it */
+ return ret;
+ }
+
+ fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream);
+ if (!fh->rhp) {
+ pvr2_channel_claim_stream(&fh->channel,0);
+ return -ENOMEM;
+ }
+
+ hdw = fh->channel.mc_head->hdw;
+ sp = fh->dev_info->stream->stream;
+ pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
+ pvr2_hdw_set_stream_type(hdw,fh->dev_info->config);
+ pvr2_hdw_set_streaming(hdw,!0);
+ ret = pvr2_ioread_set_enabled(fh->rhp,!0);
+
+ return ret;
+}
+
+
+static ssize_t pvr2_v4l2_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ int ret;
+
+ if (fh->fw_mode_flag) {
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ char *tbuf;
+ int c1,c2;
+ int tcnt = 0;
+ unsigned int offs = *ppos;
+
+ tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL);
+ if (!tbuf) return -ENOMEM;
+
+ while (count) {
+ c1 = count;
+ if (c1 > PAGE_SIZE) c1 = PAGE_SIZE;
+ c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1);
+ if (c2 < 0) {
+ tcnt = c2;
+ break;
+ }
+ if (!c2) break;
+ if (copy_to_user(buff,tbuf,c2)) {
+ tcnt = -EFAULT;
+ break;
+ }
+ offs += c2;
+ tcnt += c2;
+ buff += c2;
+ count -= c2;
+ *ppos += c2;
+ }
+ kfree(tbuf);
+ return tcnt;
+ }
+
+ if (!fh->rhp) {
+ ret = pvr2_v4l2_iosetup(fh);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ for (;;) {
+ ret = pvr2_ioread_read(fh->rhp,buff,count);
+ if (ret >= 0) break;
+ if (ret != -EAGAIN) break;
+ if (file->f_flags & O_NONBLOCK) break;
+ /* Doing blocking I/O. Wait here. */
+ ret = wait_event_interruptible(
+ fh->wait_data,
+ pvr2_ioread_avail(fh->rhp) >= 0);
+ if (ret < 0) break;
+ }
+
+ return ret;
+}
+
+
+static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ int ret;
+
+ if (fh->fw_mode_flag) {
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+ }
+
+ if (!fh->rhp) {
+ ret = pvr2_v4l2_iosetup(fh);
+ if (ret) return POLLERR;
+ }
+
+ poll_wait(file,&fh->wait_data,wait);
+
+ if (pvr2_ioread_avail(fh->rhp) >= 0) {
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ return mask;
+}
+
+
+static struct file_operations vdev_fops = {
+ .owner = THIS_MODULE,
+ .open = pvr2_v4l2_open,
+ .release = pvr2_v4l2_release,
+ .read = pvr2_v4l2_read,
+ .ioctl = pvr2_v4l2_ioctl,
+ .llseek = no_llseek,
+ .poll = pvr2_v4l2_poll,
+};
+
+
+#define VID_HARDWARE_PVRUSB2 38 /* FIXME : need a good value */
+
+static struct video_device vdev_template = {
+ .owner = THIS_MODULE,
+ .type = VID_TYPE_CAPTURE | VID_TYPE_TUNER,
+ .type2 = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE
+ | V4L2_CAP_TUNER | V4L2_CAP_AUDIO
+ | V4L2_CAP_READWRITE),
+ .hardware = VID_HARDWARE_PVRUSB2,
+ .fops = &vdev_fops,
+};
+
+
+static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
+ struct pvr2_v4l2 *vp,
+ enum pvr2_config cfg)
+{
+ int mindevnum;
+ int unit_number;
+ int v4l_type;
+ dip->v4lp = vp;
+ dip->config = cfg;
+
+
+ switch (cfg) {
+ case pvr2_config_mpeg:
+ v4l_type = VFL_TYPE_GRABBER;
+ dip->stream = &vp->channel.mc_head->video_stream;
+ break;
+ case pvr2_config_vbi:
+ v4l_type = VFL_TYPE_VBI;
+ break;
+ case pvr2_config_radio:
+ v4l_type = VFL_TYPE_RADIO;
+ break;
+ default:
+ /* Bail out (this should be impossible) */
+ err("Failed to set up pvrusb2 v4l dev"
+ " due to unrecognized config");
+ return;
+ }
+
+ if (!dip->stream) {
+ err("Failed to set up pvrusb2 v4l dev"
+ " due to missing stream instance");
+ return;
+ }
+
+ dip->vdev = video_device_alloc();
+ if (!dip->vdev) {
+ err("Alloc of pvrusb2 v4l video device failed");
+ return;
+ }
+
+ memcpy(dip->vdev,&vdev_template,sizeof(vdev_template));
+ dip->vdev->release = video_device_release;
+ mutex_lock(&device_lock);
+
+ mindevnum = -1;
+ unit_number = pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw);
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ mindevnum = video_nr[unit_number];
+ }
+ if ((video_register_device(dip->vdev, v4l_type, mindevnum) < 0) &&
+ (video_register_device(dip->vdev, v4l_type, -1) < 0)) {
+ err("Failed to register pvrusb2 v4l video device");
+ } else {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "registered device video%d [%s]",
+ dip->vdev->minor,pvr2_config_get_name(dip->config));
+ }
+
+ if ((dip->vdev->minor < sizeof(devices)/sizeof(devices[0])) &&
+ (devices[dip->vdev->minor] == NULL)) {
+ dip->ctxt_idx = dip->vdev->minor;
+ devices[dip->ctxt_idx] = dip;
+ }
+ mutex_unlock(&device_lock);
+
+ pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
+ dip->vdev->minor);
+}
+
+
+struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
+{
+ struct pvr2_v4l2 *vp;
+
+ vp = kmalloc(sizeof(*vp),GFP_KERNEL);
+ if (!vp) return vp;
+ memset(vp,0,sizeof(*vp));
+ vp->video_dev.ctxt_idx = -1;
+ pvr2_channel_init(&vp->channel,mnp);
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp);
+
+ vp->channel.check_func = pvr2_v4l2_internal_check;
+
+ /* register streams */
+ pvr2_v4l2_dev_init(&vp->video_dev,vp,pvr2_config_mpeg);
+
+
+ return vp;
+}
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
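As the comments near the top of this file and inside pvr2_v4l2_open() explain, the driver compensates for the missing per-device driver-data hook by keeping its own minor-number-indexed table, and one mutex has to cover both registration and lookup to close the race. A stripped-down sketch of that pattern follows; my_dev, my_devices, my_register and my_lookup are hypothetical names, not driver symbols, and the same headers as pvrusb2-v4l2.c are assumed:

    struct my_dev;				/* opaque per-device context */

    static struct my_dev *my_devices[256];
    static DEFINE_MUTEX(my_device_lock);

    /* Registration: the table slot can only be filled once the minor
       number is known, so the whole sequence stays under the mutex. */
    static int my_register(struct my_dev *d, struct video_device *vdev)
    {
    	int ret;

    	mutex_lock(&my_device_lock);
    	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
    	if ((ret == 0) && (vdev->minor < ARRAY_SIZE(my_devices)))
    		my_devices[vdev->minor] = d;
    	mutex_unlock(&my_device_lock);
    	return ret;
    }

    /* Lookup at open() time: taking the same mutex guarantees any
       in-flight registration has completed before the slot is read. */
    static struct my_dev *my_lookup(unsigned int minor)
    {
    	struct my_dev *d = NULL;

    	mutex_lock(&my_device_lock);
    	if (minor < ARRAY_SIZE(my_devices))
    		d = my_devices[minor];
    	mutex_unlock(&my_device_lock);
    	return d;
    }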
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h
new file mode 100644
index 000000000000..9a995e2d2256
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_V4L2_H
+#define __PVRUSB2_V4L2_H
+
+#include "pvrusb2-context.h"
+
+struct pvr2_v4l2;
+
+struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *);
+
+#endif /* __PVRUSB2_V4L2_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
new file mode 100644
index 000000000000..e4ec7f25194c
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -0,0 +1,253 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+
+ This source file is specifically designed to interface with the
+ saa711x support that is available in V4L starting with Linux
+ 2.6.15.
+
+*/
+
+#include "pvrusb2-video-v4l.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+
+
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/saa7115.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+struct pvr2_v4l_decoder {
+ struct pvr2_i2c_handler handler;
+ struct pvr2_decoder_ctrl ctrl;
+ struct pvr2_i2c_client *client;
+ struct pvr2_hdw *hdw;
+ unsigned long stale_mask;
+};
+
+
+static void set_input(struct pvr2_v4l_decoder *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct v4l2_routing route;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val);
+ switch(hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ route.input = SAA7115_COMPOSITE4;
+ break;
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ route.input = SAA7115_COMPOSITE5;
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ route.input = SAA7115_SVIDEO2;
+ break;
+ case PVR2_CVAL_INPUT_RADIO:
+ // ????? No idea yet what to do here
+ default:
+ return;
+ }
+ route.output = 0;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route);
+}
+
+
+static int check_input(struct pvr2_v4l_decoder *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->input_dirty != 0;
+}
+
+
+static void set_audio(struct pvr2_v4l_decoder *ctxt)
+{
+ u32 val;
+ struct pvr2_hdw *hdw = ctxt->hdw;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_audio %d",
+ hdw->srate_val);
+ switch (hdw->srate_val) {
+ default:
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000:
+ val = 48000;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100:
+ val = 44100;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000:
+ val = 32000;
+ break;
+ }
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val);
+}
+
+
+static int check_audio(struct pvr2_v4l_decoder *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->srate_dirty != 0;
+}
+
+
+struct pvr2_v4l_decoder_ops {
+ void (*update)(struct pvr2_v4l_decoder *);
+ int (*check)(struct pvr2_v4l_decoder *);
+};
+
+
+static const struct pvr2_v4l_decoder_ops decoder_ops[] = {
+ { .update = set_input, .check = check_input},
+ { .update = set_audio, .check = check_audio},
+};
+
+
+static void decoder_detach(struct pvr2_v4l_decoder *ctxt)
+{
+ ctxt->client->handler = 0;
+ ctxt->hdw->decoder_ctrl = 0;
+ kfree(ctxt);
+}
+
+
+static int decoder_check(struct pvr2_v4l_decoder *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (decoder_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void decoder_update(struct pvr2_v4l_decoder *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ decoder_ops[idx].update(ctxt);
+ }
+}
+
+
+static int decoder_detect(struct pvr2_i2c_client *cp)
+{
+ /* Attempt to query the decoder - let's see if it will answer */
+ struct v4l2_tuner vt;
+ int ret;
+
+ memset(&vt,0,sizeof(vt));
+ ret = pvr2_i2c_client_cmd(cp,VIDIOC_G_TUNER,&vt);
+ return ret == 0; /* Return true if it answered */
+}
+
+
+static void decoder_enable(struct pvr2_v4l_decoder *ctxt,int fl)
+{
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 decoder_enable(%d)",fl);
+ pvr2_v4l2_cmd_stream(ctxt->client,fl);
+}
+
+
+static int decoder_is_tuned(struct pvr2_v4l_decoder *ctxt)
+{
+ struct v4l2_tuner vt;
+ int ret;
+
+ memset(&vt,0,sizeof(vt));
+ ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
+ if (ret < 0) return -EINVAL;
+ return vt.signal ? 1 : 0;
+}
+
+
+static unsigned int decoder_describe(struct pvr2_v4l_decoder *ctxt,char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-video-v4l");
+}
+
+
+static const struct pvr2_i2c_handler_functions hfuncs = {
+ .detach = (void (*)(void *))decoder_detach,
+ .check = (int (*)(void *))decoder_check,
+ .update = (void (*)(void *))decoder_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe,
+};
+
+
+int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *hdw,
+ struct pvr2_i2c_client *cp)
+{
+ struct pvr2_v4l_decoder *ctxt;
+
+ if (hdw->decoder_ctrl) return 0;
+ if (cp->handler) return 0;
+ if (!decoder_detect(cp)) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->handler.func_data = ctxt;
+ ctxt->handler.func_table = &hfuncs;
+ ctxt->ctrl.ctxt = ctxt;
+ ctxt->ctrl.detach = (void (*)(void *))decoder_detach;
+ ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable;
+ ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->stale_mask = (1 << (sizeof(decoder_ops)/
+ sizeof(decoder_ops[0]))) - 1;
+ hdw->decoder_ctrl = &ctxt->ctrl;
+ cp->handler = &ctxt->handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x saa711x V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h
new file mode 100644
index 000000000000..2b917fda02e4
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_VIDEO_V4L_H
+#define __PVRUSB2_VIDEO_V4L_H
+
+/*
+
+ This module connects the pvrusb2 driver to the I2C chip level
+ driver which handles device video processing. This interface is
+ used internally by the driver; higher level code should only
+ interact through the interface provided by pvrusb2-hdw.h.
+
+*/
+
+
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+
+#endif /* __PVRUSB2_VIDEO_V4L_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.c b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c
new file mode 100644
index 000000000000..fcad346e3955
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c
@@ -0,0 +1,170 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+
+ This source file is specifically designed to interface with the
+ wm8775.
+
+*/
+
+#include "pvrusb2-wm8775.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+
+
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+struct pvr2_v4l_wm8775 {
+ struct pvr2_i2c_handler handler;
+ struct pvr2_i2c_client *client;
+ struct pvr2_hdw *hdw;
+ unsigned long stale_mask;
+};
+
+
+static void set_input(struct pvr2_v4l_wm8775 *ctxt)
+{
+ struct v4l2_routing route;
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ int msk = 0;
+
+ memset(&route,0,sizeof(route));
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c wm8775 set_input(val=%d msk=0x%x)",
+ hdw->input_val,msk);
+
+ // Always point to input #1 no matter what
+	// Always route from the same input (input 2) no matter what
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
+}
+
+static int check_input(struct pvr2_v4l_wm8775 *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->input_dirty != 0;
+}
+
+
+struct pvr2_v4l_wm8775_ops {
+ void (*update)(struct pvr2_v4l_wm8775 *);
+ int (*check)(struct pvr2_v4l_wm8775 *);
+};
+
+
+static const struct pvr2_v4l_wm8775_ops wm8775_ops[] = {
+ { .update = set_input, .check = check_input},
+};
+
+
+static unsigned int wm8775_describe(struct pvr2_v4l_wm8775 *ctxt,
+ char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-wm8775");
+}
+
+
+static void wm8775_detach(struct pvr2_v4l_wm8775 *ctxt)
+{
+ ctxt->client->handler = 0;
+ kfree(ctxt);
+}
+
+
+static int wm8775_check(struct pvr2_v4l_wm8775 *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (wm8775_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void wm8775_update(struct pvr2_v4l_wm8775 *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ wm8775_ops[idx].update(ctxt);
+ }
+}
+
+
+static const struct pvr2_i2c_handler_functions hfuncs = {
+ .detach = (void (*)(void *))wm8775_detach,
+ .check = (int (*)(void *))wm8775_check,
+ .update = (void (*)(void *))wm8775_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))wm8775_describe,
+};
+
+
+int pvr2_i2c_wm8775_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_v4l_wm8775 *ctxt;
+
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->handler.func_data = ctxt;
+ ctxt->handler.func_table = &hfuncs;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->stale_mask = (1 << (sizeof(wm8775_ops)/
+ sizeof(wm8775_ops[0]))) - 1;
+ cp->handler = &ctxt->handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x wm8775 V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.h b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h
new file mode 100644
index 000000000000..8aaeff4e1e20
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_WM8775_H
+#define __PVRUSB2_WM8775_H
+
+/*
+
+ This module connects the pvrusb2 driver to the I2C chip level
+ driver which performs analog -> digital audio conversion for
+ external audio inputs. This interface is used internally by the
+ driver; higher level code should only interact through the
+ interface provided by pvrusb2-hdw.h.
+
+*/
+
+
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_wm8775_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+
+#endif /* __PVRUSB2_WM8775_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2.h b/drivers/media/video/pvrusb2/pvrusb2.h
new file mode 100644
index 000000000000..074533e9c21e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_H
+#define __PVRUSB2_H
+
+/* Maximum number of pvrusb2 instances we can track at once.  You
+ might want to increase this; however, driver operation will not be
+ impaired if it is too small.  Additional units simply won't have an
+ ID assigned, and it may not be possible to specify module
+ parameters for those extra units. */
+#define PVR_NUM 20
+
+#endif /* __PVRUSB2_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index de7b9e6e932a..afc8f352b8e7 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -432,10 +432,10 @@ static void saa6752hs_old_set_params(struct i2c_client* client,
}
static int handle_ctrl(struct saa6752hs_mpeg_params *params,
- struct v4l2_ext_control *ctrl, int cmd)
+ struct v4l2_ext_control *ctrl, unsigned int cmd)
{
int old = 0, new;
- int set = cmd == VIDIOC_S_EXT_CTRLS;
+ int set = (cmd == VIDIOC_S_EXT_CTRLS);
new = ctrl->value;
switch (ctrl->id) {
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index f0c2111f14ad..da3007d2f411 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -871,9 +871,9 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
printk(KERN_INFO "%s: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%lx\n", dev->name,
+ "latency: %d, mmio: 0x%llx\n", dev->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
- dev->pci_lat,pci_resource_start(pci_dev,0));
+ dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
pci_set_master(pci_dev);
if (!pci_dma_supported(pci_dev, DMA_32BIT_MASK)) {
printk("%s: Oops: no 32bit PCI DMA ???\n",dev->name);
@@ -905,8 +905,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
pci_resource_len(pci_dev,0),
dev->name)) {
err = -EBUSY;
- printk(KERN_ERR "%s: can't get MMIO memory @ 0x%lx\n",
- dev->name,pci_resource_start(pci_dev,0));
+ printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
+ dev->name,(unsigned long long)pci_resource_start(pci_dev,0));
goto fail1;
}
dev->lmmio = ioremap(pci_resource_start(pci_dev,0), 0x1000);
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index 6be9c1131e1f..c18b31d9928c 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -2190,7 +2190,7 @@ static struct pci_driver stradis_driver = {
.remove = __devexit_p(stradis_remove)
};
-int __init stradis_init(void)
+static int __init stradis_init(void)
{
int retval;
@@ -2203,7 +2203,7 @@ int __init stradis_init(void)
return retval;
}
-void __exit stradis_exit(void)
+static void __exit stradis_exit(void)
{
pci_unregister_driver(&stradis_driver);
printk(KERN_INFO "stradis: module cleanup complete\n");
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index b6ae969563b2..2fadabf99688 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -22,11 +22,11 @@
*/
#define tda9887_info(fmt, arg...) do {\
- printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \
+ printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
#define tda9887_dbg(fmt, arg...) do {\
if (tuner_debug) \
- printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \
+ printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
@@ -84,8 +84,7 @@ struct tvnorm {
#define cAudioGain6 0x80 // bit c7
#define cTopMask 0x1f // bit c0:4
-#define cTopPalSecamDefault 0x14 // bit c0:4
-#define cTopNtscRadioDefault 0x10 // bit c0:4
+#define cTopDefault 0x10 // bit c0:4
//// third reg (e)
#define cAudioIF_4_5 0x00 // bit e0:1
@@ -123,7 +122,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_5_5 |
cVideoIF_38_90 ),
@@ -134,7 +133,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_0 |
cVideoIF_38_90 ),
@@ -145,7 +144,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
@@ -156,7 +155,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
@@ -165,7 +164,7 @@ static struct tvnorm tvnorms[] = {
.name = "SECAM-BGH",
.b = ( cPositiveAmTV |
cQSS ),
- .c = ( cTopPalSecamDefault),
+ .c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_5_5 |
cVideoIF_38_90 ),
@@ -174,7 +173,7 @@ static struct tvnorm tvnorms[] = {
.name = "SECAM-L",
.b = ( cPositiveAmTV |
cQSS ),
- .c = ( cTopPalSecamDefault),
+ .c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
@@ -184,7 +183,7 @@ static struct tvnorm tvnorms[] = {
.b = ( cOutputPort2Inactive |
cPositiveAmTV |
cQSS ),
- .c = ( cTopPalSecamDefault),
+ .c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_33_90 ),
@@ -195,7 +194,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
@@ -206,7 +205,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
@@ -217,7 +216,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_58_75 ),
@@ -230,7 +229,7 @@ static struct tvnorm radio_stereo = {
cQSS ),
.c = ( cDeemphasisOFF |
cAudioGain6 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cTunerGainLow |
cAudioIF_5_5 |
cRadioIF_38_90 ),
@@ -242,7 +241,7 @@ static struct tvnorm radio_mono = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cTunerGainLow |
cAudioIF_5_5 |
cRadioIF_38_90 ),
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index e95792fd70f8..011413cf34a8 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -40,7 +40,6 @@ static unsigned int no_autodetect = 0;
static unsigned int show_i2c = 0;
/* insmod options used at runtime => read/write */
-static unsigned int tuner_debug_old = 0;
int tuner_debug = 0;
static unsigned int tv_range[2] = { 44, 958 };
@@ -54,8 +53,6 @@ static char ntsc[] = "-";
module_param(addr, int, 0444);
module_param(no_autodetect, int, 0444);
module_param(show_i2c, int, 0444);
-/* Note: tuner_debug is deprecated and will be removed in 2.6.17 */
-module_param_named(tuner_debug,tuner_debug_old, int, 0444);
module_param_named(debug,tuner_debug, int, 0644);
module_param_string(pal, pal, sizeof(pal), 0644);
module_param_string(secam, secam, sizeof(secam), 0644);
@@ -442,11 +439,6 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
t->audmode = V4L2_TUNER_MODE_STEREO;
t->mode_mask = T_UNINITIALIZED;
t->tuner_status = tuner_status;
- if (tuner_debug_old) {
- tuner_debug = tuner_debug_old;
- printk(KERN_ERR "tuner: tuner_debug is deprecated and will be removed in 2.6.17.\n");
- printk(KERN_ERR "tuner: use the debug option instead.\n");
- }
if (show_i2c) {
unsigned char buffer[16];
@@ -730,14 +722,10 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
struct v4l2_frequency *f = arg;
+ if (set_mode (client, t, f->type, "VIDIOC_S_FREQUENCY")
+ == EINVAL)
+ return 0;
switch_v4l2();
- if ((V4L2_TUNER_RADIO == f->type && V4L2_TUNER_RADIO != t->mode)
- || (V4L2_TUNER_DIGITAL_TV == f->type
- && V4L2_TUNER_DIGITAL_TV != t->mode)) {
- if (set_mode (client, t, f->type, "VIDIOC_S_FREQUENCY")
- == EINVAL)
- return 0;
- }
set_freq(client,f->frequency);
break;
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 3f3182a24da1..56e01b622417 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -33,7 +33,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
-#include <linux/usb_input.h>
+#include <linux/usb/input.h>
#include "usbvideo.h"
#include "quickcam_messenger.h"
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 14e523471354..97f946db8597 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -1101,6 +1101,11 @@ const char **v4l2_ctrl_get_menu(u32 id)
"MPEG-2 SVCD-compatible Stream",
NULL
};
+ static const char *mpeg_stream_vbi_fmt[] = {
+ "No VBI",
+ "Private packet, IVTV format",
+ NULL
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -1129,6 +1134,8 @@ const char **v4l2_ctrl_get_menu(u32 id)
return mpeg_video_bitrate_mode;
case V4L2_CID_MPEG_STREAM_TYPE:
return mpeg_stream_type;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return mpeg_stream_vbi_fmt;
default:
return NULL;
}
@@ -1182,6 +1189,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_MPEG_STREAM_PID_PCR: name = "Stream PCR Program ID"; break;
case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: name = "Stream PES Audio ID"; break;
case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: name = "Stream PES Video ID"; break;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT: name = "Stream VBI Format"; break;
default:
return -EINVAL;
@@ -1208,6 +1216,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_MPEG_VIDEO_ASPECT:
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
case V4L2_CID_MPEG_STREAM_TYPE:
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
qctrl->type = V4L2_CTRL_TYPE_MENU;
step = 1;
break;
@@ -1367,6 +1376,11 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl)
return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0);
case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO:
return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0);
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return v4l2_ctrl_query_fill(qctrl,
+ V4L2_MPEG_STREAM_VBI_FMT_NONE,
+ V4L2_MPEG_STREAM_VBI_FMT_IVTV, 1,
+ V4L2_MPEG_STREAM_VBI_FMT_NONE);
default:
return -EINVAL;
}
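These v4l2-common hunks only add the menu text, the control name and the query-fill entry; a bridge driver still has to ask for them. A hypothetical caller is sketched below (my_query_vbi_fmt is not a real symbol, and the V4L2_CID_MPEG_STREAM_VBI_FMT / V4L2_MPEG_STREAM_VBI_FMT_* definitions are assumed to come from the videodev2.h change elsewhere in this series):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/videodev2.h>
    #include <media/v4l2-common.h>

    /* Illustrative only: how a bridge driver might expose the new control. */
    static int my_query_vbi_fmt(struct v4l2_queryctrl *qc)
    {
    	const char **menu;
    	int ret;

    	memset(qc, 0, sizeof(*qc));
    	qc->id = V4L2_CID_MPEG_STREAM_VBI_FMT;

    	/* Fills type (menu), range and default from the tables above. */
    	ret = v4l2_ctrl_query_fill_std(qc);
    	if (ret)
    		return ret;

    	/* Entries: "No VBI", "Private packet, IVTV format". */
    	menu = v4l2_ctrl_get_menu(qc->id);
    	return menu ? 0 : -EINVAL;
    }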
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 74714e5bcf03..3ff8378ea660 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -305,10 +305,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
}
out:
- if (pp0_array)
- kfree(pp0_array);
- if (p0_array)
- kfree(p0_array);
+ kfree(pp0_array);
+ kfree(p0_array);
return rc;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index af6ec553ff7c..85689ab46cbc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1378,8 +1378,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
return 0;
out_free_port_info:
- if (hba)
- kfree(hba);
+ kfree(hba);
out:
return error;
}
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index febbdd4e0605..3305c12372a2 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -683,9 +683,10 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
c->mem_alloc = 1;
sb->current_mem_size = 1 + res->end - res->start;
sb->current_mem_base = res->start;
- osm_info("%s: allocated %ld bytes of PCI memory at "
- "0x%08lX.\n", c->name,
- 1 + res->end - res->start, res->start);
+ osm_info("%s: allocated %llu bytes of PCI memory at "
+ "0x%016llX.\n", c->name,
+ (unsigned long long)(1 + res->end - res->start),
+ (unsigned long long)res->start);
}
}
@@ -704,9 +705,10 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
c->io_alloc = 1;
sb->current_io_size = 1 + res->end - res->start;
sb->current_mem_base = res->start;
- osm_info("%s: allocated %ld bytes of PCI I/O at 0x%08lX"
- ".\n", c->name, 1 + res->end - res->start,
- res->start);
+ osm_info("%s: allocated %llu bytes of PCI I/O at "
+ "0x%016llX.\n", c->name,
+ (unsigned long long)(1 + res->end - res->start),
+ (unsigned long long)res->start);
}
}
@@ -1239,7 +1241,6 @@ EXPORT_SYMBOL(i2o_cntxt_list_remove);
EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
#endif
EXPORT_SYMBOL(i2o_msg_get_wait);
-EXPORT_SYMBOL(i2o_msg_nop);
EXPORT_SYMBOL(i2o_find_iop);
EXPORT_SYMBOL(i2o_iop_find_device);
EXPORT_SYMBOL(i2o_event_register);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index aff83f966803..c8426a9bf273 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -420,8 +420,10 @@ static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
unsigned long mask;
mask = probe_irq_on();
- if (!mask)
+ if (!mask) {
+ probe_irq_off(mask);
return NO_IRQ;
+ }
/*
* Enable the ADC interrupt.
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 1fdf03fd2da7..9706cc19134a 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -85,7 +85,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
}
memset(sp, 0, sizeof(struct service_processor));
- sp->lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&sp->lock);
INIT_LIST_HEAD(&sp->command_queue);
pci_set_drvdata(pdev, (void *)sp);
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index da8e4d7339cc..8576a65ca1c3 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -546,9 +546,9 @@ static int mmci_probe(struct amba_device *dev, void *id)
mmc_add_host(mmc);
- printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n",
+ printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
- dev->res.start, dev->irq[0], dev->irq[1]);
+ (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
init_timer(&host->timer);
host->timer.data = (unsigned long)host;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5ac265dde423..1344ad7a4b14 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -86,14 +86,14 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
block and "-2" means the penultimate block.
config MTD_REDBOOT_PARTS_UNALLOCATED
- bool " Include unallocated flash regions"
+ bool "Include unallocated flash regions"
depends on MTD_REDBOOT_PARTS
help
If you need to register each unallocated flash region as a MTD
'partition', enable this option.
config MTD_REDBOOT_PARTS_READONLY
- bool " Force read-only for RedBoot system images"
+ bool "Force read-only for RedBoot system images"
depends on MTD_REDBOOT_PARTS
help
If you need to force read-only for 'RedBoot', 'RedBoot Config' and
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 0d435814aaa1..39edb8250fbc 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -357,6 +357,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
+ mtd->writesize = 1;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
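This one-line addition, and the matching ones in the jedec, map_absent, map_ram, map_rom, block2mtd, ms02-nv, phram, pmc551 and slram drivers below, set the new mtd_info writesize field; these NOR-style and RAM-backed devices can program single bytes, so writesize is 1 (mtd_dataflash instead uses its page size). A hedged sketch of the probe-time field setup, using the same field names as these drivers:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/map.h>

    static void example_fill_mtd(struct mtd_info *mtd, struct map_info *map)
    {
            mtd->name      = map->name;
            mtd->flags     = MTD_CAP_NORFLASH;
            mtd->erasesize = 0x10000;       /* device dependent */
            mtd->writesize = 1;             /* byte-programmable device */
    }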
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
index c40b48dabed3..2c3f019197c1 100644
--- a/drivers/mtd/chips/jedec.c
+++ b/drivers/mtd/chips/jedec.c
@@ -256,6 +256,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
MTD->name = map->name;
MTD->type = MTD_NORFLASH;
MTD->flags = MTD_CAP_NORFLASH;
+ MTD->writesize = 1;
MTD->erasesize = SectorSize*(map->buswidth);
// printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
MTD->size = priv->size;
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index a611de9b1515..ac01a949b687 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -64,7 +64,8 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
mtd->write = map_absent_write;
mtd->sync = map_absent_sync;
mtd->flags = 0;
- mtd->erasesize = PAGE_SIZE;
+ mtd->erasesize = PAGE_SIZE;
+ mtd->writesize = 1;
__module_get(THIS_MODULE);
return mtd;
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 763925747db6..3a66680abfd0 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -71,6 +71,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->write = mapram_write;
mtd->sync = mapram_nop;
mtd->flags = MTD_CAP_RAM;
+ mtd->writesize = 1;
mtd->erasesize = PAGE_SIZE;
while(mtd->size & (mtd->erasesize - 1))
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index bc6ee9ef8a31..1b328b1378fd 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -47,6 +47,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->sync = maprom_nop;
mtd->flags = MTD_CAP_ROM;
mtd->erasesize = map->size;
+ mtd->writesize = 1;
__module_get(THIS_MODULE);
return mtd;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 0d98c223c5fc..be3f1c136d02 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -324,6 +324,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
dev->mtd.type = MTD_RAM;
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 4ab7670770e4..08dfb899b272 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -225,6 +225,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->owner = THIS_MODULE;
mtd->read = ms02nv_read;
mtd->write = ms02nv_write;
+ mtd->writesize = 1;
ret = -EIO;
if (add_mtd_device(mtd)) {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a19480d07888..04271d02b6b6 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -478,6 +478,7 @@ add_dataflash(struct spi_device *spi, char *name,
device->name = (pdata && pdata->name) ? pdata->name : priv->name;
device->size = nr_pages * pagesize;
device->erasesize = pagesize;
+ device->writesize = pagesize;
device->owner = THIS_MODULE;
device->type = MTD_DATAFLASH;
device->flags = MTD_CAP_NORFLASH;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index e09e416667d3..6c7337f9ebbb 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -151,6 +151,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.owner = THIS_MODULE;
new->mtd.type = MTD_RAM;
new->mtd.erasesize = PAGE_SIZE;
+ new->mtd.writesize = 1;
ret = -EAGAIN;
if (add_mtd_device(&new->mtd)) {
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 666cce1bf60c..30f07b473ae2 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -551,11 +551,11 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
/*
* Some screen fun
*/
- printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%lx\n",
+ printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%llx\n",
(size<1024)?size:(size<1048576)?size>>10:size>>20,
(size<1024)?'B':(size<1048576)?'K':'M',
size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
- (dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK );
+ (unsigned long long)((dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK));
/*
* Check to see the state of the memory
@@ -685,8 +685,8 @@ static int __init init_pmc551(void)
break;
}
- printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
- PCI_Device->resource[0].start);
+ printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
+ (unsigned long long)PCI_Device->resource[0].start);
/*
* The PMC551 device acts VERY weird if you don't init it
@@ -778,7 +778,8 @@ static int __init init_pmc551(void)
mtd->type = MTD_RAM;
mtd->name = "PMC551 RAM board";
mtd->erasesize = 0x10000;
- mtd->owner = THIS_MODULE;
+ mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
if (add_mtd_device(mtd)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index b3f665e3c38b..542a0c009006 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -209,6 +209,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->owner = THIS_MODULE;
(*curmtd)->mtdinfo->type = MTD_RAM;
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
+ (*curmtd)->mtdinfo->writesize = 1;
if (add_mtd_device((*curmtd)->mtdinfo)) {
E("slram: Failed to register new device\n");
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 6bdaacc6d6f9..83d0b2a52527 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -212,7 +212,7 @@ config MTD_NETtel
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
config MTD_ALCHEMY
- tristate ' AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support'
+ tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support"
depends on SOC_AU1X00
help
Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index c350878d4592..a50587005263 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -123,9 +123,10 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
" %s(): Unable to register resource"
- " 0x%.08lx-0x%.08lx - kernel bug?\n",
+ " 0x%.16llx-0x%.16llx - kernel bug?\n",
__func__,
- window->rsrc.start, window->rsrc.end);
+ (unsigned long long)window->rsrc.start,
+ (unsigned long long)window->rsrc.end);
}
#if 0
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index ea5073781b3a..16732794edf3 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -177,9 +177,10 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
window->rsrc.parent = NULL;
printk(KERN_DEBUG MOD_NAME
": %s(): Unable to register resource"
- " 0x%.08lx-0x%.08lx - kernel bug?\n",
+ " 0x%.16llx-0x%.16llx - kernel bug?\n",
__func__,
- window->rsrc.start, window->rsrc.end);
+ (unsigned long long)window->rsrc.start,
+ (unsigned long long)window->rsrc.end);
}
/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 2c9cc7f37e92..c26488a1793a 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -42,7 +42,6 @@ struct ixp2000_flash_info {
struct map_info map;
struct mtd_partition *partitions;
struct resource *res;
- int nr_banks;
};
static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
@@ -183,7 +182,6 @@ static int ixp2000_flash_probe(struct platform_device *dev)
*/
info->map.phys = NO_XIP;
- info->nr_banks = ixp_data->nr_banks;
info->map.size = ixp_data->nr_banks * window_size;
info->map.bankwidth = 1;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 433c3cac3ca9..d6301f08906d 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -182,7 +182,7 @@ static struct physmap_flash_data physmap_flash_data = {
static struct resource physmap_flash_resource = {
.start = CONFIG_MTD_PHYSMAP_START,
- .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN,
+ .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
.flags = IORESOURCE_MEM,
};
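The physmap fix reflects that struct resource describes an inclusive [start, end] range: a window of LEN bytes beginning at START ends at START + LEN - 1, and the old value claimed one byte too many. A minimal sketch of the convention:

    #include <linux/ioport.h>

    /* A 0x100-byte window at 0x08000000: .end is the last byte, not one past it. */
    static struct resource example_flash_resource = {
            .start = 0x08000000,
            .end   = 0x08000000 + 0x100 - 1,
            .flags = IORESOURCE_MEM,
    };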
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 28b8a571a91a..331a15859d71 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -164,8 +164,9 @@ static int __init init_scx200_docflash(void)
outl(pmr, scx200_cb_base + SCx200_PMR);
}
- printk(KERN_INFO NAME ": DOCCS mapped at 0x%lx-0x%lx, width %d\n",
- docmem.start, docmem.end, width);
+ printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n",
+ (unsigned long long)docmem.start,
+ (unsigned long long)docmem.end, width);
scx200_docflash_map.size = size;
if (width == 8)
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 0758cb1d0105..4db2055cee31 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -18,6 +18,7 @@
#include <linux/ioport.h>
#include <asm/ebus.h>
#include <asm/oplib.h>
+#include <asm/prom.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -30,146 +31,141 @@
#define UFLASH_WINDOW_SIZE 0x200000
#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */
-MODULE_AUTHOR
- ("Eric Brower <ebrower@usa.net>");
-MODULE_DESCRIPTION
- ("User-programmable flash device on Sun Microsystems boardsets");
-MODULE_SUPPORTED_DEVICE
- ("userflash");
-MODULE_LICENSE
- ("GPL");
+MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
+MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
+MODULE_SUPPORTED_DEVICE("userflash");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0");
static LIST_HEAD(device_list);
struct uflash_dev {
- char * name; /* device name */
+ char *name; /* device name */
struct map_info map; /* mtd map info */
- struct mtd_info * mtd; /* mtd info */
- struct list_head list;
+ struct mtd_info *mtd; /* mtd info */
};
struct map_info uflash_map_templ = {
- .name = "SUNW,???-????",
- .size = UFLASH_WINDOW_SIZE,
- .bankwidth = UFLASH_BUSWIDTH,
+ .name = "SUNW,???-????",
+ .size = UFLASH_WINDOW_SIZE,
+ .bankwidth = UFLASH_BUSWIDTH,
};
-int uflash_devinit(struct linux_ebus_device* edev)
+int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp)
{
- int iTmp, nregs;
- struct linux_prom_registers regs[2];
- struct uflash_dev *pdev;
-
- iTmp = prom_getproperty(
- edev->prom_node, "reg", (void *)regs, sizeof(regs));
- if ((iTmp % sizeof(regs[0])) != 0) {
- printk("%s: Strange reg property size %d\n",
- UFLASH_DEVNAME, iTmp);
- return -ENODEV;
- }
+ struct uflash_dev *up;
+ struct resource *res;
- nregs = iTmp / sizeof(regs[0]);
+ res = &edev->resource[0];
- if (nregs != 1) {
+ if (edev->num_addrs != 1) {
/* Non-CFI userflash device-- once I find one we
* can work on supporting it.
*/
- printk("%s: unsupported device at 0x%lx (%d regs): " \
+ printk("%s: unsupported device at 0x%llx (%d regs): " \
"email ebrower@usa.net\n",
- UFLASH_DEVNAME, edev->resource[0].start, nregs);
+ dp->full_name, (unsigned long long)res->start,
+ edev->num_addrs);
+
return -ENODEV;
}
- if(0 == (pdev = kmalloc(sizeof(struct uflash_dev), GFP_KERNEL))) {
- printk("%s: unable to kmalloc new device\n", UFLASH_DEVNAME);
- return(-ENOMEM);
- }
+ up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
/* copy defaults and tweak parameters */
- memcpy(&pdev->map, &uflash_map_templ, sizeof(uflash_map_templ));
- pdev->map.size = regs[0].reg_size;
-
- iTmp = prom_getproplen(edev->prom_node, "model");
- pdev->name = kmalloc(iTmp, GFP_KERNEL);
- prom_getstring(edev->prom_node, "model", pdev->name, iTmp);
- if(0 != pdev->name && 0 < strlen(pdev->name)) {
- pdev->map.name = pdev->name;
- }
- pdev->map.phys = edev->resource[0].start;
- pdev->map.virt = ioremap_nocache(edev->resource[0].start, pdev->map.size);
- if(0 == pdev->map.virt) {
- printk("%s: failed to map device\n", __FUNCTION__);
- kfree(pdev->name);
- kfree(pdev);
- return(-1);
+ memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
+ up->map.size = (res->end - res->start) + 1UL;
+
+ up->name = of_get_property(dp, "model", NULL);
+ if (up->name && 0 < strlen(up->name))
+ up->map.name = up->name;
+
+ up->map.phys = res->start;
+
+ up->map.virt = ioremap_nocache(res->start, up->map.size);
+ if (!up->map.virt) {
+ printk("%s: Failed to map device.\n", dp->full_name);
+ kfree(up);
+
+ return -EINVAL;
}
- simple_map_init(&pdev->map);
+ simple_map_init(&up->map);
/* MTD registration */
- pdev->mtd = do_map_probe("cfi_probe", &pdev->map);
- if(0 == pdev->mtd) {
- iounmap(pdev->map.virt);
- kfree(pdev->name);
- kfree(pdev);
- return(-ENXIO);
+ up->mtd = do_map_probe("cfi_probe", &up->map);
+ if (!up->mtd) {
+ iounmap(up->map.virt);
+ kfree(up);
+
+ return -ENXIO;
}
- list_add(&pdev->list, &device_list);
+ up->mtd->owner = THIS_MODULE;
- pdev->mtd->owner = THIS_MODULE;
+ add_mtd_device(up->mtd);
- add_mtd_device(pdev->mtd);
- return(0);
+ dev_set_drvdata(&edev->ofdev.dev, up);
+
+ return 0;
}
-static int __init uflash_init(void)
+static int __devinit uflash_probe(struct of_device *dev, const struct of_device_id *match)
{
- struct linux_ebus *ebus = NULL;
- struct linux_ebus_device *edev = NULL;
-
- for_each_ebus(ebus) {
- for_each_ebusdev(edev, ebus) {
- if (!strcmp(edev->prom_name, UFLASH_OBPNAME)) {
- if(0 > prom_getproplen(edev->prom_node, "user")) {
- DEBUG(2, "%s: ignoring device at 0x%lx\n",
- UFLASH_DEVNAME, edev->resource[0].start);
- } else {
- uflash_devinit(edev);
- }
- }
- }
- }
+ struct linux_ebus_device *edev = to_ebus_device(&dev->dev);
+ struct device_node *dp = dev->node;
- if(list_empty(&device_list)) {
- printk("%s: unable to locate device\n", UFLASH_DEVNAME);
+ if (of_find_property(dp, "user", NULL))
return -ENODEV;
- }
- return(0);
+
+ return uflash_devinit(edev, dp);
}
-static void __exit uflash_cleanup(void)
+static int __devexit uflash_remove(struct of_device *dev)
{
- struct list_head *udevlist;
- struct uflash_dev *udev;
-
- list_for_each(udevlist, &device_list) {
- udev = list_entry(udevlist, struct uflash_dev, list);
- DEBUG(2, "%s: removing device %s\n",
- UFLASH_DEVNAME, udev->name);
-
- if(0 != udev->mtd) {
- del_mtd_device(udev->mtd);
- map_destroy(udev->mtd);
- }
- if(0 != udev->map.virt) {
- iounmap(udev->map.virt);
- udev->map.virt = NULL;
- }
- kfree(udev->name);
- kfree(udev);
+ struct uflash_dev *up = dev_get_drvdata(&dev->dev);
+
+ if (up->mtd) {
+ del_mtd_device(up->mtd);
+ map_destroy(up->mtd);
}
+ if (up->map.virt) {
+ iounmap(up->map.virt);
+ up->map.virt = NULL;
+ }
+
+ kfree(up);
+
+ return 0;
+}
+
+static struct of_device_id uflash_match[] = {
+ {
+ .name = UFLASH_OBPNAME,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, uflash_match);
+
+static struct of_platform_driver uflash_driver = {
+ .name = UFLASH_DEVNAME,
+ .match_table = uflash_match,
+ .probe = uflash_probe,
+ .remove = __devexit_p(uflash_remove),
+};
+
+static int __init uflash_init(void)
+{
+ return of_register_driver(&uflash_driver, &ebus_bus_type);
+}
+
+static void __exit uflash_exit(void)
+{
+ of_unregister_driver(&uflash_driver);
}
module_init(uflash_init);
-module_exit(uflash_cleanup);
+module_exit(uflash_exit);
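Besides switching to the OF/ebus driver model, the sun_uflash rewrite drops the module-local device list: probe now remembers its state with dev_set_drvdata() and remove fetches it back with dev_get_drvdata(), so no global list walking is needed. A minimal sketch of that per-device state pattern (names are illustrative):

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct example_state {
            int nregs;
            /* ... */
    };

    static int example_probe(struct device *dev)
    {
            struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

            if (!st)
                    return -ENOMEM;
            dev_set_drvdata(dev, st);       /* remembered per device */
            return 0;
    }

    static int example_remove(struct device *dev)
    {
            struct example_state *st = dev_get_drvdata(dev);

            kfree(st);
            return 0;
    }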
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index aa18d45b264b..9a4b59d92525 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -78,7 +78,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
return -EINVAL;
}
- if (offset >= 0 && offset < mtd->size)
+ if (offset >= 0 && offset <= mtd->size)
return file->f_pos = offset;
return -EINVAL;
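The comparison becomes <= because an offset equal to mtd->size is the valid end-of-device position, the analogue of seeking exactly to end-of-file; only offsets strictly beyond the device are still rejected with -EINVAL.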
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 27083ed0a017..80a76654d963 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1176,7 +1176,7 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
- return status;
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/**
@@ -1271,10 +1271,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
buf = nand_transfer_oob(chip, buf, ops);
- readlen -= ops->ooblen;
- if (!readlen)
- break;
-
if (!(chip->options & NAND_NO_READRDY)) {
/*
* Apply delay or wait for ready/busy pin. Do this
@@ -1288,6 +1284,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ readlen -= ops->ooblen;
+ if (!readlen)
+ break;
+
/* Increment page address */
realpage++;
@@ -1610,13 +1610,13 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (!writelen)
return 0;
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
/* Check, if it is write protected */
if (nand_check_wp(mtd))
return -EIO;
- chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
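Two distinct fixes in this nand_base hunk set: nand_write_oob_std() now maps the raw chip status onto a kernel error code (-EIO when NAND_STATUS_FAIL is set, 0 otherwise) instead of returning status bits to its callers, and nand_do_write_ops() selects the target chip before the write-protect test so nand_check_wp() queries the chip that is actually being written. A hedged sketch of the status translation:

    #include <linux/errno.h>
    #include <linux/mtd/nand.h>

    /* Convert a raw NAND status word into an errno-style result. */
    static int example_status_to_errno(int status)
    {
            return (status & NAND_STATUS_FAIL) ? -EIO : 0;
    }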
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index fe8d38514ba6..e5bd88f2d560 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -61,15 +61,15 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = &ndfc_ctrl;
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
- writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_CMD);
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
else
- writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_ALE);
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
}
static int ndfc_ready(struct mtd_info *mtd)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 2c262fe03d8a..ff5cef24d5bb 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -63,8 +63,6 @@
#include <asm/arch/regs-nand.h>
#include <asm/arch/nand.h>
-#define PFX "s3c2410-nand: "
-
#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int hardware_ecc = 1;
#else
@@ -99,6 +97,12 @@ struct s3c2410_nand_mtd {
int scan_res;
};
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C2412,
+ TYPE_S3C2440,
+};
+
/* overview of the s3c2410 nand state */
struct s3c2410_nand_info {
@@ -112,9 +116,11 @@ struct s3c2410_nand_info {
struct resource *area;
struct clk *clk;
void __iomem *regs;
+ void __iomem *sel_reg;
+ int sel_bit;
int mtd_count;
- unsigned char is_s3c2440;
+ enum s3c_cpu_type cpu_type;
};
/* conversion functions */
@@ -148,7 +154,7 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
#define NS_IN_KHZ 1000000
-static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
@@ -172,53 +178,58 @@ static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
/* controller setup */
-static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, struct platform_device *pdev)
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
+ struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
unsigned long clkrate = clk_get_rate(info->clk);
+ int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
- unsigned long cfg;
+ unsigned long cfg = 0;
/* calculate the timing information for the controller */
clkrate /= 1000; /* turn clock into kHz for ease of use */
if (plat != NULL) {
- tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4);
- twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8);
- twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8);
+ tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+ twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
} else {
/* default timings */
- tacls = 4;
+ tacls = tacls_max;
twrph0 = 8;
twrph1 = 8;
}
if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
- printk(KERN_ERR PFX "cannot get timings suitable for board\n");
+ dev_err(info->device, "cannot get suitable timings\n");
return -EINVAL;
}
- printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
- if (!info->is_s3c2440) {
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
cfg = S3C2410_NFCONF_EN;
cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
- } else {
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
cfg = S3C2440_NFCONF_TACLS(tacls - 1);
cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
/* enable the controller and de-assert nFCE */
- writel(S3C2440_NFCONT_ENABLE | S3C2440_NFCONT_ENABLE,
- info->regs + S3C2440_NFCONT);
+ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
}
- pr_debug(PFX "NF_CONF is 0x%lx\n", cfg);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
writel(cfg, info->regs + S3C2410_NFCONF);
return 0;
@@ -231,26 +242,21 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct nand_chip *this = mtd->priv;
- void __iomem *reg;
unsigned long cur;
- unsigned long bit;
nmtd = this->priv;
info = nmtd->info;
- bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE;
- reg = info->regs + ((info->is_s3c2440) ? S3C2440_NFCONT : S3C2410_NFCONF);
-
if (chip != -1 && allow_clk_stop(info))
clk_enable(info->clk);
- cur = readl(reg);
+ cur = readl(info->sel_reg);
if (chip == -1) {
- cur |= bit;
+ cur |= info->sel_bit;
} else {
if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
- printk(KERN_ERR PFX "chip %d out of range\n", chip);
+ dev_err(info->device, "invalid chip %d\n", chip);
return;
}
@@ -259,10 +265,10 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
(info->platform->select_chip) (nmtd->set, chip);
}
- cur &= ~bit;
+ cur &= ~info->sel_bit;
}
- writel(cur, reg);
+ writel(cur, info->sel_reg);
if (chip == -1 && allow_clk_stop(info))
clk_disable(info->clk);
@@ -311,15 +317,25 @@ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
static int s3c2410_nand_devready(struct mtd_info *mtd)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- if (info->is_s3c2440)
- return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
}
+static int s3c2440_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
/* ECC handling functions */
-static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
{
pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc);
@@ -487,11 +503,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_set *set)
{
struct nand_chip *chip = &nmtd->chip;
+ void __iomem *regs = info->regs;
- chip->IO_ADDR_R = info->regs + S3C2410_NFDATA;
- chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
- chip->cmd_ctrl = s3c2410_nand_hwcontrol;
- chip->dev_ready = s3c2410_nand_devready;
chip->write_buf = s3c2410_nand_write_buf;
chip->read_buf = s3c2410_nand_read_buf;
chip->select_chip = s3c2410_nand_select_chip;
@@ -500,11 +513,37 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->options = 0;
chip->controller = &info->controller;
- if (info->is_s3c2440) {
- chip->IO_ADDR_R = info->regs + S3C2440_NFDATA;
- chip->IO_ADDR_W = info->regs + S3C2440_NFDATA;
- chip->cmd_ctrl = s3c2440_nand_hwcontrol;
- }
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->IO_ADDR_W = regs + S3C2410_NFDATA;
+ info->sel_reg = regs + S3C2410_NFCONF;
+ info->sel_bit = S3C2410_NFCONF_nFCE;
+ chip->cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->dev_ready = s3c2410_nand_devready;
+ break;
+
+ case TYPE_S3C2440:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2440_NFCONT_nFCE;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2440_nand_devready;
+ break;
+
+ case TYPE_S3C2412:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2412_NFCONT_nFCE0;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2412_nand_devready;
+
+ if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+ dev_info(info->device, "System booted from NAND\n");
+
+ break;
+ }
+
+ chip->IO_ADDR_R = chip->IO_ADDR_W;
nmtd->info = info;
nmtd->mtd.priv = chip;
@@ -512,17 +551,25 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->set = set;
if (hardware_ecc) {
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.layout = &nand_hw_eccoob;
- if (info->is_s3c2440) {
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+
}
} else {
chip->ecc.mode = NAND_ECC_SOFT;
@@ -537,7 +584,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
* nand layer to look for devices
*/
-static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
+static int s3c24xx_nand_probe(struct platform_device *pdev,
+ enum s3c_cpu_type cpu_type)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
struct s3c2410_nand_info *info;
@@ -592,7 +640,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
info->device = &pdev->dev;
info->platform = plat;
info->regs = ioremap(res->start, size);
- info->is_s3c2440 = is_s3c2440;
+ info->cpu_type = cpu_type;
if (info->regs == NULL) {
dev_err(&pdev->dev, "cannot reserve register region\n");
@@ -699,12 +747,17 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
static int s3c2410_nand_probe(struct platform_device *dev)
{
- return s3c24xx_nand_probe(dev, 0);
+ return s3c24xx_nand_probe(dev, TYPE_S3C2410);
}
static int s3c2440_nand_probe(struct platform_device *dev)
{
- return s3c24xx_nand_probe(dev, 1);
+ return s3c24xx_nand_probe(dev, TYPE_S3C2440);
+}
+
+static int s3c2412_nand_probe(struct platform_device *dev)
+{
+ return s3c24xx_nand_probe(dev, TYPE_S3C2412);
}
static struct platform_driver s3c2410_nand_driver = {
@@ -729,16 +782,29 @@ static struct platform_driver s3c2440_nand_driver = {
},
};
+static struct platform_driver s3c2412_nand_driver = {
+ .probe = s3c2412_nand_probe,
+ .remove = s3c2410_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .driver = {
+ .name = "s3c2412-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init s3c2410_nand_init(void)
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
+ platform_driver_register(&s3c2412_nand_driver);
platform_driver_register(&s3c2440_nand_driver);
return platform_driver_register(&s3c2410_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
+ platform_driver_unregister(&s3c2412_nand_driver);
platform_driver_unregister(&s3c2440_nand_driver);
platform_driver_unregister(&s3c2410_nand_driver);
}
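The s3c2410 driver stops treating the controller variant as a single is_s3c2440 flag and instead records an enum s3c_cpu_type, switching on it to pick per-variant data registers, chip-select bits, ready callbacks and ECC hooks; that is what lets the new S3C2412 support share most of the S3C2440 paths while keeping its own NFSTAT ready bit and nFCE0 select bit. A minimal sketch of the dispatch pattern with illustrative symbols:

    #include <linux/compiler.h>

    enum example_cpu_type { TYPE_A, TYPE_B, TYPE_C };

    struct example_info {
            enum example_cpu_type cpu_type;
            void __iomem *sel_reg;
            int sel_bit;
    };

    static void example_setup(struct example_info *info, void __iomem *regs)
    {
            switch (info->cpu_type) {
            case TYPE_A:
                    info->sel_reg = regs + 0x00;    /* variant-specific offset */
                    info->sel_bit = 1 << 11;
                    break;
            case TYPE_B:
            case TYPE_C:                            /* B and C share a layout */
                    info->sel_reg = regs + 0x04;
                    info->sel_bit = 1 << 1;
                    break;
            }
    }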
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
index a0b4b1edcb0d..f40081069ab2 100644
--- a/drivers/mtd/nand/ts7250.c
+++ b/drivers/mtd/nand/ts7250.c
@@ -97,7 +97,7 @@ static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
unsigned char bits;
- bits = (ctrl & NAND_CNE) << 2;
+ bits = (ctrl & NAND_NCE) << 2;
bits |= ctrl & NAND_CLE;
bits |= (ctrl & NAND_ALE) >> 2;
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index bb44509fd404..07136ec423bd 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -508,11 +508,11 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
* speak of. We simply pull the packet out of its PIO buffer (which is slow)
* and queue it for the kernel. Then we reset the card for the next packet.
*
- * We sometimes get suprise interrupts late both because the SMP IRQ delivery
+ * We sometimes get surprise interrupts late both because the SMP IRQ delivery
* is message passing and because the card sometimes seems to deliver late. I
* think if it is part way through a receive and the mode is changed it carries
* on receiving and sends us an interrupt. We have to band aid all these cases
- * to get a sensible 150kbytes/second performance. Even then you want a small
+ * to get a sensible 150kBytes/second performance. Even then you want a small
* TCP window.
*/
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e27778926eba..d2f808979a2b 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,8 +375,7 @@ limit of 4K.
of the drivers, and will likely be provided by some future kernel.
*/
enum pci_flags_bit {
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+ PCI_USES_MASTER=4,
};
enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
@@ -446,95 +445,95 @@ static struct vortex_chip_info {
int io_size;
} vortex_info_tbl[] __devinitdata = {
{"3c590 Vortex 10Mbps",
- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c595 Vortex 100baseTx",
- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c595 Vortex 100baseT4",
- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c595 Vortex 100base-MII",
- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c900 Boomerang 10baseT",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
+ PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
{"3c900 Boomerang 10Mbps Combo",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
+ PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
{"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c900 Cyclone 10Mbps Combo",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c900B-FL Cyclone 10base-FL",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c905 Boomerang 100baseTx",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
{"3c905 Boomerang 100baseT4",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
{"3c905B Cyclone 100baseTx",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c905B Cyclone 10/100/BNC",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c905B-FX Cyclone 100baseFx",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c905C Tornado",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
{"3c980 Cyclone",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c980C Python-T",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3cSOHO100-TX Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c555 Laptop Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
{"3c556 Laptop Tornado",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
HAS_HWCKSM, 128, },
{"3c556B Laptop Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
WNO_XCVR_PWR|HAS_HWCKSM, 128, },
{"3c575 [Megahertz] 10/100 LAN CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
{"3c575 Boomerang CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
{"3CCFE575BT Cyclone CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
INVERT_LED_PWR|HAS_HWCKSM, 128, },
{"3CCFE575CT Tornado CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
{"3CCFE656 Cyclone CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
INVERT_LED_PWR|HAS_HWCKSM, 128, },
{"3CCFEM656B Cyclone+Winmodem CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
INVERT_LED_PWR|HAS_HWCKSM, 128, },
{"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
{"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c920 Tornado",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c982 Hydra Dual Port A",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
{"3c982 Hydra Dual Port B",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
{"3c905B-T4",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c920B-EMB-WNM Tornado",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
{NULL,}, /* NULL terminated list. */
};
@@ -1408,8 +1407,10 @@ static int __devinit vortex_probe1(struct device *gendev,
}
if (print_info) {
- printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
- print_name, pci_resource_start(pdev, 2),
+ printk(KERN_INFO "%s: CardBus functions mapped "
+ "%16.16llx->%p\n",
+ print_name,
+ (unsigned long long)pci_resource_start(pdev, 2),
vp->cb_fn_base);
}
EL3WINDOW(2);
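The PCI_USES_IO/PCI_USES_MEM/PCI_ADDRx bits removed here (and from eepro100, epic100, fealnx and dl2k below) appear to be leftovers from the old pci-scan style probing tables; BAR handling is done by the PCI core these days, so only PCI_USES_MASTER survives where bus mastering still has to be switched on. A hedged sketch of the probe-side calls that replace what those flags used to describe:

    #include <linux/pci.h>

    static int example_pci_setup(struct pci_dev *pdev)
    {
            int rc = pci_enable_device(pdev);

            if (rc)
                    return rc;
            rc = pci_request_regions(pdev, "example");
            if (rc)
                    return rc;
            pci_set_master(pdev);   /* roughly what PCI_USES_MASTER stood for */
            return 0;
    }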
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 0cdc830449d8..d26dd6a7062d 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1823,7 +1823,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
struct cp_private *cp;
int rc;
void __iomem *regs;
- long pciaddr;
+ resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
u8 pci_rev;
@@ -1883,8 +1883,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
- pci_resource_len(pdev, 1), pci_name(pdev));
+ printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
goto err_out_res;
}
@@ -1916,8 +1916,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
regs = ioremap(pciaddr, CP_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
- pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
+ printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%llx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ (unsigned long long)pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
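Alongside the format-string fixes, the local that holds the BAR address changes from long to resource_size_t, the type the resource/PCI API uses for bus addresses, so 64-bit resource values are not truncated on 32-bit builds. A minimal sketch of the idea, assuming BAR 1 is the MMIO region as in this driver:

    #include <linux/pci.h>
    #include <asm/io.h>

    static void __iomem *example_map_bar1(struct pci_dev *pdev)
    {
            resource_size_t addr = pci_resource_start(pdev, 1);

            return ioremap(addr, pci_resource_len(pdev, 1));
    }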
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index abd6261465f1..ed2e3c03bc88 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1341,9 +1341,9 @@ static int rtl8139_open (struct net_device *dev)
netif_start_queue (dev);
if (netif_msg_ifup(tp))
- printk(KERN_DEBUG "%s: rtl8139_open() ioaddr %#lx IRQ %d"
- " GP Pins %2.2x %s-duplex.\n",
- dev->name, pci_resource_start (tp->pci_dev, 1),
+ printk(KERN_DEBUG "%s: rtl8139_open() ioaddr %#llx IRQ %d"
+ " GP Pins %2.2x %s-duplex.\n", dev->name,
+ (unsigned long long)pci_resource_start (tp->pci_dev, 1),
dev->irq, RTL_R8 (MediaStatus),
tp->mii.full_duplex ? "full" : "half");
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 6e75482d75f2..53449207e53b 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -683,11 +683,6 @@ struct netdev_private {
};
/* The station address location in the EEPROM. */
-#ifdef MEM_MAPPING
-#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
-#else
-#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
-#endif
/* The struct pci_device_id consist of:
vendor, device Vendor and device ID to match (or PCI_ANY_ID)
subvendor, subdevice Subsystem vendor and device ID to match (or PCI_ANY_ID)
@@ -695,9 +690,10 @@ struct netdev_private {
class_mask of the class are honored during the comparison.
driver_data Data private to the driver.
*/
-static struct pci_device_id rio_pci_tbl[] = {
- {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0,}
+
+static const struct pci_device_id rio_pci_tbl[] = {
+ {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
+ { }
};
MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
#define TX_TIMEOUT (4*HZ)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 24996da4c1c4..7965a9b08e79 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -410,10 +410,7 @@ dm9000_probe(struct platform_device *pdev)
if (pdev->num_resources < 2) {
ret = -ENODEV;
goto out;
- }
-
- switch (pdev->num_resources) {
- case 2:
+ } else if (pdev->num_resources == 2) {
base = pdev->resource[0].start;
if (!request_mem_region(base, 4, ndev->name)) {
@@ -423,17 +420,16 @@ dm9000_probe(struct platform_device *pdev)
ndev->base_addr = base;
ndev->irq = pdev->resource[1].start;
- db->io_addr = (void *)base;
- db->io_data = (void *)(base + 4);
-
- break;
+ db->io_addr = (void __iomem *)base;
+ db->io_data = (void __iomem *)(base + 4);
- case 3:
+ } else {
db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (db->addr_res == NULL || db->data_res == NULL) {
+ if (db->addr_res == NULL || db->data_res == NULL ||
+ db->irq_res == NULL) {
printk(KERN_ERR PFX "insufficient resources\n");
ret = -ENOENT;
goto out;
@@ -482,7 +478,6 @@ dm9000_probe(struct platform_device *pdev)
/* ensure at least we have a default set of IO routines */
dm9000_set_io(db, iosize);
-
}
/* check to see if anything is being over-ridden */
@@ -564,6 +559,13 @@ dm9000_probe(struct platform_device *pdev)
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = db->srom[i];
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ /* try reading from mac */
+
+ for (i = 0; i < 6; i++)
+ ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
+ }
+
if (!is_valid_ether_addr(ndev->dev_addr))
printk("%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
@@ -663,7 +665,6 @@ dm9000_init_dm9000(struct net_device *dev)
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
dev->trans_start = 0;
- spin_lock_init(&db->lock);
}
/*
@@ -767,7 +768,7 @@ dm9000_stop(struct net_device *ndev)
* receive the packet to upper layer, free the transmitted packet
*/
-void
+static void
dm9000_tx_done(struct net_device *dev, board_info_t * db)
{
int tx_status = ior(db, DM9000_NSR); /* Got TX status */
@@ -1187,13 +1188,14 @@ dm9000_drv_remove(struct platform_device *pdev)
}
static struct platform_driver dm9000_driver = {
+ .driver = {
+ .name = "dm9000",
+ .owner = THIS_MODULE,
+ },
.probe = dm9000_probe,
.remove = dm9000_drv_remove,
.suspend = dm9000_drv_suspend,
.resume = dm9000_drv_resume,
- .driver = {
- .name = "dm9000",
- },
};
static int __init
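Two functional additions in the dm9000 changes: when the SROM bytes do not form a valid MAC address the driver now falls back to reading the address from the chip's PAR registers, and the platform_driver gains .driver.owner = THIS_MODULE (plus the name moved under .driver) so module refcounting works through the driver core. A hedged sketch of the address fallback, with the register accessor left abstract:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static void example_fill_mac(struct net_device *ndev, u8 (*read_chip_reg)(int))
    {
            int i;

            if (is_valid_ether_addr(ndev->dev_addr))
                    return;         /* SROM already gave something usable */

            /* Otherwise read the MAC back out of the chip's address registers. */
            for (i = 0; i < 6; i++)
                    ndev->dev_addr[i] = read_chip_reg(i);
    }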
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index f37170cc1a37..93a286570923 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2678,9 +2678,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
goto err_out_free;
}
- DPRINTK(PROBE, INFO, "addr 0x%lx, irq %d, "
+ DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
- pci_resource_start(pdev, 0), pdev->irq,
+ (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 467fc861360d..ecf5ad85a684 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -278,11 +278,6 @@ having to sign an Intel NDA when I'm helping Intel sell their own product!
static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
-enum pci_flags_bit {
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
-};
-
/* Offsets to the various registers.
All accesses need not be longword aligned. */
enum speedo_offsets {
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 724d7dc35fa3..ee34a16eb4e2 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -191,23 +191,10 @@ IVc. Errata
*/
-enum pci_id_flags_bits {
- /* Set PCI command register bits before calling probe1(). */
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- /* Read and map the single following PCI BAR. */
- PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
- PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
-};
-
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1
-#ifdef USE_IO_OPS
-#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
-#else
-#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
-#endif
typedef enum {
SMSC_83C170_0,
@@ -218,7 +205,6 @@ typedef enum {
struct epic_chip_info {
const char *name;
- enum pci_id_flags_bits pci_flags;
int io_size; /* Needed for I/O region check or ioremap(). */
int drv_flags; /* Driver use, intended as capability flags. */
};
@@ -227,11 +213,11 @@ struct epic_chip_info {
/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
{ "SMSC EPIC/100 83c170",
- EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
+ EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
{ "SMSC EPIC/100 83c170",
- EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
+ EPIC_TOTAL_SIZE, TYPE2_INTR },
{ "SMSC EPIC/C 83c175",
- EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
+ EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
};
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index a8449265e5fd..13eca7ede2af 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -126,16 +126,6 @@ MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
#define MIN_REGION_SIZE 136
-enum pci_flags_bit {
- PCI_USES_IO = 1,
- PCI_USES_MEM = 2,
- PCI_USES_MASTER = 4,
- PCI_ADDR0 = 0x10 << 0,
- PCI_ADDR1 = 0x10 << 1,
- PCI_ADDR2 = 0x10 << 2,
- PCI_ADDR3 = 0x10 << 3,
-};
-
/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
HAS_MII_XCVR,
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index bd6983d1afba..db694c832989 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -22,7 +22,7 @@
* Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
*
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
- * Copyright (c) 2004-2005 Macq Electronique SA.
+ * Copyright (c) 2004-2006 Macq Electronique SA.
*/
#include <linux/config.h>
@@ -51,7 +51,7 @@
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
@@ -80,6 +80,8 @@ static unsigned int fec_hw[] = {
(MCF_MBAR + 0x1000),
#elif defined(CONFIG_M520x)
(MCF_MBAR+0x30000),
+#elif defined(CONFIG_M532x)
+ (MCF_MBAR+0xfc030000),
#else
&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
@@ -143,7 +145,7 @@ typedef struct {
#define TX_RING_MOD_MASK 15 /* for this to work */
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
-#error "FEC: descriptor ring size contants too large"
+#error "FEC: descriptor ring size constants too large"
#endif
/* Interrupt events/masks.
@@ -167,12 +169,12 @@ typedef struct {
/*
- * The 5270/5271/5280/5282 RX control register also contains maximum frame
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -308,6 +310,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct fec_enet_private *fep;
volatile fec_t *fecp;
volatile cbd_t *bdp;
+ unsigned short status;
fep = netdev_priv(dev);
fecp = (volatile fec_t*)dev->base_addr;
@@ -320,8 +323,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
+ status = bdp->cbd_sc;
#ifndef final_version
- if (bdp->cbd_sc & BD_ENET_TX_READY) {
+ if (status & BD_ENET_TX_READY) {
/* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since dev->tbusy should be set.
*/
@@ -332,7 +336,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Clear all of the status flags.
*/
- bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+ status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer.
*/
@@ -366,21 +370,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&fep->lock);
- /* Send it on its way. Tell FEC its ready, interrupt when done,
- * its the last BD of the frame, and to put the CRC on the end.
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
*/
- bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
dev->trans_start = jiffies;
/* Trigger transmission start */
- fecp->fec_x_des_active = 0x01000000;
+ fecp->fec_x_des_active = 0;
/* If this was the last BD in the ring, start at the beginning again.
*/
- if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+ if (status & BD_ENET_TX_WRAP) {
bdp = fep->tx_bd_base;
} else {
bdp++;
@@ -491,43 +496,44 @@ fec_enet_tx(struct net_device *dev)
{
struct fec_enet_private *fep;
volatile cbd_t *bdp;
+ unsigned short status;
struct sk_buff *skb;
fep = netdev_priv(dev);
spin_lock(&fep->lock);
bdp = fep->dirty_tx;
- while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+ while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
if (bdp == fep->cur_tx && fep->tx_full == 0) break;
skb = fep->tx_skbuff[fep->skb_dirty];
/* Check for errors. */
- if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
BD_ENET_TX_CSL)) {
fep->stats.tx_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */
+ if (status & BD_ENET_TX_HB) /* No heartbeat */
fep->stats.tx_heartbeat_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */
+ if (status & BD_ENET_TX_LC) /* Late collision */
fep->stats.tx_window_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */
+ if (status & BD_ENET_TX_RL) /* Retrans limit */
fep->stats.tx_aborted_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */
+ if (status & BD_ENET_TX_UN) /* Underrun */
fep->stats.tx_fifo_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
+ if (status & BD_ENET_TX_CSL) /* Carrier lost */
fep->stats.tx_carrier_errors++;
} else {
fep->stats.tx_packets++;
}
#ifndef final_version
- if (bdp->cbd_sc & BD_ENET_TX_READY)
+ if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
/* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
*/
- if (bdp->cbd_sc & BD_ENET_TX_DEF)
+ if (status & BD_ENET_TX_DEF)
fep->stats.collisions++;
/* Free the sk buffer associated with this last transmit.
@@ -538,7 +544,7 @@ fec_enet_tx(struct net_device *dev)
/* Update pointer to next buffer descriptor to be transmitted.
*/
- if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp++;
@@ -568,9 +574,14 @@ fec_enet_rx(struct net_device *dev)
struct fec_enet_private *fep;
volatile fec_t *fecp;
volatile cbd_t *bdp;
+ unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
fep = netdev_priv(dev);
fecp = (volatile fec_t*)dev->base_addr;
@@ -580,13 +591,13 @@ fec_enet_rx(struct net_device *dev)
*/
bdp = fep->cur_rx;
-while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
#ifndef final_version
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
- if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
+ if ((status & BD_ENET_RX_LAST) == 0)
printk("FEC ENET: rcv is not +last\n");
#endif
@@ -594,26 +605,26 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
goto rx_processing_done;
/* Check for errors. */
- if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
fep->stats.rx_errors++;
- if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
/* Frame too long or too short. */
fep->stats.rx_length_errors++;
}
- if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
fep->stats.rx_frame_errors++;
- if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */
- fep->stats.rx_crc_errors++;
- if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */
+ if (status & BD_ENET_RX_CR) /* CRC Error */
fep->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ fep->stats.rx_fifo_errors++;
}
/* Report late collisions as a frame error.
* On this error, the BD is closed, but we don't know what we
* have in the buffer. So, just drop this frame on the floor.
*/
- if (bdp->cbd_sc & BD_ENET_RX_CL) {
+ if (status & BD_ENET_RX_CL) {
fep->stats.rx_errors++;
fep->stats.rx_frame_errors++;
goto rx_processing_done;
@@ -639,9 +650,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
} else {
skb->dev = dev;
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb,
- (unsigned char *)__va(bdp->cbd_bufaddr),
- pkt_len-4, 0);
+ eth_copy_and_sum(skb, data, pkt_len-4, 0);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
@@ -649,15 +658,16 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
/* Clear the status flags for this buffer.
*/
- bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+ status &= ~BD_ENET_RX_STATS;
/* Mark the buffer empty.
*/
- bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
/* Update BD pointer to next entry.
*/
- if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+ if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
else
bdp++;
@@ -667,9 +677,9 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
#endif
- } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
+ } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
fep->cur_rx = (cbd_t *)bdp;
#if 0
@@ -680,11 +690,12 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
* our way back to the interrupt return only to come right back
* here.
*/
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
#endif
}
+/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
@@ -696,10 +707,12 @@ fec_enet_mii(struct net_device *dev)
fep = netdev_priv(dev);
ep = fep->hwp;
mii_reg = ep->fec_mii_data;
+
+ spin_lock(&fep->lock);
if ((mip = mii_head) == NULL) {
printk("MII and no head!\n");
- return;
+ goto unlock;
}
if (mip->mii_func != NULL)
@@ -711,6 +724,9 @@ fec_enet_mii(struct net_device *dev)
if ((mip = mii_head) != NULL)
ep->fec_mii_data = mip->mii_regval;
+
+unlock:
+ spin_unlock(&fep->lock);
}
static int
@@ -728,8 +744,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
retval = 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&fep->lock,flags);
if ((mip = mii_free) != NULL) {
mii_free = mip->mii_next;
@@ -749,7 +764,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
retval = 1;
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&fep->lock,flags);
return(retval);
}
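Two themes run through the fec.c hunks: the volatile buffer-descriptor status word cbd_sc is read once into a local status, tested and modified there, and written back in a single store; and the old save_flags()/cli()/restore_flags() critical sections become spin_lock_irqsave()/spin_unlock_irqrestore() on fep->lock, which also works on SMP. A minimal sketch of the locking conversion (names follow the driver):

    #include <linux/spinlock.h>

    static void example_queue_mii_request(spinlock_t *lock)
    {
            unsigned long flags;

            /* Replaces: save_flags(flags); cli(); ... restore_flags(flags); */
            spin_lock_irqsave(lock, flags);
            /* ... manipulate the shared MII request list ... */
            spin_unlock_irqrestore(lock, flags);
    }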
@@ -1216,7 +1231,7 @@ static phy_info_t const * const phy_info[] = {
};
/* ------------------------------------------------------------------------- */
-
+#if !defined(CONFIG_M532x)
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id);
@@ -1224,6 +1239,7 @@ mii_link_interrupt(void *dev_id);
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs);
#endif
+#endif
#if defined(CONFIG_M5272)
@@ -1384,13 +1400,13 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
{
volatile unsigned char *icrp;
volatile unsigned long *imrp;
- int i;
+ int i, ilip;
b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
MCFINTC_ICR0);
- for (i = 23; (i < 36); i++)
- icrp[i] = 0x23;
+ for (i = 23, ilip = 0x28; (i < 36); i++)
+ icrp[i] = ilip--;
imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
MCFINTC_IMRH);
@@ -1618,6 +1634,159 @@ static void __inline__ fec_uncache(unsigned long addr)
/* ------------------------------------------------------------------------- */
+#elif defined(CONFIG_M532x)
+/*
+ * Code specific for M532x
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ int b;
+ static const struct idesc {
+ char *name;
+ unsigned short irq;
+ } *idp, id[] = {
+ { "fec(TXF)", 36 },
+ { "fec(TXB)", 37 },
+ { "fec(TXFIFO)", 38 },
+ { "fec(TXCR)", 39 },
+ { "fec(RXF)", 40 },
+ { "fec(RXB)", 41 },
+ { "fec(MII)", 42 },
+ { "fec(LC)", 43 },
+ { "fec(HBERR)", 44 },
+ { "fec(GRA)", 45 },
+ { "fec(EBERR)", 46 },
+ { "fec(BABT)", 47 },
+ { "fec(BABR)", 48 },
+ { NULL },
+ };
+
+ fep = netdev_priv(dev);
+ b = (fep->index) ? 128 : 64;
+
+ /* Setup interrupt handlers. */
+ for (idp = id; idp->name; idp++) {
+ if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
+ printk("FEC: Could not allocate %s IRQ(%d)!\n",
+ idp->name, b+idp->irq);
+ }
+
+ /* Unmask interrupts */
+ MCF_INTC0_ICR36 = 0x2;
+ MCF_INTC0_ICR37 = 0x2;
+ MCF_INTC0_ICR38 = 0x2;
+ MCF_INTC0_ICR39 = 0x2;
+ MCF_INTC0_ICR40 = 0x2;
+ MCF_INTC0_ICR41 = 0x2;
+ MCF_INTC0_ICR42 = 0x2;
+ MCF_INTC0_ICR43 = 0x2;
+ MCF_INTC0_ICR44 = 0x2;
+ MCF_INTC0_ICR45 = 0x2;
+ MCF_INTC0_ICR46 = 0x2;
+ MCF_INTC0_ICR47 = 0x2;
+ MCF_INTC0_ICR48 = 0x2;
+
+ MCF_INTC0_IMRH &= ~(
+ MCF_INTC_IMRH_INT_MASK36 |
+ MCF_INTC_IMRH_INT_MASK37 |
+ MCF_INTC_IMRH_INT_MASK38 |
+ MCF_INTC_IMRH_INT_MASK39 |
+ MCF_INTC_IMRH_INT_MASK40 |
+ MCF_INTC_IMRH_INT_MASK41 |
+ MCF_INTC_IMRH_INT_MASK42 |
+ MCF_INTC_IMRH_INT_MASK43 |
+ MCF_INTC_IMRH_INT_MASK44 |
+ MCF_INTC_IMRH_INT_MASK45 |
+ MCF_INTC_IMRH_INT_MASK46 |
+ MCF_INTC_IMRH_INT_MASK47 |
+ MCF_INTC_IMRH_INT_MASK48 );
+
+ /* Set up gpio outputs for MII lines */
+ MCF_GPIO_PAR_FECI2C |= (0 |
+ MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
+ MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO);
+ MCF_GPIO_PAR_FEC = (0 |
+ MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC |
+ MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC);
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ volatile fec_t *fecp;
+
+ fecp = fep->hwp;
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
+ fecp->fec_x_cntrl = 0x00;
+
+ /*
+ * Set MII speed to 2.5 MHz
+ */
+ fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
+ fecp->fec_mii_speed = fep->phy_speed;
+
+ fec_restart(dev, 0);
+}
+
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp;
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+
+ fecp = fep->hwp;
+
+ if (FEC_FLASHMAC) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = FEC_FLASHMAC;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
+ } else {
+ *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
+ *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
+
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+}
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+}
+
+static void __inline__ fec_localhw_setup(void)
+{
+}
+
+/*
+ * Do not need to make region uncached on 532x.
+ */
+static void __inline__ fec_uncache(unsigned long addr)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+
#else
/*
@@ -1985,9 +2154,12 @@ fec_enet_open(struct net_device *dev)
mii_do_cmd(dev, fep->phy->config);
mii_do_cmd(dev, phy_cmd_config); /* display configuration */
- /* FIXME: use netif_carrier_{on,off} ; this polls
- * until link is up which is wrong... could be
- * 30 seconds or more we are trapped in here. -jgarzik
+ /* Poll until the PHY tells us its configuration
+ * (not link state).
+ * The request is initiated by mii_do_cmd above, but the answer
+ * comes back by interrupt.
+ * This should take about 25 usec per register at 2.5 MHz,
+ * and we read approximately 5 registers.
*/
while(!fep->sequence_done)
schedule();
@@ -2253,15 +2425,11 @@ int __init fec_enet_init(struct net_device *dev)
*/
fec_request_intrs(dev);
- /* Clear and enable interrupts */
- fecp->fec_ievent = 0xffc00000;
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
- FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
fecp->fec_hash_table_high = 0;
fecp->fec_hash_table_low = 0;
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
dev->base_addr = (unsigned long)fecp;
@@ -2281,6 +2449,11 @@ int __init fec_enet_init(struct net_device *dev)
/* setup MII interface */
fec_set_mii(dev, fep);
+ /* Clear and enable interrupts */
+ fecp->fec_ievent = 0xffc00000;
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+
/* Queue up command to detect the PHY and initialize the
* remainder of the interface.
*/
@@ -2312,11 +2485,6 @@ fec_restart(struct net_device *dev, int duplex)
fecp->fec_ecntrl = 1;
udelay(10);
- /* Enable interrupts we wish to service.
- */
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
- FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
-
/* Clear any outstanding interrupt.
*/
fecp->fec_ievent = 0xffc00000;
@@ -2408,7 +2576,12 @@ fec_restart(struct net_device *dev, int duplex)
/* And last, enable the transmit and receive processing.
*/
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
+
+ /* Enable interrupts we wish to service.
+ */
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
}
static void
@@ -2420,9 +2593,16 @@ fec_stop(struct net_device *dev)
fep = netdev_priv(dev);
fecp = fep->hwp;
- fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
-
- while(!(fecp->fec_ievent & FEC_ENET_GRA));
+ /*
+ * We cannot expect a graceful transmit stop without link.
+ */
+ if (fep->link)
+ {
+ fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
+ udelay(10);
+ if (!(fecp->fec_ievent & FEC_ENET_GRA))
+ printk("fec_stop: Graceful transmit stop did not complete!\n");
+ }
/* Whack a reset. We should wait for this.
*/
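
The fec_stop() change only requests a graceful transmit stop when a link is present, because without link the FEC_ENET_GRA event may never be raised and the old unbounded busy-wait could spin forever. A rough, self-contained sketch of that defensive shape (the register layout and bit name below are illustrative, not this controller's):

#include <linux/kernel.h>
#include <linux/delay.h>

struct nic_regs {
        unsigned int x_cntrl;
        unsigned int ievent;
};

#define NIC_EV_GRA_DONE 0x10000000      /* illustrative "graceful stop done" bit */

static void nic_tx_stop(volatile struct nic_regs *regs, int link_up)
{
        /* Only ask for a graceful stop when the PHY reports link; otherwise
         * the completion event may never arrive. */
        if (link_up) {
                regs->x_cntrl = 0x01;           /* request graceful TX stop */
                udelay(10);                     /* give the MAC a moment */
                if (!(regs->ievent & NIC_EV_GRA_DONE))
                        printk(KERN_WARNING "graceful transmit stop did not complete\n");
        }

        /* A full controller reset follows in either case. */
}
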
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 965c5c49fcdc..1d421606984f 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,7 @@
/****************************************************************************/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
index c6770377ef87..0cd07150bf4a 100644
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -431,8 +431,7 @@ static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
return bus;
err:
- if (bus)
- kfree(bus);
+ kfree(bus);
return ERR_PTR(ret);
}
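
The fs_enet cleanup relies on kfree() treating a NULL pointer as a no-op, so error paths may free every pointer unconditionally instead of guarding each one. A small sketch of the resulting idiom (struct foo and its fields are invented for illustration):

#include <linux/slab.h>

struct foo {
        char *name;
};

/* Error paths can free every pointer unconditionally because
 * kfree(NULL) does nothing. */
static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
        char *name = NULL;

        if (!f)
                goto err;

        name = kmalloc(16, GFP_KERNEL);
        if (!name)
                goto err;

        f->name = name;
        return f;

err:
        kfree(name);    /* no-op when the allocation never happened */
        kfree(f);
        return NULL;
}
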
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 0d5fccc984bb..c9a46b89942a 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -436,7 +436,7 @@ static int __init dmascc_init(void)
module_init(dmascc_init);
module_exit(dmascc_exit);
-static void dev_setup(struct net_device *dev)
+static void __init dev_setup(struct net_device *dev)
{
dev->type = ARPHRD_AX25;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 98fa5319e5cc..44efd49bf4a9 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -386,7 +386,7 @@ static int __irport_change_speed(struct irda_task *task)
/* Locking notes : this function may be called from irq context with
* spinlock, via irport_write_wakeup(), or from non-interrupt without
* spinlock (from the task timer). Yuck !
- * This is ugly, and unsafe is the spinlock is not already aquired.
+ * This is ugly, and unsafe if the spinlock is not already acquired.
* This will be fixed when irda-task get rewritten.
* Jean II */
if (!spin_is_locked(&self->lock)) {
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index cc7ff8f00e42..cb62f2a9676a 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -115,8 +115,12 @@ static nsc_chip_t chips[] = {
/* Contributed by Jan Frey - IBM A30/A31 */
{ "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
nsc_ircc_probe_39x, nsc_ircc_init_39x },
- { "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
- nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* IBM ThinkPads using PC8738x (T60/X60/Z60) */
+ { "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* IBM ThinkPads using PC8394T (T43/R52/?) */
+ { "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
{ NULL }
};
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2e4ecedba057..5657049c2160 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -226,7 +226,6 @@ static int full_duplex[MAX_UNITS];
NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
-#define NATSEMI_DEF_EEPROM_SIZE 24 /* 12 16-bit values */
/* Buffer sizes:
* The nic writes 32-bit values, even if the upper bytes of
@@ -344,18 +343,6 @@ None characterised.
-enum pcistuff {
- PCI_USES_IO = 0x01,
- PCI_USES_MEM = 0x02,
- PCI_USES_MASTER = 0x04,
- PCI_ADDR0 = 0x08,
- PCI_ADDR1 = 0x10,
-};
-
-/* MMIO operations required */
-#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
-
-
/*
* Support for fibre connections on Am79C874:
* This phy needs a special setup when connected to a fibre cable.
@@ -363,22 +350,25 @@ enum pcistuff {
*/
#define PHYID_AM79C874 0x0022561b
-#define MII_MCTRL 0x15 /* mode control register */
-#define MII_FX_SEL 0x0001 /* 100BASE-FX (fiber) */
-#define MII_EN_SCRM 0x0004 /* enable scrambler (tp) */
+enum {
+ MII_MCTRL = 0x15, /* mode control register */
+ MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
+ MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */
+};
/* array of board data directly indexed by pci_tbl[x].driver_data */
static const struct {
const char *name;
unsigned long flags;
+ unsigned int eeprom_size;
} natsemi_pci_info[] __devinitdata = {
- { "NatSemi DP8381[56]", PCI_IOTYPE },
+ { "NatSemi DP8381[56]", 0, 24 },
};
-static struct pci_device_id natsemi_pci_tbl[] = {
- { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
- { 0, },
+static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
@@ -813,6 +803,42 @@ static void move_int_phy(struct net_device *dev, int addr)
udelay(1);
}
+static void __devinit natsemi_init_media (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 tmp;
+
+ netif_carrier_off(dev);
+
+ /* get the initial settings from hardware */
+ tmp = mdio_read(dev, MII_BMCR);
+ np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
+ np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
+ np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
+ np->advertising= mdio_read(dev, MII_ADVERTISE);
+
+ if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
+ && netif_msg_probe(np)) {
+ printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
+ "10%s %s duplex.\n",
+ pci_name(np->pci_dev),
+ (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
+ "enabled, advertise" : "disabled, force",
+ (np->advertising &
+ (ADVERTISE_100FULL|ADVERTISE_100HALF))?
+ "0" : "",
+ (np->advertising &
+ (ADVERTISE_100FULL|ADVERTISE_10FULL))?
+ "full" : "half");
+ }
+ if (netif_msg_probe(np))
+ printk(KERN_INFO
+ "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
+ pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
+ np->advertising);
+
+}
+
static int __devinit natsemi_probe1 (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -852,8 +878,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
iosize = pci_resource_len(pdev, pcibar);
irq = pdev->irq;
- if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
- pci_set_master(pdev);
+ pci_set_master(pdev);
dev = alloc_etherdev(sizeof (struct netdev_private));
if (!dev)
@@ -892,7 +917,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
np->hands_off = 0;
np->intr_status = 0;
- np->eeprom_size = NATSEMI_DEF_EEPROM_SIZE;
+ np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
/* Initial port:
* - If the nic was configured to use an external phy and if find_mii
@@ -957,34 +982,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
if (mtu)
dev->mtu = mtu;
- netif_carrier_off(dev);
-
- /* get the initial settings from hardware */
- tmp = mdio_read(dev, MII_BMCR);
- np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
- np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
- np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
- np->advertising= mdio_read(dev, MII_ADVERTISE);
-
- if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
- && netif_msg_probe(np)) {
- printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
- "10%s %s duplex.\n",
- pci_name(np->pci_dev),
- (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
- "enabled, advertise" : "disabled, force",
- (np->advertising &
- (ADVERTISE_100FULL|ADVERTISE_100HALF))?
- "0" : "",
- (np->advertising &
- (ADVERTISE_100FULL|ADVERTISE_10FULL))?
- "full" : "half");
- }
- if (netif_msg_probe(np))
- printk(KERN_INFO
- "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
- pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
- np->advertising);
+ natsemi_init_media(dev);
/* save the silicon revision for later querying */
np->srr = readl(ioaddr + SiliconRev);
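
The duplicated probe-time block removed above reappears as the natsemi_init_media() helper; the substantive part is seeding the software media state from the PHY's basic mode control register. A condensed sketch using the generic <linux/mii.h> and <linux/ethtool.h> definitions, with mdio_read passed in as a stand-in for the driver's own accessor:

#include <linux/mii.h>
#include <linux/ethtool.h>

struct media_state {
        int speed;
        int duplex;
        int autoneg;
        int advertising;
};

/* Derive the initial speed/duplex/autoneg settings from BMCR, and remember
 * what the transceiver currently advertises. */
static void init_media(struct media_state *ms, int (*mdio_read)(int reg))
{
        int bmcr = mdio_read(MII_BMCR);

        ms->speed       = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
        ms->duplex      = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
        ms->autoneg     = (bmcr & BMCR_ANENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
        ms->advertising = mdio_read(MII_ADVERTISE);
}
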
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index e74bf5014ef6..a73d54553030 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1883,7 +1883,7 @@ static void smc_reset(struct net_device *dev)
/* Set the Window 1 control, configuration and station addr registers.
No point in writing the I/O base register ;-> */
SMC_SELECT_BANK(1);
- /* Automatically release succesfully transmitted packets,
+ /* Automatically release successfully transmitted packets,
Accept link errors, counter and Tx error interrupts. */
outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
ioaddr + CONTROL);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index fc08c4af506c..0e01c75da429 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -309,12 +309,6 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name);
static void pcnet32_free_ring(struct net_device *dev);
static void pcnet32_check_media(struct net_device *dev, int verbose);
-enum pci_flags_bit {
- PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
- PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
- 0x10 << 2, PCI_ADDR3 = 0x10 << 3,
-};
-
static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
outw(index, addr + PCNET32_WIO_RAP);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index bef79e454c33..3f702c503afe 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -123,9 +123,9 @@ static int lxt971_config_intr(struct phy_device *phydev)
}
static struct phy_driver lxt970_driver = {
- .phy_id = 0x07810000,
+ .phy_id = 0x78100000,
.name = "LXT970",
- .phy_id_mask = 0x0fffffff,
+ .phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
@@ -137,9 +137,9 @@ static struct phy_driver lxt970_driver = {
};
static struct phy_driver lxt971_driver = {
- .phy_id = 0x0001378e,
+ .phy_id = 0x001378e0,
.name = "LXT971",
- .phy_id_mask = 0x0fffffff,
+ .phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
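
The lxt fix shifts each phy_id left so the silicon revision sits in the low nibble, which phy_id_mask = 0xfffffff0 then ignores; the PHY core matches a driver by comparing only the unmasked OUI/model bits. A sketch of that comparison (not the phy library's exact code):

/* A PHY driver matches a device when the masked hardware ID equals the
 * masked driver ID; a mask of 0xfffffff0 ignores the 4-bit revision field. */
static int phy_driver_matches(unsigned int hw_phy_id,
                              unsigned int drv_phy_id,
                              unsigned int drv_phy_id_mask)
{
        return (hw_phy_id & drv_phy_id_mask) ==
               (drv_phy_id & drv_phy_id_mask);
}

/* Example: an LXT971 reporting 0x001378e2 (revision 2) still matches a
 * driver declaring .phy_id = 0x001378e0, .phy_id_mask = 0xfffffff0. */
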
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 01cd8ec751ea..d643a097faa5 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2578,8 +2578,7 @@ ppp_find_channel(int unit)
list_for_each_entry(pch, &new_channels, list) {
if (pch->file.index == unit) {
- list_del(&pch->list);
- list_add(&pch->list, &all_channels);
+ list_move(&pch->list, &all_channels);
return pch;
}
}
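
list_move() is the single-call replacement for the list_del()/list_add() pair above: it unlinks the entry from whatever list currently holds it and reinserts it at the head of the destination. A tiny self-contained sketch using <linux/list.h> (the channel type is illustrative):

#include <linux/list.h>

struct channel {
        int index;
        struct list_head list;
};

static LIST_HEAD(all_channels);

/* Promote a freshly created channel onto the main list. */
static void promote_channel(struct channel *ch)
{
        /* Equivalent to: list_del(&ch->list); list_add(&ch->list, &all_channels); */
        list_move(&ch->list, &all_channels);
}
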
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 19a4a16055dc..1608efab4e3d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3354,8 +3354,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if (err)
goto err_out_free_irq;
- printk(KERN_INFO PFX DRV_VERSION " addr 0x%lx irq %d chip %s rev %d\n",
- pci_resource_start(pdev, 0), pdev->irq,
+ printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
+ (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
skge_board_name(hw), hw->chip_rev);
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
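
This skge hunk, and the sky2/tulip/typhoon/WAN/PCI ones that follow, all apply one rule: pci_resource_start()/pci_resource_len() and struct resource fields are now resource_size_t, which may be 32 or 64 bits wide, so values are cast to unsigned long long and printed with %llx on every configuration. A sketch of the portable form for an arbitrary BAR:

#include <linux/kernel.h>
#include <linux/pci.h>

/* Print a BAR regardless of whether resource_size_t is 32 or 64 bits:
 * cast to unsigned long long and always use the %llx conversion. */
static void report_bar(struct pci_dev *pdev, int bar)
{
        printk(KERN_INFO "BAR %d: start 0x%llx, len 0x%llx\n", bar,
               (unsigned long long)pci_resource_start(pdev, bar),
               (unsigned long long)pci_resource_len(pdev, bar));
}
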
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d3577871be28..e122007e16da 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3311,9 +3311,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
if (err)
goto err_out_iounmap;
- printk(KERN_INFO PFX "v%s addr 0x%lx irq %d Yukon-%s (0x%x) rev %d\n",
- DRV_VERSION, pci_resource_start(pdev, 0), pdev->irq,
- yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
+ printk(KERN_INFO PFX "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
+ DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
+ pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
hw->chip_id, hw->chip_rev);
dev = sky2_init_netdev(hw, 0, using_dac);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5f743b972949..fc2468ecce0b 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2007,8 +2007,8 @@ static int __init de_init_one (struct pci_dev *pdev,
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
- pci_resource_len(pdev, 1), pci_name(pdev));
+ printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
goto err_out_res;
}
@@ -2016,8 +2016,9 @@ static int __init de_init_one (struct pci_dev *pdev,
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
- pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
+ printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index e0de66739a42..53fd9b56d0bd 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1350,10 +1350,10 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
- printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
+ printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
"aborting\n", pci_name(pdev),
- pci_resource_len (pdev, 0),
- pci_resource_start (pdev, 0));
+ (unsigned long long)pci_resource_len (pdev, 0),
+ (unsigned long long)pci_resource_start (pdev, 0));
goto err_out_free_netdev;
}
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 8fea2aa455d4..602a6e5002a0 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -212,26 +212,15 @@ Test with 'ping -s 10000' on a fast computer.
/*
PCI probe table.
*/
-enum pci_id_flags_bits {
- /* Set PCI command register bits before calling probe1(). */
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- /* Read and map the single following PCI BAR. */
- PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
- PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
-};
enum chip_capability_flags {
- CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
-#ifdef USE_IO_OPS
-#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
-#else
-#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
-#endif
+ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
+};
-static struct pci_device_id w840_pci_tbl[] = {
+static const struct pci_device_id w840_pci_tbl[] = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { 0, }
+ { }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
@@ -241,18 +230,17 @@ struct pci_id_info {
int pci, pci_mask, subsystem, subsystem_mask;
int revision, revision_mask; /* Only 8 bits. */
} id;
- enum pci_id_flags_bits pci_flags;
int io_size; /* Needed for I/O region check or ioremap(). */
int drv_flags; /* Driver use, intended as capability flags. */
};
static struct pci_id_info pci_id_tbl[] = {
{"Winbond W89c840", /* Sometime a Level-One switch card. */
{ 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
- W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
{"Winbond W89c840", { 0x08401050, 0xffffffff, },
- W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ 128, CanHaveMII | HasBrokenTx},
{"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
- W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ 128, CanHaveMII | HasBrokenTx},
{NULL,}, /* 0 terminated list. */
};
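
The winbond-840 (and later yellowfin) tables drop the home-grown pci_flags machinery and keep only a const pci_device_id table terminated by an empty initializer. A sketch of the preferred table shape (the single Winbond ID is taken from the hunk above; the table name is made up):

#include <linux/module.h>
#include <linux/pci.h>

/* Device ID tables are read-only data, so declare them const, and end
 * them with an empty initializer rather than an explicit { 0, }. */
static const struct pci_device_id example_pci_tbl[] = {
        { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
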
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index e49e8b520c28..e24d2dafcf6c 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2568,9 +2568,10 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: %s at %s 0x%lx, ",
+ printk(KERN_INFO "%s: %s at %s 0x%llx, ",
dev->name, typhoon_card_info[card_id].name,
- use_mmio ? "MMIO" : "IO", pci_resource_start(pdev, use_mmio));
+ use_mmio ? "MMIO" : "IO",
+ (unsigned long long)pci_resource_start(pdev, use_mmio));
for(i = 0; i < 5; i++)
printk("%2.2x:", dev->dev_addr[i]);
printk("%2.2x\n", dev->dev_addr[i]);
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index b60ef02db7b0..c92ac9fde083 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -7,7 +7,7 @@
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
- * For information see http://hq.pm.waw.pl/hdlc/
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Sources of information:
* Hitachi HD64570 SCA User's Manual
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 4505540e3c59..04a376ec0ed8 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -732,15 +732,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
ioaddr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!ioaddr) {
- printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
- DRV_NAME, pci_resource_len(pdev, 0),
- pci_resource_start(pdev, 0));
+ printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
+ DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
+ (unsigned long long)pci_resource_start(pdev, 0));
rc = -EIO;
goto err_free_mmio_regions_2;
}
- printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
- pci_resource_start(pdev, 0),
- pci_resource_start(pdev, 1), pdev->irq);
+ printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
+ (unsigned long long)pci_resource_start(pdev, 0),
+ (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);
/* Cf errata DS5 p.2 */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index b7d88db89a5c..e013b817cab8 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -7,7 +7,7 @@
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
- * For information see http://hq.pm.waw.pl/hdlc/
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Note: integrated CSU/DSU/DDS are not supported by this driver
*
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index a3e65d1bc19b..d7897ae89f90 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3445,9 +3445,9 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL);
if (card == NULL) {
- printk("PC300 found at RAM 0x%08lx, "
+ printk("PC300 found at RAM 0x%016llx, "
"but could not allocate card structure.\n",
- pci_resource_start(pdev, 3));
+ (unsigned long long)pci_resource_start(pdev, 3));
err = -ENOMEM;
goto err_disable_dev;
}
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 670e8bd2245c..24c3c57c13c9 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -7,7 +7,7 @@
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
- * For information see http://hq.pm.waw.pl/hdlc/
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Sources of information:
* Hitachi HD64572 SCA-II User's Manual
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig
index 25ea4748f0b9..533993f538fc 100644
--- a/drivers/net/wireless/bcm43xx/Kconfig
+++ b/drivers/net/wireless/bcm43xx/Kconfig
@@ -2,6 +2,7 @@ config BCM43XX
tristate "Broadcom BCM43xx wireless support"
depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL
select FW_LOADER
+ select HW_RANDOM
---help---
This is an experimental driver for the Broadcom 43xx wireless chip,
found in the Apple Airport Extreme and various other devices.
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index d8f917c21ea4..17a56828e232 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -1,6 +1,7 @@
#ifndef BCM43xx_H_
#define BCM43xx_H_
+#include <linux/hw_random.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -82,6 +83,7 @@
#define BCM43xx_MMIO_TSF_1 0x634 /* core rev < 3 only */
#define BCM43xx_MMIO_TSF_2 0x636 /* core rev < 3 only */
#define BCM43xx_MMIO_TSF_3 0x638 /* core rev < 3 only */
+#define BCM43xx_MMIO_RNG 0x65A
#define BCM43xx_MMIO_POWERUP_DELAY 0x6A8
/* SPROM offsets. */
@@ -750,6 +752,10 @@ struct bcm43xx_private {
const struct firmware *initvals0;
const struct firmware *initvals1;
+ /* Random Number Generator. */
+ struct hwrng rng;
+ char rng_name[20 + 1];
+
/* Debugging stuff follows. */
#ifdef CONFIG_BCM43XX_DEBUG
struct bcm43xx_dfsentry *dfsentry;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 085d7857fe31..27bcf47228e2 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3237,6 +3237,39 @@ static void bcm43xx_security_init(struct bcm43xx_private *bcm)
bcm43xx_clear_keys(bcm);
}
+static int bcm43xx_rng_read(struct hwrng *rng, u32 *data)
+{
+ struct bcm43xx_private *bcm = (struct bcm43xx_private *)rng->priv;
+ unsigned long flags;
+
+ bcm43xx_lock_irqonly(bcm, flags);
+ *data = bcm43xx_read16(bcm, BCM43xx_MMIO_RNG);
+ bcm43xx_unlock_irqonly(bcm, flags);
+
+ return (sizeof(u16));
+}
+
+static void bcm43xx_rng_exit(struct bcm43xx_private *bcm)
+{
+ hwrng_unregister(&bcm->rng);
+}
+
+static int bcm43xx_rng_init(struct bcm43xx_private *bcm)
+{
+ int err;
+
+ snprintf(bcm->rng_name, ARRAY_SIZE(bcm->rng_name),
+ "%s_%s", KBUILD_MODNAME, bcm->net_dev->name);
+ bcm->rng.name = bcm->rng_name;
+ bcm->rng.data_read = bcm43xx_rng_read;
+ bcm->rng.priv = (unsigned long)bcm;
+ err = hwrng_register(&bcm->rng);
+ if (err)
+ printk(KERN_ERR PFX "RNG init failed (%d)\n", err);
+
+ return err;
+}
+
/* This is the opposite of bcm43xx_init_board() */
static void bcm43xx_free_board(struct bcm43xx_private *bcm)
{
@@ -3248,6 +3281,7 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm)
bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
+ bcm43xx_rng_exit(bcm);
for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
if (!bcm->core_80211[i].available)
continue;
@@ -3325,6 +3359,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
bcm43xx_switch_core(bcm, &bcm->core_80211[0]);
bcm43xx_mac_enable(bcm);
}
+ err = bcm43xx_rng_init(bcm);
+ if (err)
+ goto err_80211_unwind;
bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr));
dprintk(KERN_INFO PFX "80211 cores initialized\n");
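
The bcm43xx hunks expose a 16-bit hardware register through the hw_random framework: fill in a struct hwrng with a name, a data_read callback and a priv cookie, register it with hwrng_register() during init and undo it with hwrng_unregister() on teardown. A stripped-down sketch of the same registration shape (the device structure and the register read are placeholders):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/hw_random.h>

struct mydev {
        struct hwrng rng;
        char rng_name[32];
};

/* Placeholder for the device's MMIO read of its RNG register. */
static u16 mydev_read_rng_reg(struct mydev *dev)
{
        return 0;       /* a real driver reads hardware here */
}

/* hw_random core callback: fill *data and return how many bytes are valid. */
static int mydev_rng_read(struct hwrng *rng, u32 *data)
{
        struct mydev *dev = (struct mydev *)rng->priv;

        *data = mydev_read_rng_reg(dev);
        return sizeof(u16);             /* only the low 16 bits are random */
}

static int mydev_rng_init(struct mydev *dev, const char *ifname)
{
        snprintf(dev->rng_name, sizeof(dev->rng_name), "mydrv_%s", ifname);
        dev->rng.name = dev->rng_name;
        dev->rng.data_read = mydev_rng_read;
        dev->rng.priv = (unsigned long)dev;

        return hwrng_register(&dev->rng);       /* undo with hwrng_unregister() */
}
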
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 72335c8eb97f..94aeb23a7729 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -1485,7 +1485,7 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
*
* Sending the PREPARE_FOR_POWER_DOWN will restrict the
* hardware from going into standby mode and will transition
- * out of D0-standy if it is already in that state.
+ * out of D0-standby if it is already in that state.
*
* STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
* driver upon completion. Once received, the driver can
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 081a8999666e..a8a8f975432f 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -1229,12 +1229,6 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
return error;
}
-static void ipw_free_error_log(struct ipw_fw_error *error)
-{
- if (error)
- kfree(error);
-}
-
static ssize_t show_event_log(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -1296,10 +1290,9 @@ static ssize_t clear_error(struct device *d,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
- if (priv->error) {
- ipw_free_error_log(priv->error);
- priv->error = NULL;
- }
+
+ kfree(priv->error);
+ priv->error = NULL;
return count;
}
@@ -1970,8 +1963,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
struct ipw_fw_error *error =
ipw_alloc_error_log(priv);
ipw_dump_error_log(priv, error);
- if (error)
- ipw_free_error_log(error);
+ kfree(error);
}
#endif
} else {
@@ -11693,10 +11685,8 @@ static void ipw_pci_remove(struct pci_dev *pdev)
}
}
- if (priv->error) {
- ipw_free_error_log(priv->error);
- priv->error = NULL;
- }
+ kfree(priv->error);
+ priv->error = NULL;
#ifdef CONFIG_IPW2200_PROMISCUOUS
ipw_prom_free(priv);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ecec8e5db786..569305f57561 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -234,14 +234,6 @@ See Packet Engines confidential appendix (prototype chips only).
-enum pci_id_flags_bits {
- /* Set PCI command register bits before calling probe1(). */
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- /* Read and map the single following PCI BAR. */
- PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
- PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
- PCI_UNUSED_IRQ=0x800,
-};
enum capability_flags {
HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
HasMACAddrBug=32, /* Only on early revs. */
@@ -249,11 +241,6 @@ enum capability_flags {
};
/* The PCI I/O space extent. */
#define YELLOWFIN_SIZE 0x100
-#ifdef USE_IO_OPS
-#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
-#else
-#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
-#endif
struct pci_id_info {
const char *name;
@@ -261,24 +248,23 @@ struct pci_id_info {
int pci, pci_mask, subsystem, subsystem_mask;
int revision, revision_mask; /* Only 8 bits. */
} id;
- enum pci_id_flags_bits pci_flags;
int io_size; /* Needed for I/O region check or ioremap(). */
int drv_flags; /* Driver use, intended as capability flags. */
};
static const struct pci_id_info pci_id_tbl[] = {
{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
- PCI_IOTYPE, YELLOWFIN_SIZE,
+ YELLOWFIN_SIZE,
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
- PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
- {NULL,},
+ YELLOWFIN_SIZE, HasMII | DontUseEeprom },
+ { }
};
-static struct pci_device_id yellowfin_pci_tbl[] = {
+static const struct pci_device_id yellowfin_pci_tbl[] = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { 0, }
+ { }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 6e8ed0c81a6c..ce0a6ebcff15 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -299,7 +299,7 @@ struct pci_port_ops dino_port_ops = {
static void dino_disable_irq(unsigned int irq)
{
- struct dino_device *dino_dev = irq_desc[irq].handler_data;
+ struct dino_device *dino_dev = irq_desc[irq].chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
@@ -311,7 +311,7 @@ static void dino_disable_irq(unsigned int irq)
static void dino_enable_irq(unsigned int irq)
{
- struct dino_device *dino_dev = irq_desc[irq].handler_data;
+ struct dino_device *dino_dev = irq_desc[irq].chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 9d3bd15bf53b..58f0ce8d78e0 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -350,7 +350,7 @@ static int __devinit eisa_probe(struct parisc_device *dev)
irq_desc[2].action = &irq2_action;
for (i = 0; i < 16; i++) {
- irq_desc[i].handler = &eisa_interrupt_type;
+ irq_desc[i].chip = &eisa_interrupt_type;
}
EISA_bus = 1;
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 16d40f95978d..5476ba7709b3 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -109,7 +109,7 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
static void gsc_asic_disable_irq(unsigned int irq)
{
- struct gsc_asic *irq_dev = irq_desc[irq].handler_data;
+ struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -124,7 +124,7 @@ static void gsc_asic_disable_irq(unsigned int irq)
static void gsc_asic_enable_irq(unsigned int irq)
{
- struct gsc_asic *irq_dev = irq_desc[irq].handler_data;
+ struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -164,8 +164,8 @@ int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
if (irq > GSC_IRQ_MAX)
return NO_IRQ;
- irq_desc[irq].handler = type;
- irq_desc[irq].handler_data = data;
+ irq_desc[irq].chip = type;
+ irq_desc[irq].chip_data = data;
return irq++;
}
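
These parisc hunks, like the later PCI/MSI and PCMCIA ones, are mechanical renames for the genirq rework: the per-IRQ descriptor's handler field becomes chip and handler_data becomes chip_data, with unchanged semantics. Roughly, code that installs an interrupt controller for an IRQ now reads like this (a sketch, not a complete driver):

#include <linux/irq.h>

/* Attach an interrupt controller and its per-IRQ cookie to one descriptor.
 * Field renames: irq_desc[irq].handler      -> irq_desc[irq].chip
 *                irq_desc[irq].handler_data -> irq_desc[irq].chip_data */
static void attach_irq_chip(unsigned int irq,
                            struct hw_interrupt_type *type, void *data)
{
        irq_desc[irq].chip = type;
        irq_desc[irq].chip_data = data;
}
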
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 7a458d5bc751..1fbda77cefc2 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -619,7 +619,7 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
static struct vector_info *iosapic_get_vector(unsigned int irq)
{
- return irq_desc[irq].handler_data;
+ return irq_desc[irq].chip_data;
}
static void iosapic_disable_irq(unsigned int irq)
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 828eb45062de..a988dc7a9abd 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -360,7 +360,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
#endif
for (i = 0; i < 16; i++) {
- irq_desc[i].handler = &superio_interrupt_type;
+ irq_desc[i].chip = &superio_interrupt_type;
}
/*
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 1de52d9febf9..7352104f7b30 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -15,7 +15,7 @@
* Phil Blundell <philb@gnu.org>
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Jose Renau <renau@acm.org>
- * David Campbell <campbell@torque.net>
+ * David Campbell
* Andrea Arcangeli
*/
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
index 662f6c1fee5d..fc9c37c54022 100644
--- a/drivers/parport/parport_gsc.h
+++ b/drivers/parport/parport_gsc.h
@@ -24,7 +24,7 @@
* Phil Blundell <Philip.Blundell@pobox.com>
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Jose Renau <renau@acm.org>
- * David Campbell <campbell@torque.net>
+ * David Campbell
* Andrea Arcangeli
*/
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 48bbf32fd980..7318e4a9e436 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3,7 +3,7 @@
* Authors: Phil Blundell <philb@gnu.org>
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Jose Renau <renau@acm.org>
- * David Campbell <campbell@torque.net>
+ * David Campbell
* Andrea Arcangeli
*
* based on work by Grant Guenther <grant@torque.net> and Phil Blundell.
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 69a4bbd4cbee..7c43c5392bed 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -389,7 +389,7 @@ static struct of_device_id bpp_match[] = {
{},
};
-MODULE_DEVICE_TABLE(of, qec_sbus_match);
+MODULE_DEVICE_TABLE(of, bpp_match);
static struct of_platform_driver bpp_sbus_driver = {
.name = "bpp",
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index cbe17184b9c7..8610ae88b92d 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -1,6 +1,6 @@
/* Sysctl interface for parport devices.
*
- * Authors: David Campbell <campbell@torque.net>
+ * Authors: David Campbell
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Philip Blundell <philb@gnu.org>
* Andrea Arcangeli
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 723092682023..5f7db9d2436e 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -34,11 +34,11 @@
*/
int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
- unsigned long size, unsigned long align, unsigned long min,
- unsigned int type_mask,
- void (*alignf)(void *, struct resource *,
- unsigned long, unsigned long),
- void *alignf_data)
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned int type_mask,
+ void (*alignf)(void *, struct resource *, resource_size_t,
+ resource_size_t),
+ void *alignf_data)
{
int i, ret = -ENOMEM;
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index f7cb00da38df..1ec165df8522 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -95,8 +95,8 @@ static int zt5550_hc_config(struct pci_dev *pdev)
hc_dev = pdev;
dbg("hc_dev = %p", hc_dev);
- dbg("pci resource start %lx", pci_resource_start(hc_dev, 1));
- dbg("pci resource len %lx", pci_resource_len(hc_dev, 1));
+ dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1));
+ dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1));
if(!request_mem_region(pci_resource_start(hc_dev, 1),
pci_resource_len(hc_dev, 1), MY_NAME)) {
@@ -108,8 +108,9 @@ static int zt5550_hc_config(struct pci_dev *pdev)
hc_registers =
ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1));
if(!hc_registers) {
- err("cannot remap MMIO region %lx @ %lx",
- pci_resource_len(hc_dev, 1), pci_resource_start(hc_dev, 1));
+ err("cannot remap MMIO region %llx @ %llx",
+ (unsigned long long)pci_resource_len(hc_dev, 1),
+ (unsigned long long)pci_resource_start(hc_dev, 1));
ret = -ENODEV;
goto exit_release_region;
}
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 9bc1deb8df52..f8658d63f077 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -1089,8 +1089,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
dbg("pdev = %p\n", pdev);
- dbg("pci resource start %lx\n", pci_resource_start(pdev, 0));
- dbg("pci resource len %lx\n", pci_resource_len(pdev, 0));
+ dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
+ dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
if (!request_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), MY_NAME)) {
@@ -1102,9 +1102,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ctrl->hpc_reg = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!ctrl->hpc_reg) {
- err("cannot remap MMIO region %lx @ %lx\n",
- pci_resource_len(pdev, 0),
- pci_resource_start(pdev, 0));
+ err("cannot remap MMIO region %llx @ %llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0),
+ (unsigned long long)pci_resource_start(pdev, 0));
rc = -ENODEV;
goto err_free_mem_region;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index d77138ecb098..11f7858f0064 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1398,8 +1398,9 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
for ( rc = 0; rc < DEVICE_COUNT_RESOURCE; rc++)
if (pci_resource_len(pdev, rc) > 0)
- dbg("pci resource[%d] start=0x%lx(len=0x%lx)\n", rc,
- pci_resource_start(pdev, rc), pci_resource_len(pdev, rc));
+ dbg("pci resource[%d] start=0x%llx(len=0x%llx)\n", rc,
+ (unsigned long long)pci_resource_start(pdev, rc),
+ (unsigned long long)pci_resource_len(pdev, rc));
info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index f5cfbf2c047c..620e1139e607 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -51,8 +51,10 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
res = bus->resource[index];
if (res && (res->flags & IORESOURCE_MEM) &&
!(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8lx, length = %8.8lx\n",
- res->start, (res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, "
+ "length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)(res->end - res->start));
}
}
out += sprintf(out, "Free resources: prefetchable memory\n");
@@ -60,16 +62,20 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
res = bus->resource[index];
if (res && (res->flags & IORESOURCE_MEM) &&
(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8lx, length = %8.8lx\n",
- res->start, (res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, "
+ "length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)(res->end - res->start));
}
}
out += sprintf(out, "Free resources: IO\n");
for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
res = bus->resource[index];
if (res && (res->flags & IORESOURCE_IO)) {
- out += sprintf(out, "start = %8.8lx, length = %8.8lx\n",
- res->start, (res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, "
+ "length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)(res->end - res->start));
}
}
out += sprintf(out, "Free resources: bus numbers\n");
diff --git a/drivers/pci/msi-apic.c b/drivers/pci/msi-apic.c
index 0eb5fe9003a2..5ed798b319c7 100644
--- a/drivers/pci/msi-apic.c
+++ b/drivers/pci/msi-apic.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
#include <linux/irq.h>
+#include <asm/smp.h>
#include "msi.h"
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7f8429284fab..76d023d8a33b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -429,12 +429,12 @@ static void irq_handler_init(int cap_id, int pos, int mask)
spin_lock_irqsave(&irq_desc[pos].lock, flags);
if (cap_id == PCI_CAP_ID_MSIX)
- irq_desc[pos].handler = &msix_irq_type;
+ irq_desc[pos].chip = &msix_irq_type;
else {
if (!mask)
- irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
+ irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
else
- irq_desc[pos].handler = &msi_irq_w_maskbit_type;
+ irq_desc[pos].chip = &msi_irq_w_maskbit_type;
}
spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bc405c035ce3..606f9b6f70eb 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -87,7 +87,7 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
char * str = buf;
int i;
int max = 7;
- u64 start, end;
+ resource_size_t start, end;
if (pci_dev->subordinate)
max = DEVICE_COUNT_RESOURCE;
@@ -365,7 +365,7 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
struct device, kobj));
struct resource *res = (struct resource *)attr->private;
enum pci_mmap_state mmap_type;
- u64 start, end;
+ resource_size_t start, end;
int i;
for (i = 0; i < PCI_ROM_RESOURCE; i++)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d408a3c30426..cf57d7de3765 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -369,7 +369,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
/*
* Give firmware a chance to be called, such as ACPI _PRx, _PSx
- * Firmware method after natice method ?
+ * Firmware method after native method ?
*/
if (platform_pci_set_power_state)
platform_pci_set_power_state(dev, state);
@@ -691,10 +691,12 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
return 0;
err_out:
- printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
+ printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
+ "for device %s\n",
pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
bar + 1, /* PCI BAR # */
- pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
+ (unsigned long long)pci_resource_len(pdev, bar),
+ (unsigned long long)pci_resource_start(pdev, bar),
pci_name(pdev));
return -EBUSY;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 29bdeca031a8..9cc842b666eb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -6,10 +6,10 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_cleanup_rom(struct pci_dev *dev);
extern int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
- unsigned long size, unsigned long align,
- unsigned long min, unsigned int type_mask,
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned int type_mask,
void (*alignf)(void *, struct resource *,
- unsigned long, unsigned long),
+ resource_size_t, resource_size_t),
void *alignf_data);
/* Firmware callbacks */
extern int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 54b2ebc9c91a..99cf33379769 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -302,12 +302,6 @@ static struct file_operations proc_bus_pci_operations = {
#endif /* HAVE_PCI_MMAP */
};
-#if BITS_PER_LONG == 32
-#define LONG_FORMAT "\t%08lx"
-#else
-#define LONG_FORMAT "\t%16lx"
-#endif
-
/* iterator */
static void *pci_seq_start(struct seq_file *m, loff_t *pos)
{
@@ -356,18 +350,18 @@ static int show_device(struct seq_file *m, void *v)
dev->irq);
/* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */
for (i=0; i<7; i++) {
- u64 start, end;
+ resource_size_t start, end;
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
- seq_printf(m, LONG_FORMAT,
- ((unsigned long)start) |
- (dev->resource[i].flags & PCI_REGION_FLAG_MASK));
+ seq_printf(m, "\t%16llx",
+ (unsigned long long)(start |
+ (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
}
for (i=0; i<7; i++) {
- u64 start, end;
+ resource_size_t start, end;
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
- seq_printf(m, LONG_FORMAT,
+ seq_printf(m, "\t%16llx",
dev->resource[i].start < dev->resource[i].end ?
- (unsigned long)(end - start) + 1 : 0);
+ (unsigned long long)(end - start) + 1 : 0);
}
seq_putc(m, '\t');
if (drv)
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 598a115cd00e..cbb69cf41311 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -80,8 +80,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
} else {
if (res->flags & IORESOURCE_ROM_COPY) {
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- return (void __iomem *)pci_resource_start(pdev,
- PCI_ROM_RESOURCE);
+ return (void __iomem *)(unsigned long)
+ pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
if (res->parent == NULL &&
@@ -170,11 +170,11 @@ void __iomem *pci_map_rom_copy(struct pci_dev *pdev, size_t *size)
return rom;
res->end = res->start + *size;
- memcpy_fromio((void*)res->start, rom, *size);
+ memcpy_fromio((void*)(unsigned long)res->start, rom, *size);
pci_unmap_rom(pdev, rom);
res->flags |= IORESOURCE_ROM_COPY;
- return (void __iomem *)res->start;
+ return (void __iomem *)(unsigned long)res->start;
}
/**
@@ -227,7 +227,7 @@ void pci_cleanup_rom(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
if (res->flags & IORESOURCE_ROM_COPY) {
- kfree((void*)res->start);
+ kfree((void*)(unsigned long)res->start);
res->flags &= ~IORESOURCE_ROM_COPY;
res->start = 0;
res->end = 0;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 35086e80faa9..47c1071ad84e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -357,8 +357,10 @@ pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
order = __ffs(align) - 20;
if (order > 11) {
printk(KERN_WARNING "PCI: region %s/%d "
- "too large: %lx-%lx\n",
- pci_name(dev), i, r->start, r->end);
+ "too large: %llx-%llx\n",
+ pci_name(dev), i,
+ (unsigned long long)r->start,
+ (unsigned long long)r->end);
r->flags = 0;
continue;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 577f4b55c46d..ab78e4bbdd83 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -40,8 +40,9 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
pcibios_resource_to_bus(dev, &region, res);
- pr_debug(" got res [%lx:%lx] bus [%lx:%lx] flags %lx for "
- "BAR %d of %s\n", res->start, res->end,
+ pr_debug(" got res [%llx:%llx] bus [%lx:%lx] flags %lx for "
+ "BAR %d of %s\n", (unsigned long long)res->start,
+ (unsigned long long)res->end,
region.start, region.end, res->flags, resno, pci_name(dev));
new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
@@ -104,10 +105,12 @@ pci_claim_resource(struct pci_dev *dev, int resource)
err = insert_resource(root, res);
if (err) {
- printk(KERN_ERR "PCI: %s region %d of %s %s [%lx:%lx]\n",
- root ? "Address space collision on" :
- "No parent found for",
- resource, dtype, pci_name(dev), res->start, res->end);
+ printk(KERN_ERR "PCI: %s region %d of %s %s [%llx:%llx]\n",
+ root ? "Address space collision on" :
+ "No parent found for",
+ resource, dtype, pci_name(dev),
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
}
return err;
@@ -118,7 +121,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
{
struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
- unsigned long size, min, align;
+ resource_size_t size, min, align;
int ret;
size = res->end - res->start + 1;
@@ -145,9 +148,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
if (ret) {
- printk(KERN_ERR "PCI: Failed to allocate %s resource #%d:%lx@%lx for %s\n",
- res->flags & IORESOURCE_IO ? "I/O" : "mem",
- resno, size, res->start, pci_name(dev));
+ printk(KERN_ERR "PCI: Failed to allocate %s resource "
+ "#%d:%llx@%llx for %s\n",
+ res->flags & IORESOURCE_IO ? "I/O" : "mem",
+ resno, (unsigned long long)size,
+ (unsigned long long)res->start, pci_name(dev));
} else if (resno < PCI_BRIDGE_RESOURCES) {
pci_update_resource(dev, res, resno);
}
@@ -204,7 +209,7 @@ pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r;
struct resource_list *list, *tmp;
- unsigned long r_align;
+ resource_size_t r_align;
r = &dev->resource[i];
r_align = r->end - r->start;
@@ -213,13 +218,14 @@ pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
continue;
if (!r_align) {
printk(KERN_WARNING "PCI: Ignore bogus resource %d "
- "[%lx:%lx] of %s\n",
- i, r->start, r->end, pci_name(dev));
+ "[%llx:%llx] of %s\n",
+ i, (unsigned long long)r->start,
+ (unsigned long long)r->end, pci_name(dev));
continue;
}
r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;
for (list = head; ; list = list->next) {
- unsigned long align = 0;
+ resource_size_t align = 0;
struct resource_list *ln = list->next;
int idx;
diff --git a/drivers/pcmcia/hd64465_ss.c b/drivers/pcmcia/hd64465_ss.c
index b39435bbfaeb..c662e4f89d46 100644
--- a/drivers/pcmcia/hd64465_ss.c
+++ b/drivers/pcmcia/hd64465_ss.c
@@ -244,8 +244,8 @@ static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
hs_mapped_irq[irq].sock = sp;
/* insert ourselves as the irq controller */
- hs_mapped_irq[irq].old_handler = irq_desc[irq].handler;
- irq_desc[irq].handler = &hd64465_ss_irq_type;
+ hs_mapped_irq[irq].old_handler = irq_desc[irq].chip;
+ irq_desc[irq].chip = &hd64465_ss_irq_type;
}
@@ -260,7 +260,7 @@ static void hs_unmap_irq(hs_socket_t *sp, unsigned int irq)
return;
/* restore the original irq controller */
- irq_desc[irq].handler = hs_mapped_irq[irq].old_handler;
+ irq_desc[irq].chip = hs_mapped_irq[irq].old_handler;
}
/*============================================================*/
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index a2f05f485156..ff51a65d9433 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1084,9 +1084,10 @@ static int i365_set_mem_map(u_short sock, struct pccard_mem_map *mem)
u_short base, i;
u_char map;
- debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, %#lx-%#lx, "
+ debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, "
"%#x)\n", sock, mem->map, mem->flags, mem->speed,
- mem->res->start, mem->res->end, mem->card_start);
+ (unsigned long long)mem->res->start,
+ (unsigned long long)mem->res->end, mem->card_start);
map = mem->map;
if ((map > 4) || (mem->card_start > 0x3ffffff) ||
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 0e07d9535116..d0f68ab8f041 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -157,7 +157,7 @@ MODULE_LICENSE("Dual MPL/GPL");
static int pcmcia_schlvl = PCMCIA_SCHLVL;
-static spinlock_t events_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(events_lock);
#define PCMCIA_SOCKET_KEY_5V 1
@@ -644,7 +644,7 @@ static struct platform_device m8xx_device = {
};
static u32 pending_events[PCMCIA_SOCKETS_NO];
-static spinlock_t pending_event_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pending_event_lock);
static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs)
{
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 247ab837f841..9ee26c1b8635 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -642,7 +642,8 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
goto err_out_free_mem;
printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge "
- "at 0x%lx on irq %d\n", pci_resource_start(dev, 0), dev->irq);
+ "at 0x%llx on irq %d\n",
+ (unsigned long long)pci_resource_start(dev, 0), dev->irq);
/*
* Since we have no memory BARs some firmware may not
* have had PCI_COMMAND_MEMORY enabled, yet the device needs it.
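
Aside (not part of the patch): the comment above refers to the usual fix-up for a device with no memory BARs, where firmware may leave memory-space decoding disabled. A minimal sketch of that step, not the driver's actual code:

        #include <linux/pci.h>

        /* Sketch: firmware may have left PCI_COMMAND_MEMORY clear for a
         * device that advertises no memory BARs; enable decoding by hand.
         */
        static void ensure_mem_decode(struct pci_dev *dev)
        {
                u16 cmd;

                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                if (!(cmd & PCI_COMMAND_MEMORY))
                        pci_write_config_word(dev, PCI_COMMAND,
                                              cmd | PCI_COMMAND_MEMORY);
        }
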
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 0f8b157c9717..c3176b16b7be 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -72,7 +72,7 @@ static DEFINE_MUTEX(rsrc_mutex);
======================================================================*/
static struct resource *
-make_resource(unsigned long b, unsigned long n, int flags, char *name)
+make_resource(resource_size_t b, resource_size_t n, int flags, char *name)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
@@ -86,8 +86,8 @@ make_resource(unsigned long b, unsigned long n, int flags, char *name)
}
static struct resource *
-claim_region(struct pcmcia_socket *s, unsigned long base, unsigned long size,
- int type, char *name)
+claim_region(struct pcmcia_socket *s, resource_size_t base,
+ resource_size_t size, int type, char *name)
{
struct resource *res, *parent;
@@ -519,10 +519,10 @@ struct pcmcia_align_data {
static void
pcmcia_common_align(void *align_data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
struct pcmcia_align_data *data = align_data;
- unsigned long start;
+ resource_size_t start;
/*
* Ensure that we have the correct start address
*/
@@ -533,8 +533,8 @@ pcmcia_common_align(void *align_data, struct resource *res,
}
static void
-pcmcia_align(void *align_data, struct resource *res,
- unsigned long size, unsigned long align)
+pcmcia_align(void *align_data, struct resource *res, resource_size_t size,
+ resource_size_t align)
{
struct pcmcia_align_data *data = align_data;
struct resource_map *m;
@@ -808,8 +808,10 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
if (res->flags & IORESOURCE_IO) {
if (res == &ioport_resource)
continue;
- printk(KERN_INFO "pcmcia: parent PCI bridge I/O window: 0x%lx - 0x%lx\n",
- res->start, res->end);
+ printk(KERN_INFO "pcmcia: parent PCI bridge I/O "
+ "window: 0x%llx - 0x%llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end))
done |= IORESOURCE_IO;
@@ -818,8 +820,10 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
if (res->flags & IORESOURCE_MEM) {
if (res == &iomem_resource)
continue;
- printk(KERN_INFO "pcmcia: parent PCI bridge Memory window: 0x%lx - 0x%lx\n",
- res->start, res->end);
+ printk(KERN_INFO "pcmcia: parent PCI bridge Memory "
+ "window: 0x%llx - 0x%llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end))
done |= IORESOURCE_MEM;
}
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 73bad1d5cb23..65a60671659f 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -756,8 +756,9 @@ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
u_long base, len, mmap;
debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx, %#x)\n", psock, mem->map, mem->flags,
- mem->speed, mem->res->start, mem->res->end, mem->card_start);
+ "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags,
+ mem->speed, (unsigned long long)mem->res->start,
+ (unsigned long long)mem->res->end, mem->card_start);
if ((mem->map > 3) || (mem->card_start > 0x3ffffff) ||
(mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) ||
(mem->res->start > mem->res->end) || (mem->speed > 1000))
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index a2d8ce7fef9c..3163e3d73da1 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -264,7 +264,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
if (pnp_port_flags(dev, i) & IORESOURCE_DISABLED)
pnp_printf(buffer," disabled\n");
else
- pnp_printf(buffer," 0x%lx-0x%lx\n",
+ pnp_printf(buffer," 0x%llx-0x%llx\n",
pnp_port_start(dev, i),
pnp_port_end(dev, i));
}
@@ -275,7 +275,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
if (pnp_mem_flags(dev, i) & IORESOURCE_DISABLED)
pnp_printf(buffer," disabled\n");
else
- pnp_printf(buffer," 0x%lx-0x%lx\n",
+ pnp_printf(buffer," 0x%llx-0x%llx\n",
pnp_mem_start(dev, i),
pnp_mem_end(dev, i));
}
@@ -286,7 +286,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
if (pnp_irq_flags(dev, i) & IORESOURCE_DISABLED)
pnp_printf(buffer," disabled\n");
else
- pnp_printf(buffer," %ld\n",
+ pnp_printf(buffer," %lld\n",
pnp_irq(dev, i));
}
}
@@ -296,7 +296,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
if (pnp_dma_flags(dev, i) & IORESOURCE_DISABLED)
pnp_printf(buffer," disabled\n");
else
- pnp_printf(buffer," %ld\n",
+ pnp_printf(buffer," %lld\n",
pnp_dma(dev, i));
}
}
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 6fff109bdab6..1d7a5b87f4cb 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -20,7 +20,8 @@ DECLARE_MUTEX(pnp_res_mutex);
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
- unsigned long *start, *end, *flags;
+ resource_size_t *start, *end;
+ unsigned long *flags;
if (!dev || !rule)
return -EINVAL;
@@ -63,7 +64,8 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
{
- unsigned long *start, *end, *flags;
+ resource_size_t *start, *end;
+ unsigned long *flags;
if (!dev || !rule)
return -EINVAL;
@@ -116,7 +118,8 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
{
- unsigned long *start, *end, *flags;
+ resource_size_t *start, *end;
+ unsigned long *flags;
int i;
/* IRQ priority: this table is good for i386 */
@@ -168,7 +171,8 @@ static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
{
- unsigned long *start, *end, *flags;
+ resource_size_t *start, *end;
+ unsigned long *flags;
int i;
/* DMA priority: this table is good for i386 */
@@ -582,7 +586,8 @@ int pnp_disable_dev(struct pnp_dev *dev)
* @size: size of region
*
*/
-void pnp_resource_change(struct resource *resource, unsigned long start, unsigned long size)
+void pnp_resource_change(struct resource *resource, resource_size_t start,
+ resource_size_t size)
{
if (resource == NULL)
return;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 6ded527169f4..7bb892f58cc0 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -241,7 +241,7 @@ int pnp_check_port(struct pnp_dev * dev, int idx)
{
int tmp;
struct pnp_dev *tdev;
- unsigned long *port, *end, *tport, *tend;
+ resource_size_t *port, *end, *tport, *tend;
port = &dev->res.port_resource[idx].start;
end = &dev->res.port_resource[idx].end;
@@ -297,7 +297,7 @@ int pnp_check_mem(struct pnp_dev * dev, int idx)
{
int tmp;
struct pnp_dev *tdev;
- unsigned long *addr, *end, *taddr, *tend;
+ resource_size_t *addr, *end, *taddr, *tend;
addr = &dev->res.mem_resource[idx].start;
end = &dev->res.mem_resource[idx].end;
@@ -358,7 +358,7 @@ int pnp_check_irq(struct pnp_dev * dev, int idx)
{
int tmp;
struct pnp_dev *tdev;
- unsigned long * irq = &dev->res.irq_resource[idx].start;
+ resource_size_t * irq = &dev->res.irq_resource[idx].start;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(dev->res.irq_resource[idx].flags))
@@ -423,7 +423,7 @@ int pnp_check_dma(struct pnp_dev * dev, int idx)
#ifndef CONFIG_IA64
int tmp;
struct pnp_dev *tdev;
- unsigned long * dma = &dev->res.dma_resource[idx].start;
+ resource_size_t * dma = &dev->res.dma_resource[idx].start;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(dev->res.dma_resource[idx].flags))
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index b9fab2ae3a36..8b56bbdd011e 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -17,8 +17,8 @@
* These interrupt-safe spinlocks protect all accesses to RIO
* configuration space and doorbell access.
*/
-static spinlock_t rio_config_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t rio_doorbell_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rio_config_lock);
+static DEFINE_SPINLOCK(rio_doorbell_lock);
/*
* Wrappers for all RIO configuration access functions. They just check
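
Aside (not part of the patch): the SPIN_LOCK_UNLOCKED to DEFINE_SPINLOCK() conversions in this patch (here and in m8xx_pcmcia.c, rtc-sa1100.c and rtc-vr41xx.c) are the same mechanical change. DEFINE_SPINLOCK() declares and initializes the lock in one step and keeps working as the spinlock type gains debugging or lockdep fields. A small sketch of the resulting idiom:

        #include <linux/spinlock.h>

        /* Equivalent to the old "static spinlock_t lock = SPIN_LOCK_UNLOCKED;"
         * but using the initializer macro the kernel has standardized on.
         */
        static DEFINE_SPINLOCK(example_lock);

        static void example_critical_section(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&example_lock, flags);
                /* ... touch data shared with interrupt context ... */
                spin_unlock_irqrestore(&example_lock, flags);
        }
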
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index bccff400b198..f2fc81a9074d 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -162,6 +162,16 @@ config RTC_DRV_PCF8583
This driver can also be built as a module. If so, the module
will be called rtc-pcf8583.
+config RTC_DRV_RS5C348
+ tristate "Ricoh RS5C348A/B"
+ depends on RTC_CLASS && SPI
+ help
+ If you say yes here you get support for the
+ Ricoh RS5C348A and RS5C348B RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rs5c348.
+
config RTC_DRV_RS5C372
tristate "Ricoh RS5C372A/B"
depends on RTC_CLASS && I2C
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 900d210dd1a2..da5e38774e13 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
+obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5396beec30d0..1cb61a761cb2 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -94,7 +94,9 @@ exit_kfree:
kfree(rtc);
exit_idr:
+ mutex_lock(&idr_lock);
idr_remove(&rtc_idr, id);
+ mutex_unlock(&idr_lock);
exit:
dev_err(dev, "rtc core: unable to register %s, err = %d\n",
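
Aside (not part of the patch): the two added lines close a small race in the error path; idr_remove() now runs under idr_lock, matching the locking used when the id was allocated from rtc_idr. A hedged sketch of the full allocate/free pairing (idr and mutex names follow drivers/rtc/class.c, the helper functions are invented for the example):

        #include <linux/idr.h>
        #include <linux/mutex.h>
        #include <linux/errno.h>
        #include <linux/slab.h>

        static DEFINE_IDR(rtc_idr);
        static DEFINE_MUTEX(idr_lock);

        /* Both the allocation and the later removal must hold idr_lock,
         * otherwise concurrent register/unregister calls can corrupt the
         * idr tree.
         */
        static int example_get_rtc_id(void)
        {
                int id, err;

                if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0)
                        return -ENOMEM;
                mutex_lock(&idr_lock);
                err = idr_get_new(&rtc_idr, NULL, &id);
                mutex_unlock(&idr_lock);
                return err ? err : id;
        }

        static void example_put_rtc_id(int id)
        {
                mutex_lock(&idr_lock);
                idr_remove(&rtc_idr, id);
                mutex_unlock(&idr_lock);
        }
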
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index ecafbad41a24..762521a1419c 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -226,7 +226,7 @@ static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq < 0)
- return -ENOIOCTLCMD;
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
switch (cmd) {
case RTC_AIE_OFF:
pdata->irqen &= ~RTC_AF;
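
Aside (not part of the patch): the comment added above is the whole point of the hunk. Returning -ENOIOCTLCMD from an rtc_class_ops ioctl handler tells the generic rtc-dev layer to fall back to its own emulation of the request where it can. A minimal, hypothetical handler shape:

        #include <linux/device.h>
        #include <linux/rtc.h>
        #include <linux/errno.h>

        /* Hypothetical handler: implement only what the hardware supports
         * and hand everything else back to rtc-dev with -ENOIOCTLCMD.
         */
        static int example_rtc_ioctl(struct device *dev, unsigned int cmd,
                                     unsigned long arg)
        {
                switch (cmd) {
                case RTC_AIE_ON:
                case RTC_AIE_OFF:
                        /* program the alarm-interrupt enable bit here */
                        return 0;
                default:
                        return -ENOIOCTLCMD;    /* let rtc-dev emulate it */
                }
        }
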
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
new file mode 100644
index 000000000000..0964d1dba925
--- /dev/null
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -0,0 +1,246 @@
+/*
+ * A SPI driver for the Ricoh RS5C348 RTC
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The board specific init code should provide characteristics of this
+ * device:
+ * Mode 1 (High-Active, Shift-Then-Sample), High Active CS
+ */
+
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/rtc.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION "0.1"
+
+#define RS5C348_REG_SECS 0
+#define RS5C348_REG_MINS 1
+#define RS5C348_REG_HOURS 2
+#define RS5C348_REG_WDAY 3
+#define RS5C348_REG_DAY 4
+#define RS5C348_REG_MONTH 5
+#define RS5C348_REG_YEAR 6
+#define RS5C348_REG_CTL1 14
+#define RS5C348_REG_CTL2 15
+
+#define RS5C348_SECS_MASK 0x7f
+#define RS5C348_MINS_MASK 0x7f
+#define RS5C348_HOURS_MASK 0x3f
+#define RS5C348_WDAY_MASK 0x03
+#define RS5C348_DAY_MASK 0x3f
+#define RS5C348_MONTH_MASK 0x1f
+
+#define RS5C348_BIT_PM 0x20 /* REG_HOURS */
+#define RS5C348_BIT_Y2K 0x80 /* REG_MONTH */
+#define RS5C348_BIT_24H 0x20 /* REG_CTL1 */
+#define RS5C348_BIT_XSTP 0x10 /* REG_CTL2 */
+#define RS5C348_BIT_VDET 0x40 /* REG_CTL2 */
+
+#define RS5C348_CMD_W(addr) (((addr) << 4) | 0x08) /* single write */
+#define RS5C348_CMD_R(addr) (((addr) << 4) | 0x0c) /* single read */
+#define RS5C348_CMD_MW(addr) (((addr) << 4) | 0x00) /* burst write */
+#define RS5C348_CMD_MR(addr) (((addr) << 4) | 0x04) /* burst read */
+
+struct rs5c348_plat_data {
+ struct rtc_device *rtc;
+ int rtc_24h;
+};
+
+static int
+rs5c348_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+ u8 txbuf[5+7], *txp;
+ int ret;
+
+ /* Transfer 5 bytes before writing SEC. This gives 31us for carry. */
+ txp = txbuf;
+ txbuf[0] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
+ txbuf[1] = 0; /* dummy */
+ txbuf[2] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
+ txbuf[3] = 0; /* dummy */
+ txbuf[4] = RS5C348_CMD_MW(RS5C348_REG_SECS); /* cmd, sec, ... */
+ txp = &txbuf[5];
+ txp[RS5C348_REG_SECS] = BIN2BCD(tm->tm_sec);
+ txp[RS5C348_REG_MINS] = BIN2BCD(tm->tm_min);
+ if (pdata->rtc_24h) {
+ txp[RS5C348_REG_HOURS] = BIN2BCD(tm->tm_hour);
+ } else {
+ /* hour 0 is AM12, noon is PM12 */
+ txp[RS5C348_REG_HOURS] = BIN2BCD((tm->tm_hour + 11) % 12 + 1) |
+ (tm->tm_hour >= 12 ? RS5C348_BIT_PM : 0);
+ }
+ txp[RS5C348_REG_WDAY] = BIN2BCD(tm->tm_wday);
+ txp[RS5C348_REG_DAY] = BIN2BCD(tm->tm_mday);
+ txp[RS5C348_REG_MONTH] = BIN2BCD(tm->tm_mon + 1) |
+ (tm->tm_year >= 100 ? RS5C348_BIT_Y2K : 0);
+ txp[RS5C348_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+ /* write in one transfer to avoid data inconsistency */
+ ret = spi_write_then_read(spi, txbuf, sizeof(txbuf), NULL, 0);
+ udelay(62); /* Tcsr 62us */
+ return ret;
+}
+
+static int
+rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+ u8 txbuf[5], rxbuf[7];
+ int ret;
+
+ /* Transfer 5 bytes before reading SEC. This gives 31us for carry. */
+ txbuf[0] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
+ txbuf[1] = 0; /* dummy */
+ txbuf[2] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
+ txbuf[3] = 0; /* dummy */
+ txbuf[4] = RS5C348_CMD_MR(RS5C348_REG_SECS); /* cmd, sec, ... */
+
+ /* read in one transfer to avoid data inconsistency */
+ ret = spi_write_then_read(spi, txbuf, sizeof(txbuf),
+ rxbuf, sizeof(rxbuf));
+ udelay(62); /* Tcsr 62us */
+ if (ret < 0)
+ return ret;
+
+ tm->tm_sec = BCD2BIN(rxbuf[RS5C348_REG_SECS] & RS5C348_SECS_MASK);
+ tm->tm_min = BCD2BIN(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
+ tm->tm_hour = BCD2BIN(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
+ if (!pdata->rtc_24h) {
+ tm->tm_hour %= 12;
+ if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
+ tm->tm_hour += 12;
+ }
+ tm->tm_wday = BCD2BIN(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
+ tm->tm_mday = BCD2BIN(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
+ tm->tm_mon =
+ BCD2BIN(rxbuf[RS5C348_REG_MONTH] & RS5C348_MONTH_MASK) - 1;
+ /* year is 1900 + tm->tm_year */
+ tm->tm_year = BCD2BIN(rxbuf[RS5C348_REG_YEAR]) +
+ ((rxbuf[RS5C348_REG_MONTH] & RS5C348_BIT_Y2K) ? 100 : 0);
+
+ if (rtc_valid_tm(tm) < 0) {
+ dev_err(&spi->dev, "retrieved date/time is not valid.\n");
+ rtc_time_to_tm(0, tm);
+ }
+
+ return 0;
+}
+
+static struct rtc_class_ops rs5c348_rtc_ops = {
+ .read_time = rs5c348_rtc_read_time,
+ .set_time = rs5c348_rtc_set_time,
+};
+
+static struct spi_driver rs5c348_driver;
+
+static int __devinit rs5c348_probe(struct spi_device *spi)
+{
+ int ret;
+ struct rtc_device *rtc;
+ struct rs5c348_plat_data *pdata;
+
+ pdata = kzalloc(sizeof(struct rs5c348_plat_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ spi->dev.platform_data = pdata;
+
+ /* Check D7 of SECOND register */
+ ret = spi_w8r8(spi, RS5C348_CMD_R(RS5C348_REG_SECS));
+ if (ret < 0 || (ret & 0x80)) {
+ dev_err(&spi->dev, "not found.\n");
+ goto kfree_exit;
+ }
+
+ dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
+ dev_info(&spi->dev, "spiclk %u KHz.\n",
+ (spi->max_speed_hz + 500) / 1000);
+
+ /* turn RTC on if it was not on */
+ ret = spi_w8r8(spi, RS5C348_CMD_R(RS5C348_REG_CTL2));
+ if (ret < 0)
+ goto kfree_exit;
+ if (ret & (RS5C348_BIT_XSTP | RS5C348_BIT_VDET)) {
+ u8 buf[2];
+ if (ret & RS5C348_BIT_VDET)
+ dev_warn(&spi->dev, "voltage-low detected.\n");
+ buf[0] = RS5C348_CMD_W(RS5C348_REG_CTL2);
+ buf[1] = 0;
+ ret = spi_write_then_read(spi, buf, sizeof(buf), NULL, 0);
+ if (ret < 0)
+ goto kfree_exit;
+ }
+
+ ret = spi_w8r8(spi, RS5C348_CMD_R(RS5C348_REG_CTL1));
+ if (ret < 0)
+ goto kfree_exit;
+ if (ret & RS5C348_BIT_24H)
+ pdata->rtc_24h = 1;
+
+ rtc = rtc_device_register(rs5c348_driver.driver.name, &spi->dev,
+ &rs5c348_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto kfree_exit;
+ }
+
+ pdata->rtc = rtc;
+
+ return 0;
+ kfree_exit:
+ kfree(pdata);
+ return ret;
+}
+
+static int __devexit rs5c348_remove(struct spi_device *spi)
+{
+ struct rs5c348_plat_data *pdata = spi->dev.platform_data;
+ struct rtc_device *rtc = pdata->rtc;
+
+ if (rtc)
+ rtc_device_unregister(rtc);
+ kfree(pdata);
+ return 0;
+}
+
+static struct spi_driver rs5c348_driver = {
+ .driver = {
+ .name = "rs5c348",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = rs5c348_probe,
+ .remove = __devexit_p(rs5c348_remove),
+};
+
+static __init int rs5c348_init(void)
+{
+ return spi_register_driver(&rs5c348_driver);
+}
+
+static __exit void rs5c348_exit(void)
+{
+ spi_unregister_driver(&rs5c348_driver);
+}
+
+module_init(rs5c348_init);
+module_exit(rs5c348_exit);
+
+MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
+MODULE_DESCRIPTION("Ricoh RS5C348 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
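
Aside (not part of the patch): the new driver binds by SPI modalias, so a board that carries this RTC still has to describe the chip to the SPI core. A hypothetical wiring sketch; the bus number, chip select and clock rate are invented, the header comment above asks for SPI mode 1 with an active-high chip select, and the driver allocates its own platform data in probe(), so none is passed here:

        #include <linux/init.h>
        #include <linux/spi/spi.h>

        static struct spi_board_info example_rtc_board_info[] __initdata = {
                {
                        .modalias       = "rs5c348",
                        .max_speed_hz   = 1000000,      /* invented: 1 MHz */
                        .bus_num        = 0,            /* invented */
                        .chip_select    = 1,            /* invented */
                        .mode           = SPI_MODE_1 | SPI_CS_HIGH,
                },
        };

        /* In the board's init code, before the SPI master is registered:
         *
         *      spi_register_board_info(example_rtc_board_info,
         *                              ARRAY_SIZE(example_rtc_board_info));
         */
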
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index ab486fbc828d..9cd1cb304bb2 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -45,7 +45,7 @@
static unsigned long rtc_freq = 1024;
static struct rtc_time rtc_alarm;
-static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sa1100_rtc_lock);
static int rtc_update_alarm(struct rtc_time *alrm)
{
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 33e029207e26..4b9291dd4443 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -93,7 +93,7 @@ static void __iomem *rtc2_base;
static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
-static spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rtc_lock);
static char rtc_name[] = "RTC";
static unsigned long periodic_frequency;
static unsigned long periodic_count;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index cfb1fff3787c..bafcd2f20ae2 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -95,7 +95,7 @@ dasd_alloc_device(void)
spin_lock_init(&device->mem_lock);
spin_lock_init(&device->request_queue_lock);
atomic_set (&device->tasklet_scheduled, 0);
- tasklet_init(&device->tasklet,
+ tasklet_init(&device->tasklet,
(void (*)(unsigned long)) dasd_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
@@ -128,7 +128,7 @@ dasd_state_new_to_known(struct dasd_device *device)
int rc;
/*
- * As long as the device is not in state DASD_STATE_NEW we want to
+ * As long as the device is not in state DASD_STATE_NEW we want to
* keep the reference count > 0.
*/
dasd_get_device(device);
@@ -336,7 +336,7 @@ dasd_decrease_state(struct dasd_device *device)
if (device->state == DASD_STATE_ONLINE &&
device->target <= DASD_STATE_READY)
dasd_state_online_to_ready(device);
-
+
if (device->state == DASD_STATE_READY &&
device->target <= DASD_STATE_BASIC)
dasd_state_ready_to_basic(device);
@@ -348,7 +348,7 @@ dasd_decrease_state(struct dasd_device *device)
if (device->state == DASD_STATE_BASIC &&
device->target <= DASD_STATE_KNOWN)
dasd_state_basic_to_known(device);
-
+
if (device->state == DASD_STATE_KNOWN &&
device->target <= DASD_STATE_NEW)
dasd_state_known_to_new(device);
@@ -994,7 +994,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
/* Find out the appropriate era_action. */
- if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
+ if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
era = dasd_era_fatal;
else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
irb->scsw.cstat == 0 &&
@@ -1004,7 +1004,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
era = dasd_era_fatal; /* don't recover this request */
else if (irb->esw.esw0.erw.cons)
era = device->discipline->examine_error(cqr, irb);
- else
+ else
era = dasd_era_recover;
DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
@@ -1287,7 +1287,7 @@ __dasd_start_head(struct dasd_device * device)
}
/*
- * Remove requests from the ccw queue.
+ * Remove requests from the ccw queue.
*/
static void
dasd_flush_ccw_queue(struct dasd_device * device, int all)
@@ -1450,23 +1450,23 @@ dasd_sleep_on(struct dasd_ccw_req * cqr)
wait_queue_head_t wait_q;
struct dasd_device *device;
int rc;
-
+
device = cqr->device;
spin_lock_irq(get_ccwdev_lock(device->cdev));
-
+
init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = (void *) &wait_q;
cqr->status = DASD_CQR_QUEUED;
list_add_tail(&cqr->list, &device->ccw_queue);
-
+
/* let the bh start the request to keep them in order */
dasd_schedule_bh(device);
-
+
spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(wait_q, _wait_for_wakeup(cqr));
-
+
/* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
return rc;
@@ -1568,7 +1568,7 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
wait_queue_head_t wait_q;
struct dasd_device *device;
int rc;
-
+
device = cqr->device;
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = _dasd_term_running_cqr(device);
@@ -1576,20 +1576,20 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
-
+
init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = (void *) &wait_q;
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->list, &device->ccw_queue);
-
+
/* let the bh start the request to keep them in order */
dasd_schedule_bh(device);
-
+
spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(wait_q, _wait_for_wakeup(cqr));
-
+
/* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
return rc;
@@ -1725,7 +1725,7 @@ dasd_flush_request_queue(struct dasd_device * device)
if (!device->request_queue)
return;
-
+
spin_lock_irq(&device->request_queue_lock);
while (!list_empty(&device->request_queue->queue_head)) {
req = elv_next_request(device->request_queue);
@@ -1855,15 +1855,34 @@ dasd_generic_probe (struct ccw_device *cdev,
{
int ret;
+ ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+ if (ret) {
+ printk(KERN_WARNING
+ "dasd_generic_probe: could not set ccw-device options "
+ "for %s\n", cdev->dev.bus_id);
+ return ret;
+ }
ret = dasd_add_sysfs_files(cdev);
if (ret) {
printk(KERN_WARNING
"dasd_generic_probe: could not add sysfs entries "
"for %s\n", cdev->dev.bus_id);
- } else {
- cdev->handler = &dasd_int_handler;
+ return ret;
}
+ cdev->handler = &dasd_int_handler;
+ /*
+ * Automatically online either all dasd devices (dasd_autodetect)
+ * or all devices specified with dasd= parameters during
+ * initial probe.
+ */
+ if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
+ (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
+ ret = ccw_device_set_online(cdev);
+ if (ret)
+ printk(KERN_WARNING
+ "dasd_generic_probe: could not initially online "
+ "ccw-device %s\n", cdev->dev.bus_id);
return ret;
}
@@ -1911,6 +1930,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
struct dasd_device *device;
int rc;
+ /* first online clears initial online feature flag */
+ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
device = dasd_create_device(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
@@ -2065,31 +2086,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
return ret;
}
-/*
- * Automatically online either all dasd devices (dasd_autodetect) or
- * all devices specified with dasd= parameters.
- */
-static int
-__dasd_auto_online(struct device *dev, void *data)
-{
- struct ccw_device *cdev;
-
- cdev = to_ccwdev(dev);
- if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
- ccw_device_set_online(cdev);
- return 0;
-}
-
-void
-dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
-{
- struct device_driver *drv;
-
- drv = get_driver(&dasd_discipline_driver->driver);
- driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
- put_driver(drv);
-}
-
static int __init
dasd_init(void)
@@ -2170,23 +2166,4 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
-EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
index 1d11c2a9525d..1ddab8991d92 100644
--- a/drivers/s390/block/dasd_3370_erp.c
+++ b/drivers/s390/block/dasd_3370_erp.c
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_3370_erp.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
@@ -12,10 +12,10 @@
/*
- * DASD_3370_ERP_EXAMINE
+ * DASD_3370_ERP_EXAMINE
*
* DESCRIPTION
- * Checks only for fatal/no/recover error.
+ * Checks only for fatal/no/recover error.
* A detailed examination of the sense data is done later outside
* the interrupt handler.
*
@@ -23,7 +23,7 @@
* 'Chapter 7. 3370 Sense Data'.
*
* RETURN VALUES
- * dasd_era_none no error
+ * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for all others.
*/
@@ -82,22 +82,3 @@ dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
return dasd_era_recover;
} /* END dasd_3370_erp_examine */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 2ed51562319e..669805d4402d 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1,6 +1,6 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_3990_erp.c
- * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
* Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
@@ -25,23 +25,23 @@ struct DCTL_data {
} __attribute__ ((packed));
/*
- *****************************************************************************
+ *****************************************************************************
* SECTION ERP EXAMINATION
- *****************************************************************************
+ *****************************************************************************
*/
/*
- * DASD_3990_ERP_EXAMINE_24
+ * DASD_3990_ERP_EXAMINE_24
*
* DESCRIPTION
- * Checks only for fatal (unrecoverable) error.
+ * Checks only for fatal (unrecoverable) error.
* A detailed examination of the sense data is done later outside
* the interrupt handler.
*
* Each bit configuration leading to an action code 2 (Exit with
* programming error or unusual condition indication)
* are handled as fatal error´s.
- *
+ *
* All other configurations are handled as recoverable errors.
*
* RETURN VALUES
@@ -93,15 +93,15 @@ dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
} /* END dasd_3990_erp_examine_24 */
/*
- * DASD_3990_ERP_EXAMINE_32
+ * DASD_3990_ERP_EXAMINE_32
*
* DESCRIPTION
- * Checks only for fatal/no/recoverable error.
+ * Checks only for fatal/no/recoverable error.
* A detailed examination of the sense data is done later outside
* the interrupt handler.
*
* RETURN VALUES
- * dasd_era_none no error
+ * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for recoverable others.
*/
@@ -128,10 +128,10 @@ dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
} /* end dasd_3990_erp_examine_32 */
/*
- * DASD_3990_ERP_EXAMINE
+ * DASD_3990_ERP_EXAMINE
*
* DESCRIPTION
- * Checks only for fatal/no/recover error.
+ * Checks only for fatal/no/recover error.
* A detailed examination of the sense data is done later outside
* the interrupt handler.
*
@@ -139,7 +139,7 @@ dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
* 'Chapter 7. Error Recovery Procedures'.
*
* RETURN VALUES
- * dasd_era_none no error
+ * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for all others.
*/
@@ -178,18 +178,18 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
} /* END dasd_3990_erp_examine */
/*
- *****************************************************************************
+ *****************************************************************************
* SECTION ERP HANDLING
- *****************************************************************************
+ *****************************************************************************
*/
/*
- *****************************************************************************
+ *****************************************************************************
* 24 and 32 byte sense ERP functions
- *****************************************************************************
+ *****************************************************************************
*/
/*
- * DASD_3990_ERP_CLEANUP
+ * DASD_3990_ERP_CLEANUP
*
* DESCRIPTION
* Removes the already build but not necessary ERP request and sets
@@ -197,10 +197,10 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
*
* PARAMETER
* erp request to be blocked
- * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
+ * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
*
* RETURN VALUES
- * cqr original cqr
+ * cqr original cqr
*/
static struct dasd_ccw_req *
dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
@@ -214,7 +214,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
} /* end dasd_3990_erp_cleanup */
/*
- * DASD_3990_ERP_BLOCK_QUEUE
+ * DASD_3990_ERP_BLOCK_QUEUE
*
* DESCRIPTION
* Block the given device request queue to prevent from further
@@ -237,7 +237,7 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
}
/*
- * DASD_3990_ERP_INT_REQ
+ * DASD_3990_ERP_INT_REQ
*
* DESCRIPTION
* Handles 'Intervention Required' error.
@@ -277,7 +277,7 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
} /* end dasd_3990_erp_int_req */
/*
- * DASD_3990_ERP_ALTERNATE_PATH
+ * DASD_3990_ERP_ALTERNATE_PATH
*
* DESCRIPTION
* Repeat the operation on a different channel path.
@@ -330,15 +330,15 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
* DASD_3990_ERP_DCTL
*
* DESCRIPTION
- * Setup cqr to do the Diagnostic Control (DCTL) command with an
+ * Setup cqr to do the Diagnostic Control (DCTL) command with an
* Inhibit Write subcommand (0x20) and the given modifier.
*
* PARAMETER
* erp pointer to the current (failed) ERP
* modifier subcommand modifier
- *
+ *
* RETURN VALUES
- * dctl_cqr pointer to NEW dctl_cqr
+ * dctl_cqr pointer to NEW dctl_cqr
*
*/
static struct dasd_ccw_req *
@@ -386,7 +386,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
} /* end dasd_3990_erp_DCTL */
/*
- * DASD_3990_ERP_ACTION_1
+ * DASD_3990_ERP_ACTION_1
*
* DESCRIPTION
* Setup ERP to do the ERP action 1 (see Reference manual).
@@ -415,7 +415,7 @@ dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
} /* end dasd_3990_erp_action_1 */
/*
- * DASD_3990_ERP_ACTION_4
+ * DASD_3990_ERP_ACTION_4
*
* DESCRIPTION
* Setup ERP to do the ERP action 4 (see Reference manual).
@@ -453,11 +453,11 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
if (sense[25] == 0x1D) { /* state change pending */
- DEV_MESSAGE(KERN_INFO, device,
+ DEV_MESSAGE(KERN_INFO, device,
"waiting for state change pending "
"interrupt, %d retries left",
erp->retries);
-
+
dasd_3990_erp_block_queue(erp, 30*HZ);
} else if (sense[25] == 0x1E) { /* busy */
@@ -469,9 +469,9 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
} else {
/* no state change pending - retry */
- DEV_MESSAGE (KERN_INFO, device,
+ DEV_MESSAGE (KERN_INFO, device,
"redriving request immediately, "
- "%d retries left",
+ "%d retries left",
erp->retries);
erp->status = DASD_CQR_QUEUED;
}
@@ -482,13 +482,13 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_action_4 */
/*
- *****************************************************************************
+ *****************************************************************************
* 24 byte sense ERP functions (only)
- *****************************************************************************
+ *****************************************************************************
*/
/*
- * DASD_3990_ERP_ACTION_5
+ * DASD_3990_ERP_ACTION_5
*
* DESCRIPTION
* Setup ERP to do the ERP action 5 (see Reference manual).
@@ -523,7 +523,7 @@ dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
*
* PARAMETER
* sense current sense data
- *
+ *
* RETURN VALUES
* void
*/
@@ -1150,9 +1150,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
* PARAMETER
* erp current erp_head
* sense current sense data
- *
+ *
* RETURN VALUES
- * erp 'new' erp_head - pointer to new ERP
+ * erp 'new' erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
@@ -1185,7 +1185,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_com_rej */
/*
- * DASD_3990_ERP_BUS_OUT
+ * DASD_3990_ERP_BUS_OUT
*
* DESCRIPTION
* Handles 24 byte 'Bus Out Parity Check' error.
@@ -1483,7 +1483,7 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
*
* PARAMETER
* erp already added default ERP
- *
+ *
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
@@ -1527,11 +1527,11 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
} /* end dasd_3990_erp_file_prot */
/*
- * DASD_3990_ERP_INSPECT_24
+ * DASD_3990_ERP_INSPECT_24
*
* DESCRIPTION
* Does a detailed inspection of the 24 byte sense data
- * and sets up a related error recovery action.
+ * and sets up a related error recovery action.
*
* PARAMETER
* sense sense data of the actual error
@@ -1602,13 +1602,13 @@ dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
} /* END dasd_3990_erp_inspect_24 */
/*
- *****************************************************************************
+ *****************************************************************************
* 32 byte sense ERP functions (only)
- *****************************************************************************
+ *****************************************************************************
*/
/*
- * DASD_3990_ERPACTION_10_32
+ * DASD_3990_ERPACTION_10_32
*
* DESCRIPTION
* Handles 32 byte 'Action 10' of Single Program Action Codes.
@@ -1616,7 +1616,7 @@ dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
*
* PARAMETER
* erp current erp_head
- * sense current sense data
+ * sense current sense data
* RETURN VALUES
* erp modified erp_head
*/
@@ -1640,18 +1640,18 @@ dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
*
* DESCRIPTION
* Handles 32 byte 'Action 1B' of Single Program Action Codes.
- * A write operation could not be finished because of an unexpected
+ * A write operation could not be finished because of an unexpected
* condition.
- * The already created 'default erp' is used to get the link to
- * the erp chain, but it can not be used for this recovery
+ * The already created 'default erp' is used to get the link to
+ * the erp chain, but it can not be used for this recovery
* action because it contains no DE/LO data space.
*
* PARAMETER
* default_erp already added default erp.
- * sense current sense data
+ * sense current sense data
*
* RETURN VALUES
- * erp new erp or
+ * erp new erp or
* default_erp in case of imprecise ending or error
*/
static struct dasd_ccw_req *
@@ -1789,16 +1789,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
* DASD_3990_UPDATE_1B
*
* DESCRIPTION
- * Handles the update to the 32 byte 'Action 1B' of Single Program
+ * Handles the update to the 32 byte 'Action 1B' of Single Program
* Action Codes in case the first action was not successful.
* The already created 'previous_erp' is the currently not successful
- * ERP.
+ * ERP.
*
* PARAMETER
* previous_erp already created previous erp.
- * sense current sense data
+ * sense current sense data
* RETURN VALUES
- * erp modified erp
+ * erp modified erp
*/
static struct dasd_ccw_req *
dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
@@ -1897,7 +1897,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
} /* end dasd_3990_update_1B */
/*
- * DASD_3990_ERP_COMPOUND_RETRY
+ * DASD_3990_ERP_COMPOUND_RETRY
*
* DESCRIPTION
* Handles the compound ERP action retry code.
@@ -1943,7 +1943,7 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_retry */
/*
- * DASD_3990_ERP_COMPOUND_PATH
+ * DASD_3990_ERP_COMPOUND_PATH
*
* DESCRIPTION
* Handles the compound ERP action for retry on alternate
@@ -1965,7 +1965,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED) {
- /* reset the lpm and the status to be able to
+ /* reset the lpm and the status to be able to
* try further actions. */
erp->lpm = 0;
@@ -1980,7 +1980,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_path */
/*
- * DASD_3990_ERP_COMPOUND_CODE
+ * DASD_3990_ERP_COMPOUND_CODE
*
* DESCRIPTION
* Handles the compound ERP action for retry code.
@@ -2001,18 +2001,18 @@ dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
switch (sense[28]) {
case 0x17:
- /* issue a Diagnostic Control command with an
+ /* issue a Diagnostic Control command with an
* Inhibit Write subcommand and controler modifier */
erp = dasd_3990_erp_DCTL(erp, 0x20);
break;
-
+
case 0x25:
/* wait for 5 seconds and retry again */
erp->retries = 1;
-
+
dasd_3990_erp_block_queue (erp, 5*HZ);
break;
-
+
default:
/* should not happen - continue */
break;
@@ -2026,7 +2026,7 @@ dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_code */
/*
- * DASD_3990_ERP_COMPOUND_CONFIG
+ * DASD_3990_ERP_COMPOUND_CONFIG
*
* DESCRIPTION
* Handles the compound ERP action for configruation
@@ -2063,10 +2063,10 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_config */
/*
- * DASD_3990_ERP_COMPOUND
+ * DASD_3990_ERP_COMPOUND
*
* DESCRIPTION
- * Does the further compound program action if
+ * Does the further compound program action if
* compound retry was not successful.
*
* PARAMETER
@@ -2110,11 +2110,11 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound */
/*
- * DASD_3990_ERP_INSPECT_32
+ * DASD_3990_ERP_INSPECT_32
*
* DESCRIPTION
* Does a detailed inspection of the 32 byte sense data
- * and sets up a related error recovery action.
+ * and sets up a related error recovery action.
*
* PARAMETER
* sense sense data of the actual error
@@ -2228,9 +2228,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_inspect_32 */
/*
- *****************************************************************************
+ *****************************************************************************
* main ERP control fuctions (24 and 32 byte sense)
- *****************************************************************************
+ *****************************************************************************
*/
/*
@@ -2243,7 +2243,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
* PARAMETER
* erp pointer to the currently created default ERP
* RETURN VALUES
- * erp_new contens was possibly modified
+ * erp_new contens was possibly modified
*/
static struct dasd_ccw_req *
dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
@@ -2272,14 +2272,14 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
/*
* DASD_3990_ERP_ADD_ERP
- *
+ *
* DESCRIPTION
* This funtion adds an additional request block (ERP) to the head of
* the given cqr (or erp).
* This erp is initialized as an default erp (retry TIC)
*
* PARAMETER
- * cqr head of the current ERP-chain (or single cqr if
+ * cqr head of the current ERP-chain (or single cqr if
* first error)
* RETURN VALUES
* erp pointer to new ERP-chain head
@@ -2332,15 +2332,15 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
}
/*
- * DASD_3990_ERP_ADDITIONAL_ERP
- *
+ * DASD_3990_ERP_ADDITIONAL_ERP
+ *
* DESCRIPTION
* An additional ERP is needed to handle the current error.
* Add ERP to the head of the ERP-chain containing the ERP processing
* determined based on the sense data.
*
* PARAMETER
- * cqr head of the current ERP-chain (or single cqr if
+ * cqr head of the current ERP-chain (or single cqr if
* first error)
*
* RETURN VALUES
@@ -2376,7 +2376,7 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
* 24 byte sense byte 25 and 27 is set as well.
*
* PARAMETER
- * cqr1 first cqr, which will be compared with the
+ * cqr1 first cqr, which will be compared with the
* cqr2 second cqr.
*
* RETURN VALUES
@@ -2415,7 +2415,7 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
* cqr failed cqr (either original cqr or already an erp)
*
* RETURN VALUES
- * erp erp-pointer to the already defined error
+ * erp erp-pointer to the already defined error
* recovery procedure OR
* NULL if a 'new' error occurred.
*/
@@ -2451,10 +2451,10 @@ dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
* DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
*
* DESCRIPTION
- * No retry is left for the current ERP. Check what has to be done
+ * No retry is left for the current ERP. Check what has to be done
* with the ERP.
* - do further defined ERP action or
- * - wait for interrupt or
+ * - wait for interrupt or
* - exit with permanent error
*
* PARAMETER
@@ -2485,7 +2485,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
if (!(sense[2] & DASD_SENSE_BIT_0)) {
- /* issue a Diagnostic Control command with an
+ /* issue a Diagnostic Control command with an
* Inhibit Write subcommand */
switch (sense[25]) {
@@ -2535,14 +2535,14 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
} /* end dasd_3990_erp_further_erp */
/*
- * DASD_3990_ERP_HANDLE_MATCH_ERP
+ * DASD_3990_ERP_HANDLE_MATCH_ERP
*
* DESCRIPTION
* An error occurred again and an ERP has been detected which is already
- * used to handle this error (e.g. retries).
+ * used to handle this error (e.g. retries).
* All prior ERP's are asumed to be successful and therefore removed
* from queue.
- * If retry counter of matching erp is already 0, it is checked if further
+ * If retry counter of matching erp is already 0, it is checked if further
* action is needed (besides retry) or if the ERP has failed.
*
* PARAMETER
@@ -2631,7 +2631,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
* erp erp-pointer to the head of the ERP action chain.
* This means:
* - either a ptr to an additional ERP cqr or
- * - the original given cqr (which's status might
+ * - the original given cqr (which's status might
* be modified)
*/
struct dasd_ccw_req *
@@ -2723,22 +2723,3 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
return erp;
} /* end dasd_3990_erp_action */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
index dc861446d056..6e082688475a 100644
--- a/drivers/s390/block/dasd_9336_erp.c
+++ b/drivers/s390/block/dasd_9336_erp.c
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_9336_erp.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
@@ -12,10 +12,10 @@
/*
- * DASD_9336_ERP_EXAMINE
+ * DASD_9336_ERP_EXAMINE
*
* DESCRIPTION
- * Checks only for fatal/no/recover error.
+ * Checks only for fatal/no/recover error.
* A detailed examination of the sense data is done later outside
* the interrupt handler.
*
@@ -23,7 +23,7 @@
* 'Chapter 7. 9336 Sense Data'.
*
* RETURN VALUES
- * dasd_era_none no error
+ * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for all others.
*/
@@ -39,22 +39,3 @@ dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
return dasd_era_recover;
} /* END dasd_9336_erp_examine */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
index 4a5b79569aaa..ddecb9808ed4 100644
--- a/drivers/s390/block/dasd_9343_erp.c
+++ b/drivers/s390/block/dasd_9343_erp.c
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_9345_erp.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 216bc4fba199..9e9ae7179602 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -27,7 +27,7 @@
#include "dasd_int.h"
kmem_cache_t *dasd_page_cache;
-EXPORT_SYMBOL(dasd_page_cache);
+EXPORT_SYMBOL_GPL(dasd_page_cache);
/*
* dasd_devmap_t is used to store the features and the relation
@@ -49,6 +49,20 @@ struct dasd_devmap {
};
/*
+ * dasd_servermap is used to store the server_id of all storage servers
+ * accessed by the DASD device driver.
+ */
+struct dasd_servermap {
+ struct list_head list;
+ struct server_id {
+ char vendor[4];
+ char serial[15];
+ } sid;
+};
+
+static struct list_head dasd_serverlist;
+
+/*
* Parameter parsing functions for dasd= parameter. The syntax is:
* <devno> : (0x)?[0-9a-fA-F]+
* <busid> : [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
@@ -64,6 +78,8 @@ struct dasd_devmap {
int dasd_probeonly = 0; /* is true, when probeonly mode is active */
int dasd_autodetect = 0; /* is true, when autodetection is active */
+int dasd_nopav = 0; /* is true, when PAV is disabled */
+EXPORT_SYMBOL_GPL(dasd_nopav);
/*
* char *dasd[] is intended to hold the ranges supplied by the dasd= statement
@@ -123,7 +139,7 @@ static inline int
dasd_busid(char **str, int *id0, int *id1, int *devno)
{
int val, old_style;
-
+
/* check for leading '0x' */
old_style = 0;
if ((*str)[0] == '0' && (*str)[1] == 'x') {
@@ -179,7 +195,7 @@ dasd_feature_list(char *str, char **endp)
features = 0;
while (1) {
- for (len = 0;
+ for (len = 0;
str[len] && str[len] != ':' && str[len] != ')'; len++);
if (len == 2 && !strncmp(str, "ro", 2))
features |= DASD_FEATURE_READONLY;
@@ -228,19 +244,24 @@ dasd_parse_keyword( char *parsestring ) {
length = strlen(parsestring);
residual_str = parsestring + length;
}
- if (strncmp ("autodetect", parsestring, length) == 0) {
+ if (strncmp("autodetect", parsestring, length) == 0) {
dasd_autodetect = 1;
MESSAGE (KERN_INFO, "%s",
"turning to autodetection mode");
return residual_str;
}
- if (strncmp ("probeonly", parsestring, length) == 0) {
+ if (strncmp("probeonly", parsestring, length) == 0) {
dasd_probeonly = 1;
MESSAGE(KERN_INFO, "%s",
"turning to probeonly mode");
return residual_str;
}
- if (strncmp ("fixedbuffers", parsestring, length) == 0) {
+ if (strncmp("nopav", parsestring, length) == 0) {
+ dasd_nopav = 1;
+ MESSAGE(KERN_INFO, "%s", "disable PAV mode");
+ return residual_str;
+ }
+ if (strncmp("fixedbuffers", parsestring, length) == 0) {
if (dasd_page_cache)
return residual_str;
dasd_page_cache =
@@ -294,6 +315,8 @@ dasd_parse_range( char *parsestring ) {
features = dasd_feature_list(str, &str);
if (features < 0)
return ERR_PTR(-EINVAL);
+ /* each device in dasd= parameter should be set initially online */
+ features |= DASD_FEATURE_INITIAL_ONLINE;
while (from <= to) {
sprintf(bus_id, "%01x.%01x.%04x",
from_id0, from_id1, from++);
@@ -359,7 +382,7 @@ dasd_parse(void)
* Add a devmap for the device specified by busid. It is possible that
* the devmap already exists (dasd= parameter). The order of the devices
* added through this function will define the kdevs for the individual
- * devices.
+ * devices.
*/
static struct dasd_devmap *
dasd_add_busid(char *bus_id, int features)
@@ -368,7 +391,7 @@ dasd_add_busid(char *bus_id, int features)
int hash;
new = (struct dasd_devmap *)
- kmalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+ kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
spin_lock(&dasd_devmap_lock);
@@ -630,7 +653,8 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-dasd_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+dasd_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct dasd_devmap *devmap;
int ro_flag;
@@ -658,7 +682,7 @@ static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
* use_diag controls whether the driver should use diag rather than ssch
* to talk to the device
*/
-static ssize_t
+static ssize_t
dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
@@ -673,7 +697,8 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-dasd_use_diag_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct dasd_devmap *devmap;
ssize_t rc;
@@ -697,11 +722,11 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, const cha
return rc;
}
-static
-DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
+static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
static ssize_t
-dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *buf)
+dasd_discipline_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct dasd_devmap *devmap;
char *dname;
@@ -834,6 +859,38 @@ static struct attribute_group dasd_attr_group = {
.attrs = dasd_attrs,
};
+/*
+ * Check if the related storage server is already contained in the
+ * dasd_serverlist. If server is not contained, create new entry.
+ * Return 0 if server was already in serverlist,
+ * 1 if the server was added successfully
+ * <0 in case of error.
+ */
+static int
+dasd_add_server(struct dasd_uid *uid)
+{
+ struct dasd_servermap *new, *tmp;
+
+ /* check if server is already contained */
+ list_for_each_entry(tmp, &dasd_serverlist, list)
+ // normal cmp?
+ if (strncmp(tmp->sid.vendor, uid->vendor,
+ sizeof(tmp->sid.vendor)) == 0
+ && strncmp(tmp->sid.serial, uid->serial,
+ sizeof(tmp->sid.serial)) == 0)
+ return 0;
+
+ new = (struct dasd_servermap *)
+ kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor));
+ strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial));
+ list_add(&new->list, &dasd_serverlist);
+ return 1;
+}
+
/*
* Return copy of the device unique identifier.
@@ -854,21 +911,26 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
/*
* Register the given device unique identifier into devmap struct.
+ * Return 0 if server was already in serverlist,
+ * 1 if the server was added successfully
+ * <0 in case of error.
*/
int
dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
{
struct dasd_devmap *devmap;
+ int rc;
devmap = dasd_find_busid(cdev->dev.bus_id);
if (IS_ERR(devmap))
return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
devmap->uid = *uid;
+ rc = dasd_add_server(uid);
spin_unlock(&dasd_devmap_lock);
- return 0;
+ return rc;
}
-EXPORT_SYMBOL(dasd_set_uid);
+EXPORT_SYMBOL_GPL(dasd_set_uid);
/*
* Return value of the specified feature.
@@ -880,7 +942,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature)
devmap = dasd_find_busid(cdev->dev.bus_id);
if (IS_ERR(devmap))
- return (int) PTR_ERR(devmap);
+ return PTR_ERR(devmap);
return ((devmap->features & feature) != 0);
}
@@ -896,7 +958,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
devmap = dasd_find_busid(cdev->dev.bus_id);
if (IS_ERR(devmap))
- return (int) PTR_ERR(devmap);
+ return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
if (flag)
@@ -932,8 +994,10 @@ dasd_devmap_init(void)
dasd_max_devindex = 0;
for (i = 0; i < 256; i++)
INIT_LIST_HEAD(&dasd_hashlists[i]);
- return 0;
+ /* Initialize servermap structure. */
+ INIT_LIST_HEAD(&dasd_serverlist);
+ return 0;
}
void
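
Aside (not part of the patch): taken together, the dasd_devmap.c hunks above extend the dasd= boot parameter. A new nopav keyword disables PAV, and every device named in a range now gets DASD_FEATURE_INITIAL_ONLINE, which the dasd_generic_probe() change earlier in this patch uses to bring the device online at probe time. For illustration only (bus ids invented), command lines of this shape are what the parser shown above accepts:

        dasd=autodetect,nopav
        dasd=0.0.2000-0.0.20ff(ro)
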
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 3f9d704d2657..4002f6c1c1b3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_diag.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
@@ -336,7 +336,7 @@ dasd_diag_check_device(struct dasd_device *device)
private = (struct dasd_diag_private *) device->private;
if (private == NULL) {
- private = kmalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+ private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
if (private == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s",
"memory allocation failed for private data");
@@ -527,7 +527,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
datasize, device);
if (IS_ERR(cqr))
return cqr;
-
+
dreq = (struct dasd_diag_req *) cqr->data;
dreq->block_count = count;
dbio = dreq->bio;
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index 38a4e55f8953..b8c78267ff3e 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_diag.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 7d5a6cee4bd8..0dfab30e8089 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1,7 +1,7 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_eckd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
- * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <asm/todclk.h>
#include <asm/uaccess.h>
+#include <asm/cio.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
@@ -89,17 +90,22 @@ dasd_eckd_probe (struct ccw_device *cdev)
{
int ret;
- ret = dasd_generic_probe (cdev, &dasd_eckd_discipline);
- if (ret)
+ /* set ECKD specific ccw-device options */
+ ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
+ if (ret) {
+ printk(KERN_WARNING
+ "dasd_eckd_probe: could not set ccw-device options "
+ "for %s\n", cdev->dev.bus_id);
return ret;
- ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_ALLOW_FORCE);
- return 0;
+ }
+ ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
+ return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
- return dasd_generic_set_online (cdev, &dasd_eckd_discipline);
+ return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static struct ccw_driver dasd_eckd_driver = {
@@ -210,14 +216,14 @@ check_XRC (struct ccw1 *de_ccw,
/* switch on System Time Stamp - needed for XRC Support */
if (private->rdc_data.facilities.XRC_supported) {
-
+
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
-
+
data->ep_sys_time = get_clock ();
-
+
de_ccw->count = sizeof (struct DE_eckd_data);
- de_ccw->flags |= CCW_FLAG_SLI;
+ de_ccw->flags |= CCW_FLAG_SLI;
}
return;
@@ -296,8 +302,8 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
/* check for sequential prestage - enhance cylinder range */
if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
data->attributes.operation == DASD_SEQ_ACCESS) {
-
- if (end.cyl + private->attrib.nr_cyl < geo.cyl)
+
+ if (end.cyl + private->attrib.nr_cyl < geo.cyl)
end.cyl += private->attrib.nr_cyl;
else
end.cyl = (geo.cyl - 1);
@@ -317,7 +323,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
struct dasd_eckd_private *private;
int sector;
int dn, d;
-
+
private = (struct dasd_eckd_private *) device->private;
DBF_DEV_EVENT(DBF_INFO, device,
@@ -541,6 +547,86 @@ dasd_eckd_read_conf(struct dasd_device *device)
}
/*
+ * Build CP for Perform Subsystem Function - SSC.
+ */
+struct dasd_ccw_req *
+dasd_eckd_build_psf_ssc(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ struct dasd_psf_ssc_data *psf_ssc_data;
+ struct ccw1 *ccw;
+
+ cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
+ sizeof(struct dasd_psf_ssc_data),
+ device);
+
+ if (IS_ERR(cqr)) {
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "Could not allocate PSF-SSC request");
+ return cqr;
+ }
+ psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
+ psf_ssc_data->order = PSF_ORDER_SSC;
+ psf_ssc_data->suborder = 0x08;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->cda = (__u32)(addr_t)psf_ssc_data;
+ ccw->count = 66;
+
+ cqr->device = device;
+ cqr->expires = 10*HZ;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
+
+/*
+ * Perform Subsystem Function.
+ * It is necessary to trigger CIO for channel revalidation since this
+ * call might change behaviour of DASD devices.
+ */
+static int
+dasd_eckd_psf_ssc(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ cqr = dasd_eckd_build_psf_ssc(device);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+
+ rc = dasd_sleep_on(cqr);
+ if (!rc)
+ /* trigger CIO to reprobe devices */
+ css_schedule_reprobe();
+ dasd_sfree_request(cqr, cqr->device);
+ return rc;
+}
+
+/*
+ * Validate the storage server of the current device.
+ */
+static int
+dasd_eckd_validate_server(struct dasd_device *device)
+{
+ int rc;
+
+ /* Currently PAV is the only reason to 'validate' server on LPAR */
+ if (dasd_nopav || MACHINE_IS_VM)
+ return 0;
+
+ rc = dasd_eckd_psf_ssc(device);
+ if (rc)
+		/* maybe the requested feature is not available on the server,
+		 * therefore just report the error and go ahead */
+ DEV_MESSAGE(KERN_INFO, device,
+ "Perform Subsystem Function returned rc=%d", rc);
+ /* RE-Read Configuration Data */
+ return dasd_eckd_read_conf(device);
+}
+
+/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
*/
@@ -554,7 +640,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
if (private == NULL) {
- private = kmalloc(sizeof(struct dasd_eckd_private),
+ private = kzalloc(sizeof(struct dasd_eckd_private),
GFP_KERNEL | GFP_DMA);
if (private == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s",
@@ -562,7 +648,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
"data");
return -ENOMEM;
}
- memset(private, 0, sizeof(struct dasd_eckd_private));
device->private = (void *) private;
}
/* Invalidate status of initial analysis. */
@@ -571,16 +656,29 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private->attrib.operation = DASD_NORMAL_CACHE;
private->attrib.nr_cyl = 0;
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ return rc;
+
+ /* Generate device unique id and register in devmap */
+ rc = dasd_eckd_generate_uid(device, &uid);
+ if (rc)
+ return rc;
+ rc = dasd_set_uid(device->cdev, &uid);
+ if (rc == 1) /* new server found */
+ rc = dasd_eckd_validate_server(device);
+ if (rc)
+ return rc;
+
/* Read Device Characteristics */
rdc_data = (void *) &(private->rdc_data);
memset(rdc_data, 0, sizeof(rdc_data));
rc = read_dev_chars(device->cdev, &rdc_data, 64);
- if (rc) {
+ if (rc)
DEV_MESSAGE(KERN_WARNING, device,
- "Read device characteristics returned error %d",
- rc);
- return rc;
- }
+ "Read device characteristics returned "
+ "rc=%d", rc);
DEV_MESSAGE(KERN_INFO, device,
"%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
@@ -591,19 +689,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private->rdc_data.no_cyl,
private->rdc_data.trk_per_cyl,
private->rdc_data.sec_per_trk);
-
- /* Read Configuration Data */
- rc = dasd_eckd_read_conf (device);
- if (rc)
- return rc;
-
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &uid);
- if (rc)
- return rc;
-
- rc = dasd_set_uid(device->cdev, &uid);
-
return rc;
}
@@ -773,7 +858,7 @@ dasd_eckd_end_analysis(struct dasd_device *device)
((private->rdc_data.no_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (device->bp_block >> 9)) >> 1),
- ((blk_per_trk * device->bp_block) >> 10),
+ ((blk_per_trk * device->bp_block) >> 10),
private->uses_cdl ?
"compatible disk layout" : "linux disk layout");
@@ -970,7 +1055,7 @@ dasd_eckd_format_device(struct dasd_device * device,
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
- }
+ }
}
if ((fdata->intensity & 0x08) &&
fdata->start_unit == 1) {
@@ -1270,7 +1355,7 @@ dasd_eckd_fill_info(struct dasd_device * device,
/*
* Release device ioctl.
- * Buils a channel programm to releases a prior reserved
+ * Builds a channel program to release a previously reserved
* (see dasd_eckd_reserve) device.
*/
static int
@@ -1310,8 +1395,8 @@ dasd_eckd_release(struct dasd_device *device)
/*
* Reserve device ioctl.
* Options are set to 'synchronous wait for interrupt' and
- * 'timeout the request'. This leads to a terminate IO if
- * the interrupt is outstanding for a certain time.
+ * 'timeout the request'. This leads to a terminate IO if
+ * the interrupt is outstanding for a certain time.
*/
static int
dasd_eckd_reserve(struct dasd_device *device)
@@ -1349,7 +1434,7 @@ dasd_eckd_reserve(struct dasd_device *device)
/*
* Steal lock ioctl - unconditional reserve device.
+ * Builds a channel program to break a device's reservation.
+ * Buils a channel programm to break a device's reservation.
* (unconditional reserve)
*/
static int
@@ -1522,6 +1607,40 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
}
/*
+ * Dump the range of CCWs into 'page' buffer
+ * and return number of printed chars.
+ */
+static inline int
+dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+{
+ int len, count;
+ char *datap;
+
+ len = 0;
+ while (from <= to) {
+ len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " CCW %p: %08X %08X DAT:",
+ from, ((int *) from)[0], ((int *) from)[1]);
+
+ /* get pointer to data (consider IDALs) */
+ if (from->flags & CCW_FLAG_IDA)
+ datap = (char *) *((addr_t *) (addr_t) from->cda);
+ else
+ datap = (char *) ((addr_t) from->cda);
+
+ /* dump data (max 32 bytes) */
+ for (count = 0; count < from->count && count < 32; count++) {
+ if (count % 8 == 0) len += sprintf(page + len, " ");
+ if (count % 4 == 0) len += sprintf(page + len, " ");
+ len += sprintf(page + len, "%02x", datap[count]);
+ }
+ len += sprintf(page + len, "\n");
+ from++;
+ }
+ return len;
+}
+
+/*
* Print sense data and related channel program.
* Parts are printed because printk buffer is only 1024 bytes.
*/
@@ -1530,8 +1649,8 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
char *page;
- struct ccw1 *act, *end, *last;
- int len, sl, sct, count;
+ struct ccw1 *first, *last, *fail, *from, *to;
+ int len, sl, sct;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
@@ -1539,7 +1658,8 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"No memory to dump sense data");
return;
}
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ /* dump the sense data */
+ len = sprintf(page, KERN_ERR PRINTK_HEADER
" I/O status report for device %s:\n",
device->cdev->dev.bus_id);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -1564,87 +1684,55 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
- irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
- irb->ecw[1] & 0x10 ? "" : "no");
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " 24 Byte: %x MSG %x, "
+ "%s MSGb to SYSOP\n",
+ irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
+ irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
- irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " 32 Byte: Format: %x "
+ "Exception class %x\n",
+ irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, KERN_ERR PRINTK_HEADER
+ " SORRY - NO VALID SENSE AVAILABLE\n");
}
- MESSAGE_LOG(KERN_ERR, "%s",
- page + sizeof(KERN_ERR PRINTK_HEADER));
-
- /* dump the Channel Program */
- /* print first CCWs (maximum 8) */
- act = req->cpaddr;
- for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
- end = min(act + 8, last);
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ printk("%s", page);
+
+ /* dump the Channel Program (max 140 Bytes per line) */
+	/* Count CCWs and print the first ones (maximum 1024 / 140 = 7) */
+ first = req->cpaddr;
+ for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+ to = min(first + 6, last);
+ len = sprintf(page, KERN_ERR PRINTK_HEADER
" Related CP in req: %p\n", req);
- while (act <= end) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
- act, ((int *) act)[0], ((int *) act)[1]);
- for (count = 0; count < 32 && count < act->count;
- count += sizeof(int))
- len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
- [(count>>2)]);
- len += sprintf(page + len, "\n");
- act++;
- }
- MESSAGE_LOG(KERN_ERR, "%s",
- page + sizeof(KERN_ERR PRINTK_HEADER));
+ dasd_eckd_dump_ccw_range(first, to, page + len);
+ printk("%s", page);
- /* print failing CCW area */
+ /* print failing CCW area (maximum 4) */
+ /* scsw->cda is either valid or zero */
len = 0;
- if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
- act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
- }
- end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
- while (act <= end) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
- act, ((int *) act)[0], ((int *) act)[1]);
- for (count = 0; count < 32 && count < act->count;
- count += sizeof(int))
- len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
- [(count>>2)]);
- len += sprintf(page + len, "\n");
- act++;
+ from = ++to;
+ fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
+ if (from < fail - 2) {
+ from = fail - 2; /* there is a gap - print header */
+ len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
}
+ to = min(fail + 1, last);
+ len += dasd_eckd_dump_ccw_range(from, to, page + len);
- /* print last CCWs */
- if (act < last - 2) {
- act = last - 2;
+ /* print last CCWs (maximum 2) */
+ from = max(from, ++to);
+ if (from < last - 1) {
+ from = last - 1; /* there is a gap - print header */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
}
- while (act <= last) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
- act, ((int *) act)[0], ((int *) act)[1]);
- for (count = 0; count < 32 && count < act->count;
- count += sizeof(int))
- len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
- [(count>>2)]);
- len += sprintf(page + len, "\n");
- act++;
- }
+ len += dasd_eckd_dump_ccw_range(from, last, page + len);
if (len > 0)
- MESSAGE_LOG(KERN_ERR, "%s",
- page + sizeof(KERN_ERR PRINTK_HEADER));
+ printk("%s", page);
free_page((unsigned long) page);
}
@@ -1685,14 +1773,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
static int __init
dasd_eckd_init(void)
{
- int ret;
-
ASCEBC(dasd_eckd_discipline.ebcname, 4);
-
- ret = ccw_driver_register(&dasd_eckd_driver);
- if (!ret)
- dasd_generic_auto_online(&dasd_eckd_driver);
- return ret;
+ return ccw_driver_register(&dasd_eckd_driver);
}
static void __exit
@@ -1703,22 +1785,3 @@ dasd_eckd_cleanup(void)
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
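
To summarize the reordering in dasd_eckd_check_characteristics() above: configuration data is read first, the unique id is generated and registered, and only a previously unknown storage server triggers the PSF-SSC validation; reading the device characteristics afterwards is no longer treated as fatal. A compressed sketch of that call order, assuming the dasd_int.h/dasd_eckd.h declarations are in scope and with error paths shortened:

/* Sketch of the new initialization order (error handling compressed). */
static int sketch_eckd_init_order(struct dasd_device *device)
{
	struct dasd_uid uid;
	int rc;

	rc = dasd_eckd_read_conf(device);		/* 1. configuration data */
	if (rc)
		return rc;
	rc = dasd_eckd_generate_uid(device, &uid);	/* 2. unique id */
	if (rc)
		return rc;
	rc = dasd_set_uid(device->cdev, &uid);		/* 3. register in devmap */
	if (rc == 1)					/* new storage server */
		rc = dasd_eckd_validate_server(device);	/* 4. PSF-SSC + re-read */
	return rc;
}
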
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index d5734e976e1c..712ff1650134 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -1,7 +1,7 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_eckd.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
- * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
@@ -41,9 +41,10 @@
#define DASD_ECKD_CCW_RESERVE 0xB4
/*
- *Perform Subsystem Function / Sub-Orders
+ * Perform Subsystem Function / Sub-Orders
*/
-#define PSF_ORDER_PRSSD 0x18
+#define PSF_ORDER_PRSSD 0x18
+#define PSF_ORDER_SSC 0x1D
/*****************************************************************************
* SECTION: Type Definitions
@@ -155,7 +156,7 @@ struct dasd_eckd_characteristics {
unsigned char reserved2:4;
unsigned char reserved3:8;
unsigned char defect_wr:1;
- unsigned char XRC_supported:1;
+ unsigned char XRC_supported:1;
unsigned char reserved4:1;
unsigned char striping:1;
unsigned char reserved5:4;
@@ -343,7 +344,7 @@ struct dasd_eckd_path {
};
/*
- * Perform Subsystem Function - Prepare for Read Subsystem Data
+ * Perform Subsystem Function - Prepare for Read Subsystem Data
*/
struct dasd_psf_prssd_data {
unsigned char order;
@@ -353,4 +354,15 @@ struct dasd_psf_prssd_data {
unsigned char varies[9];
} __attribute__ ((packed));
+/*
+ * Perform Subsystem Function - Set Subsystem Characteristics
+ */
+struct dasd_psf_ssc_data {
+ unsigned char order;
+ unsigned char flags;
+ unsigned char cu_type[4];
+ unsigned char suborder;
+ unsigned char reserved[59];
+} __attribute__((packed));
+
#endif /* DASD_ECKD_H */
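
As a cross-check on the structure added above: order (1) + flags (1) + cu_type (4) + suborder (1) + reserved (59) add up to 66 bytes, which matches the ccw->count of 66 used in dasd_eckd_build_psf_ssc(). A hypothetical compile-time assertion of that relationship (using BUILD_BUG_ON from linux/kernel.h; not part of the patch) could look like this sketch:

/* Sketch: keep the PSF-SSC CCW byte count tied to the packed struct size. */
#include <linux/kernel.h>

#define SKETCH_PSF_SSC_CCW_COUNT	66

static inline void sketch_check_psf_ssc_size(void)
{
	BUILD_BUG_ON(sizeof(struct dasd_psf_ssc_data) !=
		     SKETCH_PSF_SSC_CCW_COUNT);
}
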
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 2d946b6ca074..da65f1b032f5 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -89,7 +89,7 @@ struct eerbuffer {
};
static LIST_HEAD(bufferlist);
-static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
/*
@@ -276,7 +276,7 @@ struct dasd_eer_header {
__u64 tv_sec;
__u64 tv_usec;
char busid[DASD_EER_BUSID_SIZE];
-};
+} __attribute__ ((packed));
/*
* The following function can be used for those triggers that have
@@ -521,6 +521,8 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
unsigned long flags;
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
+ if (!eerb)
+ return -ENOMEM;
eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index b842377cb0c6..4108d96f6a5a 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -90,7 +90,7 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
/* just retry - there is nothing to save ... I got no sense data.... */
if (cqr->retries > 0) {
- DEV_MESSAGE (KERN_DEBUG, device,
+ DEV_MESSAGE (KERN_DEBUG, device,
"default ERP called (%i retries left)",
cqr->retries);
cqr->lpm = LPM_ANYPATH;
@@ -155,7 +155,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
/*
* Print the hex dump of the memory used by a request. This includes
- * all error recovery ccws that have been chained in from of the
+ * all error recovery ccws that have been chained in front of the
* real request.
*/
static inline void
@@ -227,12 +227,12 @@ dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
/*
* Log bytes arround failed CCW but only if we did
* not log the whole CP of the CCW is outside the
- * logged CP.
+ * logged CP.
*/
if (cplength > 40 ||
((addr_t) cpa < (addr_t) lcqr->cpaddr &&
(addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
-
+
DEV_MESSAGE(KERN_ERR, device,
"Failed CCW (%p) (area):",
(void *) (long) cpa);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 91145698f8e9..bb7755b9b19d 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_fba.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
@@ -56,19 +56,13 @@ static struct ccw_driver dasd_fba_driver; /* see below */
static int
dasd_fba_probe(struct ccw_device *cdev)
{
- int ret;
-
- ret = dasd_generic_probe (cdev, &dasd_fba_discipline);
- if (ret)
- return ret;
- ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
- return 0;
+ return dasd_generic_probe(cdev, &dasd_fba_discipline);
}
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
- return dasd_generic_set_online (cdev, &dasd_fba_discipline);
+ return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}
static struct ccw_driver dasd_fba_driver = {
@@ -125,13 +119,13 @@ static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
struct dasd_fba_private *private;
- struct ccw_device *cdev = device->cdev;
+ struct ccw_device *cdev = device->cdev;
void *rdc_data;
int rc;
private = (struct dasd_fba_private *) device->private;
if (private == NULL) {
- private = kmalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
+ private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
if (private == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s",
"memory allocation failed for private "
@@ -204,7 +198,7 @@ dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
if (irb->scsw.cstat == 0x00 &&
irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
return dasd_era_none;
-
+
cdev = device->cdev;
switch (cdev->id.dev_type) {
case 0x3370:
@@ -539,7 +533,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
* 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
* 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
* up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
- * addition we have one define extent ccw + 16 bytes of data and a
+ * addition we have one define extent ccw + 16 bytes of data and a
* locate record ccw for each block (stupid devices!) + 16 bytes of data.
* That makes:
* (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
@@ -569,16 +563,8 @@ static struct dasd_discipline dasd_fba_discipline = {
static int __init
dasd_fba_init(void)
{
- int ret;
-
ASCEBC(dasd_fba_discipline.ebcname, 4);
-
- ret = ccw_driver_register(&dasd_fba_driver);
- if (ret)
- return ret;
-
- dasd_generic_auto_online(&dasd_fba_driver);
- return 0;
+ return ccw_driver_register(&dasd_fba_driver);
}
static void __exit
@@ -589,22 +575,3 @@ dasd_fba_cleanup(void)
module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index da1fa91fc01d..14c910baa5fe 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -1,4 +1,4 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_fba.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index d4b13e300a76..03a83efc34c4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -1,7 +1,7 @@
-/*
+/*
* File...........: linux/drivers/s390/block/dasd_int.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
- * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
@@ -186,7 +186,7 @@ struct dasd_ccw_req {
void *callback_data;
};
-/*
+/*
* dasd_ccw_req -> status can be:
*/
#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
@@ -248,7 +248,7 @@ struct dasd_discipline {
/*
* Error recovery functions. examine_error() returns a value that
* indicates what to do for an error condition. If examine_error()
- * returns 'dasd_era_recover' erp_action() is called to create a
+ * returns 'dasd_era_recover' erp_action() is called to create a
* special error recovery ccw. erp_postaction() is called after
* an error recovery ccw has finished its execution. dump_sense
* is called for every error condition to print the sense data
@@ -302,11 +302,11 @@ struct dasd_device {
spinlock_t request_queue_lock;
struct block_device *bdev;
unsigned int devindex;
- unsigned long blocks; /* size of volume in blocks */
- unsigned int bp_block; /* bytes per block */
- unsigned int s2b_shift; /* log2 (bp_block/512) */
- unsigned long flags; /* per device flags */
- unsigned short features; /* copy of devmap-features (read-only!) */
+ unsigned long blocks; /* size of volume in blocks */
+ unsigned int bp_block; /* bytes per block */
+ unsigned int s2b_shift; /* log2 (bp_block/512) */
+ unsigned long flags; /* per device flags */
+ unsigned short features; /* copy of devmap-features (read-only!) */
/* extended error reporting stuff (eer) */
struct dasd_ccw_req *eer_cqr;
@@ -513,12 +513,12 @@ void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
-void dasd_generic_auto_online (struct ccw_driver *);
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
extern int dasd_probeonly;
extern int dasd_autodetect;
+extern int dasd_nopav;
int dasd_devmap_init(void);
void dasd_devmap_exit(void);
@@ -606,22 +606,3 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#endif /* __KERNEL__ */
#endif /* DASD_H */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: 1
- * tab-width: 8
- * End:
- */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index b8c80d28df41..302bcd0f28be 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -90,10 +90,10 @@ static int
dasd_ioctl_quiesce(struct dasd_device *device)
{
unsigned long flags;
-
+
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
-
+
DEV_MESSAGE (KERN_DEBUG, device, "%s",
"Quiesce IO on device");
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
@@ -110,13 +110,13 @@ static int
dasd_ioctl_resume(struct dasd_device *device)
{
unsigned long flags;
-
- if (!capable (CAP_SYS_ADMIN))
+
+ if (!capable (CAP_SYS_ADMIN))
return -EACCES;
DEV_MESSAGE (KERN_DEBUG, device, "%s",
"resume IO on device");
-
+
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -287,7 +287,7 @@ dasd_ioctl_information(struct dasd_device *device,
dasd_info->open_count = atomic_read(&device->open_count);
if (!device->bdev)
dasd_info->open_count++;
-
+
/*
* check if device is really formatted
* LDL / CDL was returned by 'fill_info'
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index eecb2afad5c2..3c1314b7391b 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -50,6 +50,9 @@ struct raw3270 {
unsigned char *ascebc; /* ascii -> ebcdic table */
struct class_device *clttydev; /* 3270-class tty device ptr */
struct class_device *cltubdev; /* 3270-class tub device ptr */
+
+ struct raw3270_request init_request;
+ unsigned char init_data[256];
};
/* raw3270->flags */
@@ -484,8 +487,6 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
-static unsigned char raw3270_init_data[256];
-static struct raw3270_request raw3270_init_request;
static struct diag210 raw3270_init_diag210;
static DECLARE_MUTEX(raw3270_init_sem);
@@ -644,17 +645,17 @@ __raw3270_size_device(struct raw3270 *rp)
* required (3270 device switched to 'stand-by') and command
* rejects (old devices that can't do 'read partition').
*/
- memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
- memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
- /* Store 'read partition' data stream to raw3270_init_data */
- memcpy(raw3270_init_data, wbuf, sizeof(wbuf));
- INIT_LIST_HEAD(&raw3270_init_request.list);
- raw3270_init_request.ccw.cmd_code = TC_WRITESF;
- raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
- raw3270_init_request.ccw.count = sizeof(wbuf);
- raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
-
- rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
+ memset(&rp->init_request, 0, sizeof(rp->init_request));
+ memset(&rp->init_data, 0, 256);
+ /* Store 'read partition' data stream to init_data */
+ memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+ INIT_LIST_HEAD(&rp->init_request.list);
+ rp->init_request.ccw.cmd_code = TC_WRITESF;
+ rp->init_request.ccw.flags = CCW_FLAG_SLI;
+ rp->init_request.ccw.count = sizeof(wbuf);
+ rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
+
+ rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
if (rc)
/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
return rc;
@@ -679,18 +680,18 @@ __raw3270_size_device(struct raw3270 *rp)
* The device accepted the 'read partition' command. Now
* set up a read ccw and issue it.
*/
- raw3270_init_request.ccw.cmd_code = TC_READMOD;
- raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
- raw3270_init_request.ccw.count = sizeof(raw3270_init_data);
- raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
- rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
+ rp->init_request.ccw.cmd_code = TC_READMOD;
+ rp->init_request.ccw.flags = CCW_FLAG_SLI;
+ rp->init_request.ccw.count = sizeof(rp->init_data);
+ rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
+ rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
if (rc)
return rc;
/* Got a Query Reply */
- count = sizeof(raw3270_init_data) - raw3270_init_request.rescnt;
- uap = (struct raw3270_ua *) (raw3270_init_data + 1);
+ count = sizeof(rp->init_data) - rp->init_request.rescnt;
+ uap = (struct raw3270_ua *) (rp->init_data + 1);
/* Paranoia check. */
- if (raw3270_init_data[0] != 0x88 || uap->uab.qcode != 0x81)
+ if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
return -EOPNOTSUPP;
/* Copy rows/columns of default Usable Area */
rp->rows = uap->uab.h;
@@ -749,18 +750,18 @@ raw3270_reset_device(struct raw3270 *rp)
int rc;
down(&raw3270_init_sem);
- memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
- memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
- /* Store reset data stream to raw3270_init_data/raw3270_init_request */
- raw3270_init_data[0] = TW_KR;
- INIT_LIST_HEAD(&raw3270_init_request.list);
- raw3270_init_request.ccw.cmd_code = TC_EWRITEA;
- raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
- raw3270_init_request.ccw.count = 1;
- raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
+ memset(&rp->init_request, 0, sizeof(rp->init_request));
+ memset(&rp->init_data, 0, sizeof(rp->init_data));
+ /* Store reset data stream to init_data/init_request */
+ rp->init_data[0] = TW_KR;
+ INIT_LIST_HEAD(&rp->init_request.list);
+ rp->init_request.ccw.cmd_code = TC_EWRITEA;
+ rp->init_request.ccw.flags = CCW_FLAG_SLI;
+ rp->init_request.ccw.count = 1;
+ rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
rp->view = &raw3270_init_view;
raw3270_init_view.dev = rp;
- rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
+ rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
raw3270_init_view.dev = 0;
rp->view = 0;
up(&raw3270_init_sem);
@@ -854,7 +855,7 @@ raw3270_setup_console(struct ccw_device *cdev)
char *ascebc;
int rc;
- rp = (struct raw3270 *) alloc_bootmem(sizeof(struct raw3270));
+ rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
ascebc = (char *) alloc_bootmem(256);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
@@ -895,7 +896,7 @@ raw3270_create_device(struct ccw_device *cdev)
char *ascebc;
int rc;
- rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL);
+ rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
if (!rp)
return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL);
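
The raw3270 changes above move init_request/init_data from file-scope statics into struct raw3270, so every device gets its own buffers. Since init_data is addressed directly by a CCW (a 31-bit cda), the containing structure presumably has to live in 31-bit addressable storage, which would explain the switch to alloc_bootmem_low()/GFP_DMA. A small sketch of that allocation pattern, with illustrative names:

/* Sketch: embed a CCW data buffer in a per-device structure and allocate
 * the structure below 2 GB so that the 31-bit cda can reach it. */
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/cio.h>		/* struct ccw1 */
#include <asm/page.h>		/* __pa */

struct sketch_3270 {
	struct ccw1 init_ccw;
	unsigned char init_data[256];	/* one buffer per device */
};

static struct sketch_3270 *sketch_3270_alloc(void)
{
	struct sketch_3270 *rp;

	rp = kzalloc(sizeof(*rp), GFP_KERNEL | GFP_DMA);
	if (!rp)
		return NULL;
	rp->init_ccw.cda = (__u32) __pa(rp->init_data);
	return rp;
}
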
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 0960bef7b199..15b895496a45 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -224,39 +224,6 @@ is_blacklisted (int ssid, int devno)
}
#ifdef CONFIG_PROC_FS
-static int
-__s390_redo_validation(struct subchannel_id schid, void *data)
-{
- int ret;
- struct subchannel *sch;
-
- sch = get_subchannel_by_schid(schid);
- if (sch) {
- /* Already known. */
- put_device(&sch->dev);
- return 0;
- }
- ret = css_probe_device(schid);
- if (ret == -ENXIO)
- return ret; /* We're through. */
- if (ret == -ENOMEM)
- /* Stop validation for now. Bad, but no need for a panic. */
- return ret;
- return 0;
-}
-
-/*
- * Function: s390_redo_validation
- * Look for no longer blacklisted devices
- * FIXME: there must be a better way to do this */
-static inline void
-s390_redo_validation (void)
-{
- CIO_TRACE_EVENT (0, "redoval");
-
- for_each_subchannel(__s390_redo_validation, NULL);
-}
-
/*
* Function: blacklist_parse_proc_parameters
* parse the stuff which is piped to /proc/cio_ignore
@@ -281,7 +248,7 @@ blacklist_parse_proc_parameters (char *buf)
return;
}
- s390_redo_validation ();
+ css_schedule_reprobe();
}
/* Iterator struct for all devices. */
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index bdfee7fbaa2e..c7319a07ba35 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -404,21 +404,24 @@ ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
}
static int
-__ccwgroup_driver_unregister_device(struct device *dev, void *data)
+__ccwgroup_match_all(struct device *dev, void *data)
{
- __ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
- device_unregister(dev);
- put_device(dev);
- return 0;
+ return 1;
}
void
ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
{
+ struct device *dev;
+
/* We don't want ccwgroup devices to live longer than their driver. */
get_driver(&cdriver->driver);
- driver_for_each_device(&cdriver->driver, NULL, NULL,
- __ccwgroup_driver_unregister_device);
+ while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
+ __ccwgroup_match_all))) {
+ __ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
+ device_unregister(dev);
+ put_device(dev);
+ }
put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver);
}
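
The unregister loop above switches from driver_for_each_device() to repeated driver_find_device() calls with a match-all function, so each device can be torn down (and the driver's device list modified) outside of the iteration, with the reference taken by the lookup dropped explicitly. A generic sketch of that pattern using the same 2.6-era driver-core calls; function names are illustrative:

/* Sketch: unregister every device bound to a driver, one lookup at a time. */
#include <linux/device.h>

static int sketch_match_all(struct device *dev, void *data)
{
	return 1;			/* accept any device */
}

static void sketch_unregister_all(struct device_driver *drv)
{
	struct device *dev;

	/* driver_find_device() returns the device with a reference held. */
	while ((dev = driver_find_device(drv, NULL, NULL, sketch_match_all))) {
		device_unregister(dev);
		put_device(dev);	/* drop the lookup reference */
	}
}
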
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 72187e54dcac..b00f3ed051a0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -244,8 +244,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
(sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
- (sch->schib.pmcw.lpum == mask) &&
- (sch->vpm == 0)) {
+ (sch->schib.pmcw.lpum == mask)) {
int cc;
cc = cio_clear(sch);
@@ -918,12 +917,13 @@ chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
chp = to_channelpath(container_of(kobj, struct device, kobj));
css = to_css(chp->dev.parent);
- size = sizeof(struct cmg_chars);
+ size = sizeof(struct cmg_entry);
/* Only allow single reads. */
if (off || count < size)
return 0;
chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
+ count = size;
return count;
}
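
The chsc.c hunk above fixes two things in chp_measurement_read(): the record size is sizeof(struct cmg_entry) rather than sizeof(struct cmg_chars), and the function now returns the number of bytes actually copied instead of the caller's count. A generic sketch of a whole-record-only binary read with that return convention (names and signature are illustrative, not the sysfs API):

/* Sketch: binary read helper that only serves complete records. */
#include <linux/string.h>
#include <linux/types.h>

static ssize_t sketch_record_read(char *buf, loff_t off, size_t count,
				  const void *record, size_t record_size)
{
	if (off || count < record_size)
		return 0;		/* no partial reads */
	memcpy(buf, record, record_size);
	return record_size;		/* report exactly what was copied */
}
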
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 07ef3f640f4a..1c3e8e9012b0 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -3,9 +3,10 @@
*
* Linux on zSeries Channel Measurement Facility support
*
- * Copyright 2000,2003 IBM Corporation
+ * Copyright 2000,2006 IBM Corporation
*
- * Author: Arnd Bergmann <arndb@de.ibm.com>
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
*
* original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
*
@@ -96,9 +97,9 @@ module_param(format, bool, 0444);
/**
* struct cmb_operations - functions to use depending on cmb_format
*
- * all these functions operate on a struct cmf_device. There is only
- * one instance of struct cmb_operations because all cmf_device
- * objects are guaranteed to be of the same type.
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
*
* @alloc: allocate memory for a channel measurement block,
* either with the help of a special pool or with kmalloc
@@ -107,6 +108,7 @@ module_param(format, bool, 0444);
* @readall: read a measurement block in a common format
* @reset: clear the data in the associated measurement block and
* reset its time stamp
+ * @align: align an allocated block so that the hardware can use it
*/
struct cmb_operations {
int (*alloc) (struct ccw_device*);
@@ -115,11 +117,19 @@ struct cmb_operations {
u64 (*read) (struct ccw_device*, int);
int (*readall)(struct ccw_device*, struct cmbdata *);
void (*reset) (struct ccw_device*);
+ void * (*align) (void *);
struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;
+struct cmb_data {
+ void *hw_block; /* Pointer to block updated by hardware */
+ void *last_block; /* Last changed block copied from hardware block */
+ int size; /* Size of hw_block and last_block */
+ unsigned long long last_update; /* when last_block was updated */
+};
+
/* our user interface is designed in terms of nanoseconds,
* while the hardware measures total times in its own
* unit.*/
@@ -226,63 +236,229 @@ struct set_schib_struct {
unsigned long address;
wait_queue_head_t wait;
int ret;
+ struct kref kref;
};
+static void cmf_set_schib_release(struct kref *kref)
+{
+ struct set_schib_struct *set_data;
+
+ set_data = container_of(kref, struct set_schib_struct, kref);
+ kfree(set_data);
+}
+
+#define CMF_PENDING 1
+
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
int mbfc, unsigned long address)
{
- struct set_schib_struct s = {
- .mme = mme,
- .mbfc = mbfc,
- .address = address,
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
- };
+ struct set_schib_struct *set_data;
+ int ret;
spin_lock_irq(cdev->ccwlock);
- s.ret = set_schib(cdev, mme, mbfc, address);
- if (s.ret != -EBUSY) {
- goto out_nowait;
+ if (!cdev->private->cmb) {
+ ret = -ENODEV;
+ goto out;
}
+ set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
+ if (!set_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ init_waitqueue_head(&set_data->wait);
+ kref_init(&set_data->kref);
+ set_data->mme = mme;
+ set_data->mbfc = mbfc;
+ set_data->address = address;
+
+ ret = set_schib(cdev, mme, mbfc, address);
+ if (ret != -EBUSY)
+ goto out_put;
if (cdev->private->state != DEV_STATE_ONLINE) {
- s.ret = -EBUSY;
/* if the device is not online, don't even try again */
- goto out_nowait;
+ ret = -EBUSY;
+ goto out_put;
}
+
cdev->private->state = DEV_STATE_CMFCHANGE;
- cdev->private->cmb_wait = &s;
- s.ret = 1;
+ set_data->ret = CMF_PENDING;
+ cdev->private->cmb_wait = set_data;
spin_unlock_irq(cdev->ccwlock);
- if (wait_event_interruptible(s.wait, s.ret != 1)) {
+ if (wait_event_interruptible(set_data->wait,
+ set_data->ret != CMF_PENDING)) {
spin_lock_irq(cdev->ccwlock);
- if (s.ret == 1) {
- s.ret = -ERESTARTSYS;
- cdev->private->cmb_wait = 0;
+ if (set_data->ret == CMF_PENDING) {
+ set_data->ret = -ERESTARTSYS;
if (cdev->private->state == DEV_STATE_CMFCHANGE)
cdev->private->state = DEV_STATE_ONLINE;
}
spin_unlock_irq(cdev->ccwlock);
}
- return s.ret;
-
-out_nowait:
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->cmb_wait = NULL;
+ ret = set_data->ret;
+out_put:
+ kref_put(&set_data->kref, cmf_set_schib_release);
+out:
spin_unlock_irq(cdev->ccwlock);
- return s.ret;
+ return ret;
}
void retry_set_schib(struct ccw_device *cdev)
{
- struct set_schib_struct *s;
+ struct set_schib_struct *set_data;
+
+ set_data = cdev->private->cmb_wait;
+ if (!set_data) {
+ WARN_ON(1);
+ return;
+ }
+ kref_get(&set_data->kref);
+ set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+ set_data->address);
+ wake_up(&set_data->wait);
+ kref_put(&set_data->kref, cmf_set_schib_release);
+}
+
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ void *reference_buf;
+ void *hw_block;
+ struct cmb_data *cmb_data;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ if (stsch(sch->schid, &sch->schib))
+ return -ENODEV;
+
+ if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
+ /* Don't copy if a start function is in progress. */
+		if (!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) &&
+		    (sch->schib.scsw.actl &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+		    !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))
+ return -EBUSY;
+ }
+ cmb_data = cdev->private->cmb;
+ hw_block = cmbops->align(cmb_data->hw_block);
+ if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
+ /* No need to copy. */
+ return 0;
+ reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
+ if (!reference_buf)
+ return -ENOMEM;
+ /* Ensure consistency of block copied from hardware. */
+ do {
+ memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+ memcpy(reference_buf, hw_block, cmb_data->size);
+ } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+ cmb_data->last_update = get_clock();
+ kfree(reference_buf);
+ return 0;
+}
+
+struct copy_block_struct {
+ wait_queue_head_t wait;
+ int ret;
+ struct kref kref;
+};
+
+static void cmf_copy_block_release(struct kref *kref)
+{
+ struct copy_block_struct *copy_block;
+
+ copy_block = container_of(kref, struct copy_block_struct, kref);
+ kfree(copy_block);
+}
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+ struct copy_block_struct *copy_block;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ ret = -ENODEV;
+ goto out;
+ }
+ copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
+ if (!copy_block) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ init_waitqueue_head(&copy_block->wait);
+ kref_init(&copy_block->kref);
+
+ ret = cmf_copy_block(cdev);
+ if (ret != -EBUSY)
+ goto out_put;
+
+ if (cdev->private->state != DEV_STATE_ONLINE) {
+ ret = -EBUSY;
+ goto out_put;
+ }
+
+ cdev->private->state = DEV_STATE_CMFUPDATE;
+ copy_block->ret = CMF_PENDING;
+ cdev->private->cmb_wait = copy_block;
+
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (wait_event_interruptible(copy_block->wait,
+ copy_block->ret != CMF_PENDING)) {
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (copy_block->ret == CMF_PENDING) {
+ copy_block->ret = -ERESTARTSYS;
+ if (cdev->private->state == DEV_STATE_CMFUPDATE)
+ cdev->private->state = DEV_STATE_ONLINE;
+ }
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ }
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cdev->private->cmb_wait = NULL;
+ ret = copy_block->ret;
+out_put:
+ kref_put(&copy_block->kref, cmf_copy_block_release);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+ struct copy_block_struct *copy_block;
- s = cdev->private->cmb_wait;
- cdev->private->cmb_wait = 0;
- if (!s) {
+ copy_block = cdev->private->cmb_wait;
+ if (!copy_block) {
WARN_ON(1);
return;
}
- s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
- wake_up(&s->wait);
+ kref_get(&copy_block->kref);
+ copy_block->ret = cmf_copy_block(cdev);
+ wake_up(&copy_block->wait);
+ kref_put(&copy_block->kref, cmf_copy_block_release);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+ struct cmb_data *cmb_data;
+
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+ if (cmb_data) {
+ memset(cmb_data->last_block, 0, cmb_data->size);
+ /*
+ * Need to reset hw block as well to make the hardware start
+ * from 0 again.
+ */
+ memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
+ cmb_data->last_update = 0;
+ }
+ cdev->private->cmb_start_time = get_clock();
+ spin_unlock_irq(cdev->ccwlock);
}
/**
@@ -343,8 +519,8 @@ struct cmb {
/* insert a single device into the cmb_area list
* called with cmb_area.lock held from alloc_cmb
*/
-static inline int
-alloc_cmb_single (struct ccw_device *cdev)
+static inline int alloc_cmb_single (struct ccw_device *cdev,
+ struct cmb_data *cmb_data)
{
struct cmb *cmb;
struct ccw_device_private *node;
@@ -358,10 +534,12 @@ alloc_cmb_single (struct ccw_device *cdev)
/* find first unused cmb in cmb_area.mem.
* this is a little tricky: cmb_area.list
- * remains sorted by ->cmb pointers */
+ * remains sorted by ->cmb->hw_data pointers */
cmb = cmb_area.mem;
list_for_each_entry(node, &cmb_area.list, cmb_list) {
- if ((struct cmb*)node->cmb > cmb)
+ struct cmb_data *data;
+ data = node->cmb;
+ if ((struct cmb*)data->hw_block > cmb)
break;
cmb++;
}
@@ -372,7 +550,8 @@ alloc_cmb_single (struct ccw_device *cdev)
/* insert new cmb */
list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
- cdev->private->cmb = cmb;
+ cmb_data->hw_block = cmb;
+ cdev->private->cmb = cmb_data;
ret = 0;
out:
spin_unlock_irq(cdev->ccwlock);
@@ -385,7 +564,19 @@ alloc_cmb (struct ccw_device *cdev)
int ret;
struct cmb *mem;
ssize_t size;
+ struct cmb_data *cmb_data;
+
+ /* Allocate private cmb_data. */
+ cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+ if (!cmb_data)
+ return -ENOMEM;
+ cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+ if (!cmb_data->last_block) {
+ kfree(cmb_data);
+ return -ENOMEM;
+ }
+ cmb_data->size = sizeof(struct cmb);
spin_lock(&cmb_area.lock);
if (!cmb_area.mem) {
@@ -414,29 +605,36 @@ alloc_cmb (struct ccw_device *cdev)
}
/* do the actual allocation */
- ret = alloc_cmb_single(cdev);
+ ret = alloc_cmb_single(cdev, cmb_data);
out:
spin_unlock(&cmb_area.lock);
-
+ if (ret) {
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ }
return ret;
}
-static void
-free_cmb(struct ccw_device *cdev)
+static void free_cmb(struct ccw_device *cdev)
{
struct ccw_device_private *priv;
-
- priv = cdev->private;
+ struct cmb_data *cmb_data;
spin_lock(&cmb_area.lock);
spin_lock_irq(cdev->ccwlock);
+ priv = cdev->private;
+
if (list_empty(&priv->cmb_list)) {
/* already freed */
goto out;
}
+ cmb_data = priv->cmb;
priv->cmb = NULL;
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
list_del_init(&priv->cmb_list);
if (list_empty(&cmb_area.list)) {
@@ -451,83 +649,97 @@ out:
spin_unlock(&cmb_area.lock);
}
-static int
-set_cmb(struct ccw_device *cdev, u32 mme)
+static int set_cmb(struct ccw_device *cdev, u32 mme)
{
u16 offset;
+ struct cmb_data *cmb_data;
+ unsigned long flags;
- if (!cdev->private->cmb)
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
return -EINVAL;
-
- offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;
+ }
+ cmb_data = cdev->private->cmb;
+ offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
return set_schib_wait(cdev, mme, 0, offset);
}
-static u64
-read_cmb (struct ccw_device *cdev, int index)
+static u64 read_cmb (struct ccw_device *cdev, int index)
{
- /* yes, we have to put it on the stack
- * because the cmb must only be accessed
- * atomically, e.g. with mvc */
- struct cmb cmb;
- unsigned long flags;
+ struct cmb *cmb;
u32 val;
+ int ret;
+ unsigned long flags;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return 0;
spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) {
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- return 0;
+ ret = 0;
+ goto out;
}
-
- cmb = *(struct cmb*)cdev->private->cmb;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
switch (index) {
case cmb_ssch_rsch_count:
- return cmb.ssch_rsch_count;
+ ret = cmb->ssch_rsch_count;
+ goto out;
case cmb_sample_count:
- return cmb.sample_count;
+ ret = cmb->sample_count;
+ goto out;
case cmb_device_connect_time:
- val = cmb.device_connect_time;
+ val = cmb->device_connect_time;
break;
case cmb_function_pending_time:
- val = cmb.function_pending_time;
+ val = cmb->function_pending_time;
break;
case cmb_device_disconnect_time:
- val = cmb.device_disconnect_time;
+ val = cmb->device_disconnect_time;
break;
case cmb_control_unit_queuing_time:
- val = cmb.control_unit_queuing_time;
+ val = cmb->control_unit_queuing_time;
break;
case cmb_device_active_only_time:
- val = cmb.device_active_only_time;
+ val = cmb->device_active_only_time;
break;
default:
- return 0;
+ ret = 0;
+ goto out;
}
- return time_to_avg_nsec(val, cmb.sample_count);
+ ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
}
-static int
-readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
{
- /* yes, we have to put it on the stack
- * because the cmb must only be accessed
- * atomically, e.g. with mvc */
- struct cmb cmb;
- unsigned long flags;
+ struct cmb *cmb;
+ struct cmb_data *cmb_data;
u64 time;
+ unsigned long flags;
+ int ret;
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- return -ENODEV;
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = -ENODEV;
+ goto out;
}
-
- cmb = *(struct cmb*)cdev->private->cmb;
- time = get_clock() - cdev->private->cmb_start_time;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (cmb_data->last_update == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ cmb = cmb_data->last_block;
+ time = cmb_data->last_update - cdev->private->cmb_start_time;
memset(data, 0, sizeof(struct cmbdata));
@@ -538,31 +750,32 @@ readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
data->elapsed_time = (time * 1000) >> 12;
/* copy data to new structure */
- data->ssch_rsch_count = cmb.ssch_rsch_count;
- data->sample_count = cmb.sample_count;
+ data->ssch_rsch_count = cmb->ssch_rsch_count;
+ data->sample_count = cmb->sample_count;
/* time fields are converted to nanoseconds while copying */
- data->device_connect_time = time_to_nsec(cmb.device_connect_time);
- data->function_pending_time = time_to_nsec(cmb.function_pending_time);
- data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+ data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+ data->device_disconnect_time =
+ time_to_nsec(cmb->device_disconnect_time);
data->control_unit_queuing_time
- = time_to_nsec(cmb.control_unit_queuing_time);
+ = time_to_nsec(cmb->control_unit_queuing_time);
data->device_active_only_time
- = time_to_nsec(cmb.device_active_only_time);
+ = time_to_nsec(cmb->device_active_only_time);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
- return 0;
+static void reset_cmb(struct ccw_device *cdev)
+{
+ cmf_generic_reset(cdev);
}
-static void
-reset_cmb(struct ccw_device *cdev)
+static void * align_cmb(void *area)
{
- struct cmb *cmb;
- spin_lock_irq(cdev->ccwlock);
- cmb = cdev->private->cmb;
- if (cmb)
- memset (cmb, 0, sizeof (*cmb));
- cdev->private->cmb_start_time = get_clock();
- spin_unlock_irq(cdev->ccwlock);
+ return area;
}
static struct attribute_group cmf_attr_group;
@@ -574,6 +787,7 @@ static struct cmb_operations cmbops_basic = {
.read = read_cmb,
.readall = readall_cmb,
.reset = reset_cmb,
+ .align = align_cmb,
.attr_group = &cmf_attr_group,
};
@@ -610,22 +824,34 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
return (struct cmbe*)addr;
}
-static int
-alloc_cmbe (struct ccw_device *cdev)
+static int alloc_cmbe (struct ccw_device *cdev)
{
struct cmbe *cmbe;
- cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
+ struct cmb_data *cmb_data;
+ int ret;
+
+ cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
if (!cmbe)
return -ENOMEM;
-
+ cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+ if (!cmb_data) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+ if (!cmb_data->last_block) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ cmb_data->size = sizeof(struct cmbe);
spin_lock_irq(cdev->ccwlock);
if (cdev->private->cmb) {
- kfree(cmbe);
spin_unlock_irq(cdev->ccwlock);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_free;
}
-
- cdev->private->cmb = cmbe;
+ cmb_data->hw_block = cmbe;
+ cdev->private->cmb = cmb_data;
spin_unlock_irq(cdev->ccwlock);
/* activate global measurement if this is the first channel */
@@ -636,14 +862,24 @@ alloc_cmbe (struct ccw_device *cdev)
spin_unlock(&cmb_area.lock);
return 0;
+out_free:
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ kfree(cmbe);
+ return ret;
}
-static void
-free_cmbe (struct ccw_device *cdev)
+static void free_cmbe (struct ccw_device *cdev)
{
+ struct cmb_data *cmb_data;
+
spin_lock_irq(cdev->ccwlock);
- kfree(cdev->private->cmb);
+ cmb_data = cdev->private->cmb;
cdev->private->cmb = NULL;
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
spin_unlock_irq(cdev->ccwlock);
/* deactivate global measurement if this is the last channel */
@@ -654,89 +890,105 @@ free_cmbe (struct ccw_device *cdev)
spin_unlock(&cmb_area.lock);
}
-static int
-set_cmbe(struct ccw_device *cdev, u32 mme)
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
{
unsigned long mba;
+ struct cmb_data *cmb_data;
+ unsigned long flags;
- if (!cdev->private->cmb)
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
return -EINVAL;
- mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
+ }
+ cmb_data = cdev->private->cmb;
+ mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
return set_schib_wait(cdev, mme, 1, mba);
}
-u64
-read_cmbe (struct ccw_device *cdev, int index)
+static u64 read_cmbe (struct ccw_device *cdev, int index)
{
- /* yes, we have to put it on the stack
- * because the cmb must only be accessed
- * atomically, e.g. with mvc */
- struct cmbe cmb;
- unsigned long flags;
+ struct cmbe *cmb;
+ struct cmb_data *cmb_data;
u32 val;
+ int ret;
+ unsigned long flags;
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
return 0;
- }
- cmb = *cmbe_align(cdev->private->cmb);
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = 0;
+ goto out;
+ }
+ cmb = cmb_data->last_block;
switch (index) {
case cmb_ssch_rsch_count:
- return cmb.ssch_rsch_count;
+ ret = cmb->ssch_rsch_count;
+ goto out;
case cmb_sample_count:
- return cmb.sample_count;
+ ret = cmb->sample_count;
+ goto out;
case cmb_device_connect_time:
- val = cmb.device_connect_time;
+ val = cmb->device_connect_time;
break;
case cmb_function_pending_time:
- val = cmb.function_pending_time;
+ val = cmb->function_pending_time;
break;
case cmb_device_disconnect_time:
- val = cmb.device_disconnect_time;
+ val = cmb->device_disconnect_time;
break;
case cmb_control_unit_queuing_time:
- val = cmb.control_unit_queuing_time;
+ val = cmb->control_unit_queuing_time;
break;
case cmb_device_active_only_time:
- val = cmb.device_active_only_time;
+ val = cmb->device_active_only_time;
break;
case cmb_device_busy_time:
- val = cmb.device_busy_time;
+ val = cmb->device_busy_time;
break;
case cmb_initial_command_response_time:
- val = cmb.initial_command_response_time;
+ val = cmb->initial_command_response_time;
break;
default:
- return 0;
+ ret = 0;
+ goto out;
}
- return time_to_avg_nsec(val, cmb.sample_count);
+ ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
}
-static int
-readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
{
- /* yes, we have to put it on the stack
- * because the cmb must only be accessed
- * atomically, e.g. with mvc */
- struct cmbe cmb;
- unsigned long flags;
+ struct cmbe *cmb;
+ struct cmb_data *cmb_data;
u64 time;
+ unsigned long flags;
+ int ret;
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- return -ENODEV;
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = -ENODEV;
+ goto out;
}
-
- cmb = *cmbe_align(cdev->private->cmb);
- time = get_clock() - cdev->private->cmb_start_time;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (cmb_data->last_update == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ time = cmb_data->last_update - cdev->private->cmb_start_time;
memset (data, 0, sizeof(struct cmbdata));
@@ -746,35 +998,38 @@ readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
/* convert to nanoseconds */
data->elapsed_time = (time * 1000) >> 12;
+ cmb = cmb_data->last_block;
/* copy data to new structure */
- data->ssch_rsch_count = cmb.ssch_rsch_count;
- data->sample_count = cmb.sample_count;
+ data->ssch_rsch_count = cmb->ssch_rsch_count;
+ data->sample_count = cmb->sample_count;
/* time fields are converted to nanoseconds while copying */
- data->device_connect_time = time_to_nsec(cmb.device_connect_time);
- data->function_pending_time = time_to_nsec(cmb.function_pending_time);
- data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+ data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+ data->device_disconnect_time =
+ time_to_nsec(cmb->device_disconnect_time);
data->control_unit_queuing_time
- = time_to_nsec(cmb.control_unit_queuing_time);
+ = time_to_nsec(cmb->control_unit_queuing_time);
data->device_active_only_time
- = time_to_nsec(cmb.device_active_only_time);
- data->device_busy_time = time_to_nsec(cmb.device_busy_time);
+ = time_to_nsec(cmb->device_active_only_time);
+ data->device_busy_time = time_to_nsec(cmb->device_busy_time);
data->initial_command_response_time
- = time_to_nsec(cmb.initial_command_response_time);
+ = time_to_nsec(cmb->initial_command_response_time);
- return 0;
+ ret = 0;
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
}
-static void
-reset_cmbe(struct ccw_device *cdev)
+static void reset_cmbe(struct ccw_device *cdev)
{
- struct cmbe *cmb;
- spin_lock_irq(cdev->ccwlock);
- cmb = cmbe_align(cdev->private->cmb);
- if (cmb)
- memset (cmb, 0, sizeof (*cmb));
- cdev->private->cmb_start_time = get_clock();
- spin_unlock_irq(cdev->ccwlock);
+ cmf_generic_reset(cdev);
+}
+
+static void * align_cmbe(void *area)
+{
+ return cmbe_align(area);
}
static struct attribute_group cmf_attr_group_ext;
@@ -786,6 +1041,7 @@ static struct cmb_operations cmbops_extended = {
.read = read_cmbe,
.readall = readall_cmbe,
.reset = reset_cmbe,
+ .align = align_cmbe,
.attr_group = &cmf_attr_group_ext,
};
@@ -803,14 +1059,19 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
struct ccw_device *cdev;
long interval;
unsigned long count;
+ struct cmb_data *cmb_data;
cdev = to_ccwdev(dev);
- interval = get_clock() - cdev->private->cmb_start_time;
count = cmf_read(cdev, cmb_sample_count);
- if (count)
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+ if (count) {
+ interval = cmb_data->last_update -
+ cdev->private->cmb_start_time;
interval /= count;
- else
+ } else
interval = -1;
+ spin_unlock_irq(cdev->ccwlock);
return sprintf(buf, "%ld\n", interval);
}
@@ -823,7 +1084,10 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
int ret;
ret = cmf_readall(to_ccwdev(dev), &data);
- if (ret)
+ if (ret == -EAGAIN || ret == -ENODEV)
+ /* No data (yet/currently) available to use for calculation. */
+ return sprintf(buf, "n/a\n");
+ else if (ret)
return ret;
utilization = data.device_connect_time +
@@ -982,6 +1246,13 @@ cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
return cmbops->readall(cdev, data);
}
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+ cmbops->reset(cdev);
+ return cmbops->set(cdev, 2);
+}
+
static int __init
init_cmf(void)
{
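
The cmf.c hunks above consistently follow one locking pattern: take cdev->ccwlock with spin_lock_irqsave(), re-check that cdev->private->cmb is still non-NULL, copy whatever is needed out of the cmb_data block, and only then drop the lock before calling anything that may sleep (such as set_schib_wait()). A minimal sketch of that pattern; the structure and function names below are hypothetical, for illustration only, not the driver's real definitions:

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Hypothetical stand-in for struct cmb_data, for illustration only. */
struct cmb_data_sketch {
	void *hw_block;
	void *last_block;
	unsigned long long last_update;
};

static int read_last_update(spinlock_t *lock, struct cmb_data_sketch **slot,
			    unsigned long long *out)
{
	struct cmb_data_sketch *cmb_data;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	cmb_data = *slot;		/* may have been cleared meanwhile */
	if (!cmb_data) {
		spin_unlock_irqrestore(lock, flags);
		return -ENODEV;
	}
	*out = cmb_data->last_update;	/* copy while the lock is held */
	spin_unlock_irqrestore(lock, flags);
	/* anything that may sleep happens only after the unlock */
	return 0;
}
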
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 74ea8aac4b7d..1d3be80797f8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -19,9 +19,11 @@
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
+#include "device.h"
int need_rescan = 0;
int css_init_done = 0;
+static int need_reprobe = 0;
static int max_ssid = 0;
struct channel_subsystem *css[__MAX_CSSID + 1];
@@ -339,6 +341,67 @@ typedef void (*workfunc)(void *);
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
struct workqueue_struct *slow_path_wq;
+/* Reprobe subchannel if unregistered. */
+static int reprobe_subchannel(struct subchannel_id schid, void *data)
+{
+ struct subchannel *sch;
+ int ret;
+
+ CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+ if (need_reprobe)
+ return -EAGAIN;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ /* Already known. */
+ put_device(&sch->dev);
+ return 0;
+ }
+
+ ret = css_probe_device(schid);
+ switch (ret) {
+ case 0:
+ break;
+ case -ENXIO:
+ case -ENOMEM:
+ /* These should abort looping */
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Work function used to reprobe all unregistered subchannels. */
+static void reprobe_all(void *data)
+{
+ int ret;
+
+ CIO_MSG_EVENT(2, "reprobe start\n");
+
+ need_reprobe = 0;
+ /* Make sure initial subchannel scan is done. */
+ wait_event(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ ret = for_each_subchannel(reprobe_subchannel, NULL);
+
+ CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
+ need_reprobe);
+}
+
+DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+ need_reprobe = 1;
+ queue_work(ccw_device_work, &css_reprobe_work);
+}
+
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
/*
* Rescan for new devices. FIXME: This is slow.
* This function is called when we have lost CRWs due to overflows and we have
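
css_schedule_reprobe() above uses the standard deferred-work idiom: set a flag, queue a work item on a dedicated workqueue, and have the work function clear the flag before walking the subchannels, so a request that arrives during the walk aborts it and the freshly queued work item starts over. A stripped-down sketch of that idiom with illustrative names, using the same three-argument DECLARE_WORK() form as the patch:

#include <linux/workqueue.h>

static int need_rescan_flag;			/* illustrative */
static struct workqueue_struct *rescan_wq;	/* created elsewhere */

static void rescan_all(void *data)
{
	need_rescan_flag = 0;	/* clear first: a later request reruns us */
	/* ... walk the objects and probe the ones not yet registered ... */
}

DECLARE_WORK(rescan_work, rescan_all, NULL);

void schedule_rescan(void)
{
	need_rescan_flag = 1;	/* tell an in-progress walk to restart */
	queue_work(rescan_wq, &rescan_work);
}
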
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 8e3053c2a451..eafde43e8410 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -133,8 +133,8 @@ struct css_driver io_subchannel_driver = {
struct workqueue_struct *ccw_device_work;
struct workqueue_struct *ccw_device_notify_work;
-static wait_queue_head_t ccw_device_init_wq;
-static atomic_t ccw_device_init_count;
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;
static int __init
init_ccw_bus_type (void)
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 11587ebb7289..00be9a5b4acd 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -1,6 +1,10 @@
#ifndef S390_DEVICE_H
#define S390_DEVICE_H
+#include <asm/ccwdev.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+
/*
* states of the device statemachine
*/
@@ -23,6 +27,7 @@ enum dev_state {
DEV_STATE_DISCONNECTED,
DEV_STATE_DISCONNECTED_SENSE_ID,
DEV_STATE_CMFCHANGE,
+ DEV_STATE_CMFUPDATE,
/* last element! */
NR_DEV_STATES
};
@@ -67,6 +72,8 @@ dev_fsm_final_state(struct ccw_device *cdev)
extern struct workqueue_struct *ccw_device_work;
extern struct workqueue_struct *ccw_device_notify_work;
+extern wait_queue_head_t ccw_device_init_wq;
+extern atomic_t ccw_device_init_count;
void io_subchannel_recog_done(struct ccw_device *cdev);
@@ -112,5 +119,8 @@ int ccw_device_stlck(struct ccw_device *);
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
+/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 49ec562d7f60..7d0dd72635eb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -336,8 +336,11 @@ ccw_device_oper_notify(void *data)
if (!ret)
/* Driver doesn't want device back. */
ccw_device_do_unreg_rereg((void *)cdev);
- else
+ else {
+ /* Reenable channel measurements, if needed. */
+ cmf_reenable(cdev);
wake_up(&cdev->private->wait_q);
+ }
}
/*
@@ -861,6 +864,8 @@ ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
irb = (struct irb *) __LC_IRB;
/* Accumulate status. We don't do basic sense. */
ccw_device_accumulate_irb(cdev, irb);
+ /* Remember to clear irb to avoid residuals. */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try to start delayed device verification. */
ccw_device_online_verify(cdev, 0);
/* Note: Don't call handler for cio initiated clear! */
@@ -1093,6 +1098,13 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
dev_fsm_event(cdev, dev_event);
}
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ cmf_retry_copy_block(cdev);
+ cdev->private->state = DEV_STATE_ONLINE;
+ dev_fsm_event(cdev, dev_event);
+}
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
@@ -1247,6 +1259,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
[DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
},
+ [DEV_STATE_CMFUPDATE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
+ [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
+ [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
+ [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
+ },
};
/*
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 795abb5a65ba..b266ad8e14ff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -78,7 +78,8 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY ||
+ cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = 1;
@@ -270,7 +271,8 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
* We didn't get channel end / device end. Check if path
* verification has been started; we can retry after it has
* finished. We also retry unit checks except for command reject
- * or intervention required.
+ * or intervention required. Also check for long busy
+ * conditions.
*/
if (cdev->private->flags.doverify ||
cdev->private->state == DEV_STATE_VERIFY)
@@ -279,6 +281,10 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
!(irb->ecw[0] &
(SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
cdev->private->intparm = -EAGAIN;
+ else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
+ (irb->scsw.dstat & DEV_STAT_DEV_END) &&
+ (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
+ cdev->private->intparm = -EAGAIN;
else
cdev->private->intparm = -EIO;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index f94419b334f7..2eded55ae88d 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1140,10 +1140,9 @@ list_modified:
}
}
/* re-insert all entries from the failed_list into ipm_list */
- list_for_each_entry_safe(ipm, tmp, &failed_list, list) {
- list_del_init(&ipm->list);
- list_add_tail(&ipm->list, &card->ipm_list);
- }
+ list_for_each_entry_safe(ipm, tmp, &failed_list, list)
+ list_move_tail(&ipm->list, &card->ipm_list);
+
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
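
The lcs.c hunk above, like the matching ones in ncr53c8xx.c and qla2xxx/qla_init.c later in this series, replaces open-coded unlink-and-append code with list_move_tail(), which does the same re-queueing in a single call. A minimal fragment showing the equivalence, assuming entries linked through a struct list_head member named list:

#include <linux/list.h>

/* before: two calls */
list_del_init(&ipm->list);
list_add_tail(&ipm->list, &card->ipm_list);

/* after: one call with the same effect */
list_move_tail(&ipm->list, &card->ipm_list);
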
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index f99e55308b32..8dc75002acbe 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/time.h>
+#include <linux/kthread.h>
#include <asm/lowcore.h>
@@ -56,8 +57,6 @@ s390_collect_crw_info(void *param)
unsigned int chain;
sem = (struct semaphore *)param;
- /* Set a nice name. */
- daemonize("kmcheck");
repeat:
down_interruptible(sem);
slow = 0;
@@ -516,7 +515,7 @@ arch_initcall(machine_check_init);
static int __init
machine_check_crw_init (void)
{
- kernel_thread(s390_collect_crw_info, &m_sem, CLONE_FS|CLONE_FILES);
+ kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
ctl_set_bit(14, 28); /* enable channel report MCH */
return 0;
}
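
The s390mach.c hunk converts a hand-rolled kernel_thread()+daemonize() pair into kthread_run(), which creates, names and starts the thread in one step and returns a task_struct pointer that can be error-checked. A minimal sketch of the kthread side with illustrative names; note that the real collector loops on a semaphore rather than checking kthread_should_stop():

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static int collector_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* placeholder for the real wait-and-process loop */
		msleep(1000);
	}
	return 0;
}

static int __init collector_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(collector_thread, NULL, "kcollect");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	return 0;
}
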
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4682c8b8bd24..909731b99d26 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -167,7 +167,7 @@ zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
* initiates adapter recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
@@ -203,7 +203,7 @@ zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
* purpose: Wrapper for zfcp_erp_adapter_reopen_internal
* used to ensure the correct locking
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
@@ -469,7 +469,7 @@ zfcp_test_link(struct zfcp_port *port)
* initiates Forced Reopen recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
static int
@@ -509,7 +509,7 @@ zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
* purpose: Wrapper for zfcp_erp_port_forced_reopen_internal
* used to ensure the correct locking
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
@@ -536,7 +536,7 @@ zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
* initiates Reopen recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
static int
@@ -605,7 +605,7 @@ zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
* initiates Reopen recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
static int
@@ -1805,7 +1805,7 @@ zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
* purpose: Wrapper for zfcp_erp_port_reopen_all_internal
* used to ensure the correct locking
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index 5bf3dd901b65..21737b7e86a1 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -755,7 +755,7 @@ static int __init wd_init(void)
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
- if (!strcmp(edev->prom_name, WD_OBPNAME))
+ if (!strcmp(edev->ofdev.node->name, WD_OBPNAME))
goto ebus_done;
}
}
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index cf5b476b5496..d7e4bb41bd79 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -29,8 +29,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define PROMLIB_INTERNAL
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -39,10 +37,10 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/oplib.h>
+#include <asm/prom.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/openpromio.h>
@@ -51,15 +49,20 @@
#include <asm/pbm.h>
#endif
+MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
+MODULE_DESCRIPTION("OPENPROM Configuration Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
/* Private data kept by the driver for each descriptor. */
typedef struct openprom_private_data
{
- int current_node; /* Current node for SunOS ioctls. */
- int lastnode; /* Last valid node used by BSD ioctls. */
+ struct device_node *current_node; /* Current node for SunOS ioctls. */
+ struct device_node *lastnode; /* Last valid node used by BSD ioctls. */
} DATA;
/* ID of the PROM node containing all of the EEPROM options. */
-static int options_node = 0;
+static struct device_node *options_node;
/*
* Copy an openpromio structure into kernel space from user space.
@@ -87,9 +90,8 @@ static int copyin(struct openpromio __user *info, struct openpromio **opp_p)
if (bufsize > OPROMMAXPARAM)
bufsize = OPROMMAXPARAM;
- if (!(*opp_p = kmalloc(sizeof(int) + bufsize + 1, GFP_KERNEL)))
+ if (!(*opp_p = kzalloc(sizeof(int) + bufsize + 1, GFP_KERNEL)))
return -ENOMEM;
- memset(*opp_p, 0, sizeof(int) + bufsize + 1);
if (copy_from_user(&(*opp_p)->oprom_array,
&info->oprom_array, bufsize)) {
@@ -107,10 +109,9 @@ static int getstrings(struct openpromio __user *info, struct openpromio **opp_p)
if (!info || !opp_p)
return -EFAULT;
- if (!(*opp_p = kmalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL)))
+ if (!(*opp_p = kzalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL)))
return -ENOMEM;
- memset(*opp_p, 0, sizeof(int) + OPROMMAXPARAM + 1);
(*opp_p)->oprom_size = 0;
n = bufsize = 0;
@@ -140,16 +141,164 @@ static int copyout(void __user *info, struct openpromio *opp, int len)
return 0;
}
+static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
+{
+ void *pval;
+ int len;
+
+ pval = of_get_property(dp, op->oprom_array, &len);
+ if (!pval || len <= 0 || len > bufsize)
+ return copyout(argp, op, sizeof(int));
+
+ memcpy(op->oprom_array, pval, len);
+ op->oprom_array[len] = '\0';
+ op->oprom_size = len;
+
+ return copyout(argp, op, sizeof(int) + bufsize);
+}
+
+static int opromnxtprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
+{
+ struct property *prop;
+ int len;
+
+ if (op->oprom_array[0] == '\0') {
+ prop = dp->properties;
+ if (!prop)
+ return copyout(argp, op, sizeof(int));
+ len = strlen(prop->name);
+ } else {
+ prop = of_find_property(dp, op->oprom_array, NULL);
+
+ if (!prop ||
+ !prop->next ||
+ (len = strlen(prop->next->name)) + 1 > bufsize)
+ return copyout(argp, op, sizeof(int));
+
+ prop = prop->next;
+ }
+
+ memcpy(op->oprom_array, prop->name, len);
+ op->oprom_array[len] = '\0';
+ op->oprom_size = ++len;
+
+ return copyout(argp, op, sizeof(int) + bufsize);
+}
+
+static int opromsetopt(struct device_node *dp, struct openpromio *op, int bufsize)
+{
+ char *buf = op->oprom_array + strlen(op->oprom_array) + 1;
+ int len = op->oprom_array + bufsize - buf;
+
+ return of_set_property(options_node, op->oprom_array, buf, len);
+}
+
+static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
+{
+ phandle ph;
+
+ BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
+
+ if (bufsize < sizeof(phandle))
+ return -EINVAL;
+
+ ph = *((int *) op->oprom_array);
+ if (ph) {
+ dp = of_find_node_by_phandle(ph);
+ if (!dp)
+ return -EINVAL;
+
+ switch (cmd) {
+ case OPROMNEXT:
+ dp = dp->sibling;
+ break;
+
+ case OPROMCHILD:
+ dp = dp->child;
+ break;
+
+ case OPROMSETCUR:
+ default:
+ break;
+ };
+ } else {
+ /* Sibling of node zero is the root node. */
+ if (cmd != OPROMNEXT)
+ return -EINVAL;
+
+ dp = of_find_node_by_path("/");
+ }
+
+ ph = 0;
+ if (dp)
+ ph = dp->node;
+
+ data->current_node = dp;
+ *((int *) op->oprom_array) = ph;
+ op->oprom_size = sizeof(phandle);
+
+ return copyout(argp, op, bufsize + sizeof(int));
+}
+
+static int oprompci2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
+{
+ int err = -EINVAL;
+
+ if (bufsize >= 2*sizeof(int)) {
+#ifdef CONFIG_PCI
+ struct pci_dev *pdev;
+ struct pcidev_cookie *pcp;
+ pdev = pci_find_slot (((int *) op->oprom_array)[0],
+ ((int *) op->oprom_array)[1]);
+
+ pcp = pdev->sysdata;
+ if (pcp != NULL) {
+ dp = pcp->prom_node;
+ data->current_node = dp;
+ *((int *)op->oprom_array) = dp->node;
+ op->oprom_size = sizeof(int);
+ err = copyout(argp, op, bufsize + sizeof(int));
+ }
+#endif
+ }
+
+ return err;
+}
+
+static int oprompath2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
+{
+ dp = of_find_node_by_path(op->oprom_array);
+ data->current_node = dp;
+ *((int *)op->oprom_array) = dp->node;
+ op->oprom_size = sizeof(int);
+
+ return copyout(argp, op, bufsize + sizeof(int));
+}
+
+static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsize)
+{
+ char *buf = saved_command_line;
+ int len = strlen(buf);
+
+ if (len > bufsize)
+ return -EINVAL;
+
+ strcpy(op->oprom_array, buf);
+ op->oprom_size = len;
+
+ return copyout(argp, op, bufsize + sizeof(int));
+}
+
/*
* SunOS and Solaris /dev/openprom ioctl calls.
*/
static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg, int node)
+ unsigned int cmd, unsigned long arg,
+ struct device_node *dp)
{
- DATA *data = (DATA *) file->private_data;
- char buffer[OPROMMAXPARAM+1], *buf;
+ DATA *data = file->private_data;
struct openpromio *opp;
- int bufsize, len, error = 0;
+ int bufsize, error = 0;
static int cnt;
void __user *argp = (void __user *)arg;
@@ -164,119 +313,35 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
switch (cmd) {
case OPROMGETOPT:
case OPROMGETPROP:
- len = prom_getproplen(node, opp->oprom_array);
-
- if (len <= 0 || len > bufsize) {
- error = copyout(argp, opp, sizeof(int));
- break;
- }
-
- len = prom_getproperty(node, opp->oprom_array, buffer, bufsize);
-
- memcpy(opp->oprom_array, buffer, len);
- opp->oprom_array[len] = '\0';
- opp->oprom_size = len;
-
- error = copyout(argp, opp, sizeof(int) + bufsize);
+ error = opromgetprop(argp, dp, opp, bufsize);
break;
case OPROMNXTOPT:
case OPROMNXTPROP:
- buf = prom_nextprop(node, opp->oprom_array, buffer);
-
- len = strlen(buf);
- if (len == 0 || len + 1 > bufsize) {
- error = copyout(argp, opp, sizeof(int));
- break;
- }
-
- memcpy(opp->oprom_array, buf, len);
- opp->oprom_array[len] = '\0';
- opp->oprom_size = ++len;
-
- error = copyout(argp, opp, sizeof(int) + bufsize);
+ error = opromnxtprop(argp, dp, opp, bufsize);
break;
case OPROMSETOPT:
case OPROMSETOPT2:
- buf = opp->oprom_array + strlen(opp->oprom_array) + 1;
- len = opp->oprom_array + bufsize - buf;
-
- error = prom_setprop(options_node, opp->oprom_array,
- buf, len);
-
- if (error < 0)
- error = -EINVAL;
+ error = opromsetopt(dp, opp, bufsize);
break;
case OPROMNEXT:
case OPROMCHILD:
case OPROMSETCUR:
- if (bufsize < sizeof(int)) {
- error = -EINVAL;
- break;
- }
-
- node = *((int *) opp->oprom_array);
-
- switch (cmd) {
- case OPROMNEXT: node = __prom_getsibling(node); break;
- case OPROMCHILD: node = __prom_getchild(node); break;
- case OPROMSETCUR: break;
- }
-
- data->current_node = node;
- *((int *)opp->oprom_array) = node;
- opp->oprom_size = sizeof(int);
-
- error = copyout(argp, opp, bufsize + sizeof(int));
+ error = opromnext(argp, cmd, dp, opp, bufsize, data);
break;
case OPROMPCI2NODE:
- error = -EINVAL;
-
- if (bufsize >= 2*sizeof(int)) {
-#ifdef CONFIG_PCI
- struct pci_dev *pdev;
- struct pcidev_cookie *pcp;
- pdev = pci_find_slot (((int *) opp->oprom_array)[0],
- ((int *) opp->oprom_array)[1]);
-
- pcp = pdev->sysdata;
- if (pcp != NULL) {
- node = pcp->prom_node->node;
- data->current_node = node;
- *((int *)opp->oprom_array) = node;
- opp->oprom_size = sizeof(int);
- error = copyout(argp, opp, bufsize + sizeof(int));
- }
-#endif
- }
+ error = oprompci2node(argp, dp, opp, bufsize, data);
break;
case OPROMPATH2NODE:
- node = prom_finddevice(opp->oprom_array);
- data->current_node = node;
- *((int *)opp->oprom_array) = node;
- opp->oprom_size = sizeof(int);
-
- error = copyout(argp, opp, bufsize + sizeof(int));
+ error = oprompath2node(argp, dp, opp, bufsize, data);
break;
case OPROMGETBOOTARGS:
- buf = saved_command_line;
-
- len = strlen(buf);
-
- if (len > bufsize) {
- error = -EINVAL;
- break;
- }
-
- strcpy(opp->oprom_array, buf);
- opp->oprom_size = len;
-
- error = copyout(argp, opp, bufsize + sizeof(int));
+ error = opromgetbootargs(argp, opp, bufsize);
break;
case OPROMU2P:
@@ -297,25 +362,14 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
return error;
}
-
-/* Return nonzero if a specific node is in the PROM device tree. */
-static int intree(int root, int node)
+static struct device_node *get_node(phandle n, DATA *data)
{
- for (; root != 0; root = prom_getsibling(root))
- if (root == node || intree(prom_getchild(root),node))
- return 1;
- return 0;
-}
+ struct device_node *dp = of_find_node_by_phandle(n);
-/* Return nonzero if a specific node is "valid". */
-static int goodnode(int n, DATA *data)
-{
- if (n == data->lastnode || n == prom_root_node || n == options_node)
- return 1;
- if (n == 0 || n == -1 || !intree(prom_root_node,n))
- return 0;
- data->lastnode = n;
- return 1;
+ if (dp)
+ data->lastnode = dp;
+
+ return dp;
}
/* Copy in a whole string from userspace into kernelspace. */
@@ -330,7 +384,7 @@ static int copyin_string(char __user *user, size_t len, char **ptr)
if (!tmp)
return -ENOMEM;
- if(copy_from_user(tmp, user, len)) {
+ if (copy_from_user(tmp, user, len)) {
kfree(tmp);
return -EFAULT;
}
@@ -345,162 +399,187 @@ static int copyin_string(char __user *user, size_t len, char **ptr)
/*
* NetBSD /dev/openprom ioctl calls.
*/
-static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
+static int opiocget(void __user *argp, DATA *data)
{
- DATA *data = (DATA *) file->private_data;
- void __user *argp = (void __user *)arg;
struct opiocdesc op;
- int error, node, len;
- char *str, *tmp;
- char buffer[64];
- static int cnt;
-
- switch (cmd) {
- case OPIOCGET:
- if (copy_from_user(&op, argp, sizeof(op)))
- return -EFAULT;
-
- if (!goodnode(op.op_nodeid,data))
- return -EINVAL;
+ struct device_node *dp;
+ char *str;
+ void *pval;
+ int err, len;
- error = copyin_string(op.op_name, op.op_namelen, &str);
- if (error)
- return error;
+ if (copy_from_user(&op, argp, sizeof(op)))
+ return -EFAULT;
- len = prom_getproplen(op.op_nodeid,str);
+ dp = get_node(op.op_nodeid, data);
- if (len > op.op_buflen) {
- kfree(str);
- return -ENOMEM;
- }
+ err = copyin_string(op.op_name, op.op_namelen, &str);
+ if (err)
+ return err;
+ pval = of_get_property(dp, str, &len);
+ err = 0;
+ if (!pval || len > op.op_buflen) {
+ err = -EINVAL;
+ } else {
op.op_buflen = len;
+ if (copy_to_user(argp, &op, sizeof(op)) ||
+ copy_to_user(op.op_buf, pval, len))
+ err = -EFAULT;
+ }
+ kfree(str);
- if (len <= 0) {
- kfree(str);
- /* Verified by the above copy_from_user */
- if (__copy_to_user(argp, &op,
- sizeof(op)))
- return -EFAULT;
- return 0;
- }
+ return err;
+}
- tmp = kmalloc(len + 1, GFP_KERNEL);
- if (!tmp) {
- kfree(str);
- return -ENOMEM;
- }
+static int opiocnextprop(void __user *argp, DATA *data)
+{
+ struct opiocdesc op;
+ struct device_node *dp;
+ struct property *prop;
+ char *str;
+ int err, len;
- cnt = prom_getproperty(op.op_nodeid, str, tmp, len);
- if (cnt <= 0) {
- error = -EINVAL;
- } else {
- tmp[len] = '\0';
+ if (copy_from_user(&op, argp, sizeof(op)))
+ return -EFAULT;
- if (__copy_to_user(argp, &op, sizeof(op)) != 0 ||
- copy_to_user(op.op_buf, tmp, len) != 0)
- error = -EFAULT;
- }
+ dp = get_node(op.op_nodeid, data);
+ if (!dp)
+ return -EINVAL;
- kfree(tmp);
- kfree(str);
+ err = copyin_string(op.op_name, op.op_namelen, &str);
+ if (err)
+ return err;
- return error;
+ if (str[0] == '\0') {
+ prop = dp->properties;
+ } else {
+ prop = of_find_property(dp, str, NULL);
+ if (prop)
+ prop = prop->next;
+ }
+ kfree(str);
- case OPIOCNEXTPROP:
- if (copy_from_user(&op, argp, sizeof(op)))
- return -EFAULT;
+ if (!prop)
+ len = 0;
+ else
+ len = prop->length;
- if (!goodnode(op.op_nodeid,data))
- return -EINVAL;
+ if (len > op.op_buflen)
+ len = op.op_buflen;
- error = copyin_string(op.op_name, op.op_namelen, &str);
- if (error)
- return error;
+ if (copy_to_user(argp, &op, sizeof(op)))
+ return -EFAULT;
- tmp = prom_nextprop(op.op_nodeid,str,buffer);
+ if (len &&
+ copy_to_user(op.op_buf, prop->value, len))
+ return -EFAULT;
- if (tmp) {
- len = strlen(tmp);
- if (len > op.op_buflen)
- len = op.op_buflen;
- else
- op.op_buflen = len;
- } else {
- len = op.op_buflen = 0;
- }
+ return 0;
+}
- if (!access_ok(VERIFY_WRITE, argp, sizeof(op))) {
- kfree(str);
- return -EFAULT;
- }
+static int opiocset(void __user *argp, DATA *data)
+{
+ struct opiocdesc op;
+ struct device_node *dp;
+ char *str, *tmp;
+ int err;
- if (!access_ok(VERIFY_WRITE, op.op_buf, len)) {
- kfree(str);
- return -EFAULT;
- }
+ if (copy_from_user(&op, argp, sizeof(op)))
+ return -EFAULT;
+
+ dp = get_node(op.op_nodeid, data);
+ if (!dp)
+ return -EINVAL;
- error = __copy_to_user(argp, &op, sizeof(op));
- if (!error) error = __copy_to_user(op.op_buf, tmp, len);
+ err = copyin_string(op.op_name, op.op_namelen, &str);
+ if (err)
+ return err;
+ err = copyin_string(op.op_buf, op.op_buflen, &tmp);
+ if (err) {
kfree(str);
+ return err;
+ }
- return error;
+ err = of_set_property(dp, str, tmp, op.op_buflen);
- case OPIOCSET:
- if (copy_from_user(&op, argp, sizeof(op)))
- return -EFAULT;
+ kfree(str);
+ kfree(tmp);
- if (!goodnode(op.op_nodeid,data))
- return -EINVAL;
+ return err;
+}
- error = copyin_string(op.op_name, op.op_namelen, &str);
- if (error)
- return error;
+static int opiocgetnext(unsigned int cmd, void __user *argp)
+{
+ struct device_node *dp;
+ phandle nd;
- error = copyin_string(op.op_buf, op.op_buflen, &tmp);
- if (error) {
- kfree(str);
- return error;
- }
+ BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
- len = prom_setprop(op.op_nodeid,str,tmp,op.op_buflen+1);
+ if (copy_from_user(&nd, argp, sizeof(phandle)))
+ return -EFAULT;
- if (len != op.op_buflen)
+ if (nd == 0) {
+ if (cmd != OPIOCGETNEXT)
return -EINVAL;
+ dp = of_find_node_by_path("/");
+ } else {
+ dp = of_find_node_by_phandle(nd);
+ nd = 0;
+ if (dp) {
+ if (cmd == OPIOCGETNEXT)
+ dp = dp->sibling;
+ else
+ dp = dp->child;
+ }
+ }
+ if (dp)
+ nd = dp->node;
+ if (copy_to_user(argp, &nd, sizeof(phandle)))
+ return -EFAULT;
- kfree(str);
- kfree(tmp);
+ return 0;
+}
- return 0;
+static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ DATA *data = (DATA *) file->private_data;
+ void __user *argp = (void __user *)arg;
+ int err;
- case OPIOCGETOPTNODE:
- if (copy_to_user(argp, &options_node, sizeof(int)))
- return -EFAULT;
- return 0;
+ switch (cmd) {
+ case OPIOCGET:
+ err = opiocget(argp, data);
+ break;
- case OPIOCGETNEXT:
- case OPIOCGETCHILD:
- if (copy_from_user(&node, argp, sizeof(int)))
- return -EFAULT;
+ case OPIOCNEXTPROP:
+ err = opiocnextprop(argp, data);
+ break;
- if (cmd == OPIOCGETNEXT)
- node = __prom_getsibling(node);
- else
- node = __prom_getchild(node);
+ case OPIOCSET:
+ err = opiocset(argp, data);
+ break;
+
+ case OPIOCGETOPTNODE:
+ BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
- if (__copy_to_user(argp, &node, sizeof(int)))
+ if (copy_to_user(argp, &options_node->node, sizeof(phandle)))
return -EFAULT;
return 0;
+ case OPIOCGETNEXT:
+ case OPIOCGETCHILD:
+ err = opiocgetnext(cmd, argp);
+ break;
+
default:
- if (cnt++ < 10)
- printk(KERN_INFO "openprom_bsd_ioctl: cmd 0x%X\n", cmd);
return -EINVAL;
- }
+ };
+
+ return err;
}
@@ -511,7 +590,6 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg)
{
DATA *data = (DATA *) file->private_data;
- static int cnt;
switch (cmd) {
case OPROMGETOPT:
@@ -563,10 +641,8 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
return openprom_bsd_ioctl(inode,file,cmd,arg);
default:
- if (cnt++ < 10)
- printk("openprom_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg);
return -EINVAL;
- }
+ };
}
static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
@@ -594,9 +670,7 @@ static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
case OPROMSETCUR:
case OPROMPCI2NODE:
case OPROMPATH2NODE:
- lock_kernel();
rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg);
- lock_kernel();
break;
}
@@ -607,13 +681,13 @@ static int openprom_open(struct inode * inode, struct file * file)
{
DATA *data;
- data = (DATA *) kmalloc(sizeof(DATA), GFP_KERNEL);
+ data = kmalloc(sizeof(DATA), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->current_node = prom_root_node;
- data->lastnode = prom_root_node;
- file->private_data = (void *)data;
+ data->current_node = of_find_node_by_path("/");
+ data->lastnode = data->current_node;
+ file->private_data = (void *) data;
return 0;
}
@@ -634,24 +708,30 @@ static struct file_operations openprom_fops = {
};
static struct miscdevice openprom_dev = {
- SUN_OPENPROM_MINOR, "openprom", &openprom_fops
+ .minor = SUN_OPENPROM_MINOR,
+ .name = "openprom",
+ .fops = &openprom_fops,
};
static int __init openprom_init(void)
{
- int error;
+ struct device_node *dp;
+ int err;
- error = misc_register(&openprom_dev);
- if (error) {
- printk(KERN_ERR "openprom: unable to get misc minor\n");
- return error;
- }
+ err = misc_register(&openprom_dev);
+ if (err)
+ return err;
- options_node = prom_getchild(prom_root_node);
- options_node = prom_searchsiblings(options_node,"options");
+ dp = of_find_node_by_path("/");
+ dp = dp->child;
+ while (dp) {
+ if (!strcmp(dp->name, "options"))
+ break;
+ dp = dp->sibling;
+ }
+ options_node = dp;
- if (options_node == 0 || options_node == -1) {
- printk(KERN_ERR "openprom: unable to find options node\n");
+ if (!options_node) {
misc_deregister(&openprom_dev);
return -EIO;
}
@@ -666,4 +746,3 @@ static void __exit openprom_cleanup(void)
module_init(openprom_init);
module_exit(openprom_cleanup);
-MODULE_LICENSE("GPL");
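
The openprom.c rewrite above replaces integer PROM node handles and the prom_getproplen()/prom_getproperty()/prom_nextprop() calls with struct device_node pointers and the of_* helpers (of_find_node_by_path(), of_find_node_by_phandle(), of_get_property(), of_find_property(), of_set_property()), and swaps kmalloc()+memset() for kzalloc(). A minimal sketch of the new lookup style, assuming only those helpers; the property name and message are illustrative:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/prom.h>

static int print_root_model(void)
{
	struct device_node *dp;
	void *pval;
	int len;

	dp = of_find_node_by_path("/");
	if (!dp)
		return -ENODEV;

	pval = of_get_property(dp, "model", &len);
	if (pval)
		printk(KERN_INFO "root model: %s\n", (char *) pval);

	return 0;
}
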
diff --git a/drivers/sbus/char/riowatchdog.c b/drivers/sbus/char/riowatchdog.c
index d1babff6a535..2a9cc8204429 100644
--- a/drivers/sbus/char/riowatchdog.c
+++ b/drivers/sbus/char/riowatchdog.c
@@ -211,7 +211,7 @@ static int __init riowd_bbc_init(void)
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
- if (!strcmp(edev->prom_name, "bbc"))
+ if (!strcmp(edev->ofdev.node->name, "bbc"))
goto found_bbc;
}
}
@@ -238,7 +238,7 @@ static int __init riowd_init(void)
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
- if (!strcmp(edev->prom_name, RIOWD_NAME))
+ if (!strcmp(edev->ofdev.node->name, RIOWD_NAME))
goto ebus_done;
}
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 44728ae3fe77..96a81cd17617 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -501,7 +501,7 @@ config SCSI_ATA_PIIX
tristate "Intel PIIX/ICH SATA support"
depends on SCSI_SATA && PCI
help
- This option enables support for ICH5 Serial ATA.
+ This option enables support for ICH5/6/7/8 Serial ATA.
If PATA support was enabled previously, this enables
support for select Intel PIIX/ICH PATA host controllers.
@@ -1169,7 +1169,7 @@ config SCSI_NCR_Q720
you do not have this SCSI card, so say N.
config SCSI_NCR53C8XX_DEFAULT_TAGS
- int " default tagged command queue depth"
+ int "default tagged command queue depth"
depends on SCSI_ZALON || SCSI_NCR_Q720
default "8"
---help---
@@ -1195,7 +1195,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
There is no safe option other than using good SCSI devices.
config SCSI_NCR53C8XX_MAX_TAGS
- int " maximum number of queued commands"
+ int "maximum number of queued commands"
depends on SCSI_ZALON || SCSI_NCR_Q720
default "32"
---help---
@@ -1212,7 +1212,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
There is no safe option and the default answer is recommended.
config SCSI_NCR53C8XX_SYNC
- int " synchronous transfers frequency in MHz"
+ int "synchronous transfers frequency in MHz"
depends on SCSI_ZALON || SCSI_NCR_Q720
default "20"
---help---
@@ -1246,7 +1246,7 @@ config SCSI_NCR53C8XX_SYNC
terminations and SCSI conformant devices.
config SCSI_NCR53C8XX_PROFILE
- bool " enable profiling"
+ bool "enable profiling"
depends on SCSI_ZALON || SCSI_NCR_Q720
help
This option allows you to enable profiling information gathering.
@@ -1257,7 +1257,7 @@ config SCSI_NCR53C8XX_PROFILE
The normal answer therefore is N.
config SCSI_NCR53C8XX_NO_DISCONNECT
- bool " not allow targets to disconnect"
+ bool "not allow targets to disconnect"
depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
help
This option is only provided for safety if you suspect some SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 84d546323dc7..ebd0cf00bf3e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
+obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index fa57e0b4a5fd..75f2f7ae2a8e 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -500,7 +500,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
/*
* Function : int should_disconnect (unsigned char cmd)
*
- * Purpose : decide weather a command would normally disconnect or
+ * Purpose : decide whether a command would normally disconnect or
* not, since if it won't disconnect we should go to sleep.
*
* Input : cmd - opcode of SCSI command
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 35b0a6ebd3f5..7cea514e810a 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -104,8 +104,11 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
* always true on real computers. It also has some slight problems
* with the GART on x86-64. I've btw never tried DMA from PCI space
* on this platform but don't be surprised if it's problematic.
+ * [AK: something is very very wrong when a driver tests this symbol.
+ * Someone should figure out what the comment writer really meant here and fix
+ * the code. Or just remove that bad code. ]
*/
-#ifndef CONFIG_GART_IOMMU
+#ifndef CONFIG_IOMMU
if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
init->HostPhysMemPages =
cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 5ee47555a8af..dd9fb3d91000 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -12374,7 +12374,7 @@ AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
ASC_PRINT1(
"AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i);
} else {
- ASC_PRINT("AscInitFromEEP: Succesfully re-wrote EEPROM.");
+ ASC_PRINT("AscInitFromEEP: Successfully re-wrote EEPROM.\n");
}
}
return (warn_code);
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 4bb77f62b3b9..f05946777718 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
#include <asm/io.h>
#define DRV_NAME "ahci"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "2.0"
enum {
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 521b718763f6..94b1261a259d 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -93,7 +93,7 @@
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
-#define DRV_VERSION "1.10"
+#define DRV_VERSION "2.00"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 183245254931..58b0748045ee 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3771,7 +3771,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
* @target: The target for the new device.
* @lun: The lun for the new device.
*
- * Return the new device if succesfull or NULL on failure.
+ * Return the new device if successful or NULL on failure.
**/
static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
u8 target, u8 lun)
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 115f55471ed3..497f6642b2dc 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -760,7 +760,7 @@ static int device_inquiry(int host_index, int ldn)
while (!got_interrupt(host_index))
barrier();
- /*if command succesful, break */
+ /*if command successful, break */
if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
@@ -885,7 +885,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
while (!got_interrupt(host_index))
barrier();
- /*if command succesful, break */
+ /*if command successful, break */
if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
@@ -921,7 +921,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
return 2;
} else
global_command_error_excuse = 0;
- /*if command succesful, break */
+ /*if command successful, break */
if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
@@ -959,7 +959,7 @@ static int immediate_reset(int host_index, unsigned int ldn)
/* did not work, finish */
return 1;
}
- /*if command succesful, break */
+ /*if command successful, break */
if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index cd2dffdab77a..681bd18493f3 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -3,9 +3,6 @@
*
* (The IMM is the embedded controller in the ZIP Plus drive.)
*
- * Current Maintainer: David Campbell (Perth, Western Australia)
- * campbell@torque.net
- *
* My unofficial company acronym list is 21 pages long:
* FLA: Four letter acronym with built in facility for
* future expansion to five letters.
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index dc3aebf0e365..ece936ac29c7 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -2,7 +2,7 @@
/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in
* the Iomega ZIP Plus drive
*
- * (c) 1998 David Campbell campbell@torque.net
+ * (c) 1998 David Campbell
*
* Please note that I live in Perth, Western Australia. GMT+0800
*/
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 5353b28b2939..78f2ff736c3e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -6438,7 +6438,7 @@ ips_erase_bios(ips_ha_t * ha)
/* VPP failure */
return (1);
- /* check for succesful flash */
+ /* check for successful flash */
if (status & 0x30)
/* sequence error */
return (1);
@@ -6550,7 +6550,7 @@ ips_erase_bios_memio(ips_ha_t * ha)
/* VPP failure */
return (1);
- /* check for succesful flash */
+ /* check for successful flash */
if (status & 0x30)
/* sequence error */
return (1);
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 6c66877be2bf..d1c1c30d123f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -88,6 +88,10 @@ int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
+module_param(ata_probe_timeout, int, 0444);
+MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -777,11 +781,9 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
- if (ata_msg_probe(ap)) {
+ if (ata_msg_probe(ap))
ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
- "device %u, wait %u\n",
- ap->id, device, wait);
- }
+ "device %u, wait %u\n", ap->id, device, wait);
if (wait)
ata_wait_idle(ap);
@@ -950,7 +952,8 @@ void ata_port_flush_task(struct ata_port *ap)
*/
if (!cancel_delayed_work(&ap->port_task)) {
if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
+ ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
+ __FUNCTION__);
flush_workqueue(ata_wq);
}
@@ -1059,7 +1062,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
- rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
+ rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
ata_port_flush_task(ap);
@@ -1081,7 +1084,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
if (ata_msg_warn(ap))
ata_dev_printk(dev, KERN_WARNING,
- "qc timeout (cmd 0x%x)\n", command);
+ "qc timeout (cmd 0x%x)\n", command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -1093,9 +1096,9 @@ unsigned ata_exec_internal(struct ata_device *dev,
if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_printk(dev, KERN_WARNING,
"zero err_mask for failed "
- "internal command, assuming AC_ERR_OTHER\n");
+ "internal command, assuming AC_ERR_OTHER\n");
qc->err_mask |= AC_ERR_OTHER;
}
@@ -1132,6 +1135,33 @@ unsigned ata_exec_internal(struct ata_device *dev,
}
/**
+ * ata_do_simple_cmd - execute simple internal command
+ * @dev: Device to which the command is sent
+ * @cmd: Opcode to execute
+ *
+ * Execute a 'simple' command that consists only of the opcode
+ * 'cmd' itself, without filling any other registers
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
+{
+ struct ata_taskfile tf;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = cmd;
+ tf.flags |= ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+
+ return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+}
+
+/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
@@ -1193,8 +1223,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int rc;
if (ata_msg_ctl(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+ __FUNCTION__, ap->id, dev->devno);
ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
@@ -1263,9 +1293,9 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
return 0;
err_out:
- if (ata_msg_warn(ap))
+ if (ata_msg_warn(ap))
ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
- "(%s, err_mask=0x%x)\n", reason, err_mask);
+ "(%s, err_mask=0x%x)\n", reason, err_mask);
return rc;
}
@@ -1318,19 +1348,21 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
int i, rc;
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_INFO,
+ "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
+ __FUNCTION__, ap->id, dev->devno);
return 0;
}
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+ __FUNCTION__, ap->id, dev->devno);
/* print device capabilities */
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
- "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
__FUNCTION__,
id[49], id[82], id[83], id[84],
id[85], id[86], id[87], id[88]);
@@ -1402,14 +1434,16 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
ata_id_major_version(id),
ata_mode_string(xfer_mask),
(unsigned long long)dev->n_sectors,
- dev->cylinders, dev->heads, dev->sectors);
+ dev->cylinders, dev->heads,
+ dev->sectors);
}
if (dev->id[59] & 0x100) {
dev->multi_count = dev->id[59] & 0xff;
if (ata_msg_info(ap))
- ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
- ap->id, dev->devno, dev->multi_count);
+ ata_dev_printk(dev, KERN_INFO,
+ "ata%u: dev %u multi count %u\n",
+ ap->id, dev->devno, dev->multi_count);
}
dev->cdb_len = 16;
@@ -1422,8 +1456,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "unsupported CDB len\n");
+ ata_dev_printk(dev, KERN_WARNING,
+ "unsupported CDB len\n");
rc = -EINVAL;
goto err_out_nosup;
}
@@ -1466,8 +1500,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
err_out_nosup:
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: EXIT, err\n", __FUNCTION__);
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: EXIT, err\n", __FUNCTION__);
return rc;
}
@@ -3527,7 +3561,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* Inherited from caller.
*/
-void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data)
{
struct ata_port *ap = adev->ap;
@@ -3573,7 +3607,7 @@ void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
* Inherited from caller.
*/
-void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
+void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data)
{
struct ata_port *ap = adev->ap;
@@ -3607,7 +3641,7 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
* @buflen: buffer length
* @write_data: read/write
*
- * Transfer data from/to the device data register by PIO. Do the
+ * Transfer data from/to the device data register by PIO. Do the
* transfer with interrupts disabled.
*
* LOCKING:
@@ -4946,31 +4980,9 @@ int ata_port_offline(struct ata_port *ap)
return 0;
}
-/*
- * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
- * without filling any other registers
- */
-static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
-{
- struct ata_taskfile tf;
- int err;
-
- ata_tf_init(dev, &tf);
-
- tf.command = cmd;
- tf.flags |= ATA_TFLAG_DEVICE;
- tf.protocol = ATA_PROT_NODATA;
-
- err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
- if (err)
- ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
- __FUNCTION__, err);
-
- return err;
-}
-
-static int ata_flush_cache(struct ata_device *dev)
+int ata_flush_cache(struct ata_device *dev)
{
+ unsigned int err_mask;
u8 cmd;
if (!ata_try_flush_cache(dev))
@@ -4981,17 +4993,41 @@ static int ata_flush_cache(struct ata_device *dev)
else
cmd = ATA_CMD_FLUSH;
- return ata_do_simple_cmd(dev, cmd);
+ err_mask = ata_do_simple_cmd(dev, cmd);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
+ return -EIO;
+ }
+
+ return 0;
}
static int ata_standby_drive(struct ata_device *dev)
{
- return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+ unsigned int err_mask;
+
+ err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ return 0;
}
static int ata_start_drive(struct ata_device *dev)
{
- return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
+ unsigned int err_mask;
+
+ err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to start drive "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ return 0;
}
/**
@@ -5212,7 +5248,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
-#else
+#else
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
@@ -5709,6 +5745,7 @@ int ata_pci_device_resume(struct pci_dev *pdev)
static int __init ata_init(void)
{
+ ata_probe_timeout *= HZ;
ata_wq = create_workqueue("ata");
if (!ata_wq)
return -ENOMEM;
@@ -5733,7 +5770,7 @@ module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
-static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ata_ratelimit_lock);
int ata_ratelimit(void)
{
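
Two small idioms recur in the libata-core.c hunks above: the internal-command timeout becomes a module parameter given in seconds and converted to jiffies once in ata_init() (ata_probe_timeout *= HZ), and the open-coded SPIN_LOCK_UNLOCKED initializer gives way to DEFINE_SPINLOCK(). A short sketch of both, with illustrative names and an illustrative default value:

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

static int probe_timeout = 30;		/* seconds; illustrative default */
module_param(probe_timeout, int, 0444);
MODULE_PARM_DESC(probe_timeout, "probe timeout (seconds)");

static DEFINE_SPINLOCK(example_lock);	/* replaces SPIN_LOCK_UNLOCKED */

static int __init example_init(void)
{
	/* convert once; later waiters pass the jiffies value directly,
	 * e.g. wait_for_completion_timeout(&wait, probe_timeout) */
	probe_timeout *= HZ;
	return 0;
}
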
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 823385981a7a..bf5a72aca8a4 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -93,6 +93,38 @@ static int ata_ering_map(struct ata_ering *ering,
return rc;
}
+static unsigned int ata_eh_dev_action(struct ata_device *dev)
+{
+ struct ata_eh_context *ehc = &dev->ap->eh_context;
+
+ return ehc->i.action | ehc->i.dev_action[dev->devno];
+}
+
+static void ata_eh_clear_action(struct ata_device *dev,
+ struct ata_eh_info *ehi, unsigned int action)
+{
+ int i;
+
+ if (!dev) {
+ ehi->action &= ~action;
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ehi->dev_action[i] &= ~action;
+ } else {
+ /* doesn't make sense for port-wide EH actions */
+ WARN_ON(!(action & ATA_EH_PERDEV_MASK));
+
+ /* break ehi->action into ehi->dev_action */
+ if (ehi->action & action) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ehi->dev_action[i] |= ehi->action & action;
+ ehi->action &= ~action;
+ }
+
+ /* turn off the specified per-dev action */
+ ehi->dev_action[dev->devno] &= ~action;
+ }
+}
+
/**
* ata_scsi_timed_out - SCSI layer time out callback
* @cmd: timed out SCSI command
@@ -702,32 +734,11 @@ static void ata_eh_detach_dev(struct ata_device *dev)
ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
}
- spin_unlock_irqrestore(ap->lock, flags);
-}
-
-static void ata_eh_clear_action(struct ata_device *dev,
- struct ata_eh_info *ehi, unsigned int action)
-{
- int i;
+ /* clear per-dev EH actions */
+ ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
+ ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
- if (!dev) {
- ehi->action &= ~action;
- for (i = 0; i < ATA_MAX_DEVICES; i++)
- ehi->dev_action[i] &= ~action;
- } else {
- /* doesn't make sense for port-wide EH actions */
- WARN_ON(!(action & ATA_EH_PERDEV_MASK));
-
- /* break ehi->action into ehi->dev_action */
- if (ehi->action & action) {
- for (i = 0; i < ATA_MAX_DEVICES; i++)
- ehi->dev_action[i] |= ehi->action & action;
- ehi->action &= ~action;
- }
-
- /* turn off the specified per-dev action */
- ehi->dev_action[dev->devno] &= ~action;
- }
+ spin_unlock_irqrestore(ap->lock, flags);
}
/**
@@ -1592,7 +1603,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
unsigned int action;
dev = &ap->device[i];
- action = ehc->i.action | ehc->i.dev_action[dev->devno];
+ action = ata_eh_dev_action(dev);
if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
if (ata_port_offline(ap)) {
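
ata_eh_clear_action(), moved ahead of its new caller in the hunk above, turns a pending port-wide EH action into per-device actions before clearing it for one device, so the remaining devices keep the action. A worked example in comment form, assuming (purely for illustration) ATA_MAX_DEVICES == 2 and a per-device action stored in bit 0:

/*
 * Before:  ehi->action       = 0x1        (port-wide)
 *          ehi->dev_action[] = { 0x0, 0x0 }
 *
 * ata_eh_clear_action(dev0, ehi, 0x1) first copies the port-wide bit
 * to every device and clears it from ehi->action, then clears it for
 * dev0 only:
 *
 * After:   ehi->action       = 0x0
 *          ehi->dev_action[] = { 0x0, 0x1 }
 *
 * Device 1 still sees the action; device 0 no longer does.
 */
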
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 93d18a74c401..2915bca691e8 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -222,9 +222,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
- if (argbuf)
- kfree(argbuf);
-
+ kfree(argbuf);
return rc;
}
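
The libata-scsi.c hunk drops the if (argbuf) check because kfree() is defined to be a no-op on a NULL pointer, so the unconditional call is equivalent and shorter:

/* before */
if (argbuf)
	kfree(argbuf);

/* after: equivalent, since kfree(NULL) does nothing */
kfree(argbuf);
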
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index bdd488897096..c325679d9b54 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -29,7 +29,7 @@
#define __LIBATA_H__
#define DRV_NAME "libata"
-#define DRV_VERSION "1.30" /* must be exactly four chars */
+#define DRV_VERSION "2.00" /* must be exactly four chars */
struct ata_scsi_args {
struct ata_device *dev;
@@ -50,6 +50,7 @@ extern void ata_port_flush_task(struct ata_port *ap);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen);
+extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int post_reset, u16 *id);
extern int ata_dev_configure(struct ata_device *dev, int print_info);
@@ -64,6 +65,7 @@ extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
+extern int ata_flush_cache(struct ata_device *dev);
extern void ata_dev_init(struct ata_device *dev);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6ab035590ee6..b28712df0b77 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -5118,8 +5118,7 @@ static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp)
cp->host_status &= ~HS_SKIPMASK;
cp->start.schedule.l_paddr =
cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
- list_del(&cp->link_ccbq);
- list_add_tail(&cp->link_ccbq, &lp->skip_ccbq);
+ list_move_tail(&cp->link_ccbq, &lp->skip_ccbq);
if (cp->queued) {
--lp->queuedccbs;
}
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 108910f512e4..d58ac5ad509d 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -6,8 +6,6 @@
* (c) 1995,1996 Grant R. Guenther, grant@torque.net,
* under the terms of the GNU General Public License.
*
- * Current Maintainer: David Campbell (Perth, Western Australia, GMT+0800)
- * campbell@torque.net
*/
#include <linux/config.h>
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index f6e1a1574bb8..7511df3588e4 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -2,7 +2,7 @@
* the Iomega ZIP drive
*
* (c) 1996 Grant R. Guenther grant@torque.net
- * David Campbell campbell@torque.net
+ * David Campbell
*
* All comments to David.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aef093db597e..3d4487eac9b7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2258,8 +2258,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
}
/* Remove device from the new list and add it to DB */
- list_del(&fcport->list);
- list_add_tail(&fcport->list, &ha->fcports);
+ list_move_tail(&fcport->list, &ha->fcports);
/* Login and update database */
qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index d18e7e0932ef..5cc42c6054eb 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -44,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "2.0"
enum {
NV_PORTS = 2,
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index bc9f918a7f28..51d86d750e84 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -46,12 +46,13 @@
#include <linux/libata.h>
#define DRV_NAME "sata_sil"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "2.0"
enum {
/*
* host flags
*/
+ SIL_FLAG_NO_SATA_IRQ = (1 << 28),
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
@@ -62,8 +63,9 @@ enum {
* Controller IDs
*/
sil_3112 = 0,
- sil_3512 = 1,
- sil_3114 = 2,
+ sil_3112_no_sata_irq = 1,
+ sil_3512 = 2,
+ sil_3114 = 3,
/*
* Register offsets
@@ -123,8 +125,8 @@ static const struct pci_device_id sil_pci_tbl[] = {
{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
+ { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
{ } /* terminate list */
};
@@ -217,6 +219,16 @@ static const struct ata_port_info sil_port_info[] = {
.udma_mask = 0x3f, /* udma0-5 */
.port_ops = &sil_ops,
},
+ /* sil_3112_no_sata_irq */
+ {
+ .sht = &sil_sht,
+ .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
+ SIL_FLAG_NO_SATA_IRQ,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x3f, /* udma0-5 */
+ .port_ops = &sil_ops,
+ },
/* sil_3512 */
{
.sht = &sil_sht,
@@ -437,6 +449,10 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance,
if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
continue;
+ /* turn off SATA_IRQ if not supported */
+ if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
+ bmdma2 &= ~SIL_DMA_SATA_IRQ;
+
if (bmdma2 == 0xffffffff ||
!(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
continue;
@@ -474,8 +490,9 @@ static void sil_thaw(struct ata_port *ap)
ata_chk_status(ap);
ata_bmdma_irq_clear(ap);
- /* turn on SATA IRQ */
- writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
+ /* turn on SATA IRQ if supported */
+ if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
+ writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
/* turn on IRQ */
tmp = readl(mmio_base + SIL_SYSCFG);
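The sata_sil change adds a per-port capability flag and gates the SATA interrupt on it in two places: the handler masks the status bit out, and the thaw path skips enabling the source. A rough standalone sketch of that gating pattern; every name and bit value below is a hypothetical stand-in, not the driver's actual registers:

#include <stdint.h>
#include <stdbool.h>

#define FLAG_NO_SATA_IRQ	(1u << 28)	/* hypothetical per-port flag */
#define DMA_SATA_IRQ		(1u << 3)	/* hypothetical status bits */
#define DMA_COMPLETE		(1u << 0)

/* In the interrupt handler: mask out status bits the port cannot use. */
static bool port_has_work(uint32_t port_flags, uint32_t bmdma_status)
{
	if (port_flags & FLAG_NO_SATA_IRQ)
		bmdma_status &= ~DMA_SATA_IRQ;

	return bmdma_status & (DMA_COMPLETE | DMA_SATA_IRQ);
}

/* When thawing a port: only enable the interrupt source if supported. */
static void port_thaw(uint32_t port_flags, volatile uint32_t *sien_reg,
		      uint32_t sien_enable)
{
	if (!(port_flags & FLAG_NO_SATA_IRQ))
		*sien_reg = sien_enable;
}

int main(void)
{
	uint32_t mmio_sien = 0;

	port_thaw(FLAG_NO_SATA_IRQ, &mmio_sien, 0xffff);		/* stays disabled */
	return port_has_work(FLAG_NO_SATA_IRQ, DMA_SATA_IRQ) ? 1 : 0;	/* 0: masked out */
}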
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index c8b477c67247..b5f8fa955679 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -31,7 +31,7 @@
#include <asm/io.h>
#define DRV_NAME "sata_sil24"
-#define DRV_VERSION "0.24"
+#define DRV_VERSION "0.3"
/*
* Port request block (PRB) 32 bytes
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index c94b870cf378..7566c2cabaf7 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -54,7 +54,7 @@
#endif /* CONFIG_PPC_OF */
#define DRV_NAME "sata_svw"
-#define DRV_VERSION "1.8"
+#define DRV_VERSION "2.0"
enum {
/* Taskfile registers offsets */
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index f668c997e9af..64f3c1aeed21 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -37,7 +37,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
-#define DRV_VERSION "0.6"
+#define DRV_VERSION "1.0"
enum {
uli_5289 = 0,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 322890b400a6..501ce1791782 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -47,7 +47,7 @@
#include <asm/io.h>
#define DRV_NAME "sata_via"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "2.0"
enum board_ids_enum {
vt6420,
@@ -335,10 +335,10 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if ((pci_resource_start(pdev, i) == 0) ||
(pci_resource_len(pdev, i) < bar_sizes[i])) {
dev_printk(KERN_ERR, &pdev->dev,
- "invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n",
- i,
- pci_resource_start(pdev, i),
- pci_resource_len(pdev, i));
+ "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
+ i,
+ (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long long)pci_resource_len(pdev, i));
rc = -ENODEV;
goto err_out_regions;
}
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 6d0c4f18e652..616fd9634b4b 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -47,7 +47,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_vsc"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "2.0"
enum {
/* Interrupt register offsets (from chip base address) */
@@ -443,16 +443,12 @@ err_out:
}
-/*
- * Intel 31244 is supposed to be identical.
- * Compatibility is untested as of yet.
- */
static const struct pci_device_id vsc_sata_pci_tbl[] = {
- { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
+ { PCI_VENDOR_ID_VITESSE, 0x7174,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244,
+ { PCI_VENDOR_ID_INTEL, 0x3200,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
- { }
+ { } /* terminate list */
};
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1272dd249af3..b5218fc0ac86 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2818,7 +2818,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
(cmdstatp->sense_hdr.sense_key == NO_SENSE ||
cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
undone == 0) {
- ioctl_result = 0; /* EOF written succesfully at EOM */
+ ioctl_result = 0; /* EOF written successfully at EOM */
if (fileno >= 0)
fileno++;
STps->drv_file = fileno;
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index b88a7c1158af..bff94541991c 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -131,17 +131,6 @@ static int m68328_console_baud = CONSOLE_BAUD_RATE;
static int m68328_console_cbaud = DEFAULT_CBAUD;
-/*
- * tmp_buf is used as a temporary buffer by serial_write. We need to
- * lock it in case the memcpy_fromfs blocks while swapping in a page,
- * and some other program tries to do a serial write at the same time.
- * Since the lock will only come under contention when the system is
- * swapping and available memory is low, it makes sense to share one
- * buffer across all the serial ports, since it significantly saves
- * memory if large numbers of serial ports are open.
- */
-static unsigned char tmp_buf[SERIAL_XMIT_SIZE]; /* This is cheating */
-
static inline int serial_paranoia_check(struct m68k_serial *info,
char *name, const char *routine)
{
@@ -211,16 +200,16 @@ static void rs_stop(struct tty_struct *tty)
if (serial_paranoia_check(info, tty->name, "rs_stop"))
return;
- save_flags(flags); cli();
+ local_irq_save(flags);
uart->ustcnt &= ~USTCNT_TXEN;
- restore_flags(flags);
+ local_irq_restore(flags);
}
static void rs_put_char(char ch)
{
int flags, loops = 0;
- save_flags(flags); cli();
+ local_irq_save(flags);
while (!(UTX & UTX_TX_AVAIL) && (loops < 1000)) {
loops++;
@@ -229,7 +218,7 @@ static void rs_put_char(char ch)
UTX_TXDATA = ch;
udelay(5);
- restore_flags(flags);
+ local_irq_restore(flags);
}
static void rs_start(struct tty_struct *tty)
@@ -241,7 +230,7 @@ static void rs_start(struct tty_struct *tty)
if (serial_paranoia_check(info, tty->name, "rs_start"))
return;
- save_flags(flags); cli();
+ local_irq_save(flags);
if (info->xmit_cnt && info->xmit_buf && !(uart->ustcnt & USTCNT_TXEN)) {
#ifdef USE_INTS
uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
@@ -249,7 +238,7 @@ static void rs_start(struct tty_struct *tty)
uart->ustcnt |= USTCNT_TXEN;
#endif
}
- restore_flags(flags);
+ local_irq_restore(flags);
}
/* Drop into either the boot monitor or kadb upon receiving a break
@@ -327,14 +316,6 @@ static void receive_chars(struct m68k_serial *info, struct pt_regs *regs,
if(!tty)
goto clear_and_exit;
- /*
- * Make sure that we do not overflow the buffer
- */
- if (tty_request_buffer_room(tty, 1) == 0) {
- tty_schedule_flip(tty);
- return;
- }
-
flag = TTY_NORMAL;
if(rx & URX_PARITY_ERROR) {
@@ -473,7 +454,7 @@ static int startup(struct m68k_serial * info)
return -ENOMEM;
}
- save_flags(flags); cli();
+ local_irq_save(flags);
/*
* Clear the FIFO buffers and disable them
@@ -506,7 +487,7 @@ static int startup(struct m68k_serial * info)
change_speed(info);
info->flags |= S_INITIALIZED;
- restore_flags(flags);
+ local_irq_restore(flags);
return 0;
}
@@ -523,7 +504,7 @@ static void shutdown(struct m68k_serial * info)
if (!(info->flags & S_INITIALIZED))
return;
- save_flags(flags); cli(); /* Disable interrupts */
+ local_irq_save(flags);
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
@@ -534,7 +515,7 @@ static void shutdown(struct m68k_serial * info)
set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~S_INITIALIZED;
- restore_flags(flags);
+ local_irq_restore(flags);
}
struct {
@@ -655,24 +636,24 @@ static void rs_fair_output(void)
if (info == 0) return;
if (info->xmit_buf == 0) return;
- save_flags(flags); cli();
+ local_irq_save(flags);
left = info->xmit_cnt;
while (left != 0) {
c = info->xmit_buf[info->xmit_tail];
info->xmit_tail = (info->xmit_tail+1) & (SERIAL_XMIT_SIZE-1);
info->xmit_cnt--;
- restore_flags(flags);
+ local_irq_restore(flags);
rs_put_char(c);
- save_flags(flags); cli();
+ local_irq_save(flags);
left = min(info->xmit_cnt, left-1);
}
/* Last character is being transmitted now (hopefully). */
udelay(5);
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
@@ -720,11 +701,11 @@ static void rs_flush_chars(struct tty_struct *tty)
#endif
/* Enable transmitter */
- save_flags(flags); cli();
+ local_irq_save(flags);
if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
!info->xmit_buf) {
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
@@ -749,7 +730,7 @@ static void rs_flush_chars(struct tty_struct *tty)
while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
}
#endif
- restore_flags(flags);
+ local_irq_restore(flags);
}
extern void console_printn(const char * b, int count);
@@ -768,18 +749,22 @@ static int rs_write(struct tty_struct * tty,
if (!tty || !info->xmit_buf)
return 0;
- save_flags(flags);
+ local_save_flags(flags);
while (1) {
- cli();
+ local_irq_disable();
c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
SERIAL_XMIT_SIZE - info->xmit_head));
+ local_irq_restore(flags);
+
if (c <= 0)
break;
memcpy(info->xmit_buf + info->xmit_head, buf, c);
+
+ local_irq_disable();
info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
info->xmit_cnt += c;
- restore_flags(flags);
+ local_irq_restore(flags);
buf += c;
count -= c;
total += c;
@@ -787,7 +772,7 @@ static int rs_write(struct tty_struct * tty,
if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
/* Enable transmitter */
- cli();
+ local_irq_disable();
#ifndef USE_INTS
while(info->xmit_cnt) {
#endif
@@ -807,9 +792,9 @@ static int rs_write(struct tty_struct * tty,
#ifndef USE_INTS
}
#endif
- restore_flags(flags);
+ local_irq_restore(flags);
}
- restore_flags(flags);
+
return total;
}
@@ -838,12 +823,13 @@ static int rs_chars_in_buffer(struct tty_struct *tty)
static void rs_flush_buffer(struct tty_struct *tty)
{
struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
return;
- cli();
+ local_irq_save(flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- sti();
+ local_irq_restore(flags);
tty_wakeup(tty);
}
@@ -973,14 +959,15 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
m68328_uart *uart = &uart_addr[info->line];
#endif
unsigned char status;
+ unsigned long flags;
- cli();
+ local_irq_save(flags);
#ifdef CONFIG_SERIAL_68328_RTS_CTS
status = (uart->utx.w & UTX_CTS_STAT) ? 1 : 0;
#else
status = 0;
#endif
- sti();
+ local_irq_restore(flags);
put_user(status,value);
return 0;
}
@@ -994,14 +981,13 @@ static void send_break(struct m68k_serial * info, unsigned int duration)
unsigned long flags;
if (!info->port)
return;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
#ifdef USE_INTS
uart->utx.w |= UTX_SEND_BREAK;
msleep_interruptible(duration);
uart->utx.w &= ~UTX_SEND_BREAK;
#endif
- restore_flags(flags);
+ local_irq_restore(flags);
}
static int rs_ioctl(struct tty_struct *tty, struct file * file,
@@ -1060,7 +1046,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
(struct serial_struct *) arg);
case TIOCSERGETLSR: /* Get line status register */
if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int));
+ sizeof(unsigned int)))
return get_lsr_info(info, (unsigned int *) arg);
return -EFAULT;
case TIOCSERGSTRUCT:
@@ -1113,10 +1099,10 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
return;
- save_flags(flags); cli();
+ local_irq_save(flags);
if (tty_hung_up_p(filp)) {
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
@@ -1138,7 +1124,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
info->count = 0;
}
if (info->count) {
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
info->flags |= S_CLOSING;
@@ -1186,7 +1172,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
}
info->flags &= ~(S_NORMAL_ACTIVE|S_CLOSING);
wake_up_interruptible(&info->close_wait);
- restore_flags(flags);
+ local_irq_restore(flags);
}
/*
@@ -1262,9 +1248,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
info->count--;
info->blocked_open++;
while (1) {
- cli();
+ local_irq_disable();
m68k_rtsdtr(info, 1);
- sti();
+ local_irq_enable();
current->state = TASK_INTERRUPTIBLE;
if (tty_hung_up_p(filp) ||
!(info->flags & S_INITIALIZED)) {
@@ -1444,7 +1430,7 @@ rs68328_init(void)
return -ENOMEM;
}
- save_flags(flags); cli();
+ local_irq_save(flags);
for(i=0;i<NR_PORTS;i++) {
@@ -1489,7 +1475,7 @@ rs68328_init(void)
serial_pm[i]->data = info;
#endif
}
- restore_flags(flags);
+ local_irq_restore(flags);
return 0;
}
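The 68328serial conversion above replaces the long-deprecated save_flags()/cli()/restore_flags() and bare cli()/sti() pairs with local_irq_save()/local_irq_restore() (or local_irq_disable()/local_irq_enable() where no flags word is kept), adding the missing "unsigned long flags" locals as needed. A minimal sketch of the before/after shape; the two macros below are userspace stand-ins for the real irqflags primitives so the example compiles on its own:

/* Stand-ins for the kernel primitives; in the kernel these mask local IRQs. */
static unsigned long fake_irq_state = 1;

#define local_irq_save(flags)    do { (flags) = fake_irq_state; fake_irq_state = 0; } while (0)
#define local_irq_restore(flags) do { fake_irq_state = (flags); } while (0)

struct xmit_ring {
	char buf[4096];
	int head, tail, cnt;
};

/*
 * Old style (what the patch removes):
 *	save_flags(flags); cli();
 *	...critical section...
 *	restore_flags(flags);
 *
 * New style: an explicit flags word, with the critical section bracketed by
 * local_irq_save()/local_irq_restore().
 */
static void ring_flush(struct xmit_ring *r)
{
	unsigned long flags;

	local_irq_save(flags);
	r->cnt = r->head = r->tail = 0;
	local_irq_restore(flags);
}

int main(void)
{
	struct xmit_ring r = { .cnt = 3 };

	ring_flush(&r);
	return r.cnt;	/* 0 */
}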
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 94886c000d2a..864ef859be56 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -594,8 +594,8 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
else
offset += idx * board->uart_offset;
- maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) /
- (8 << board->reg_shift);
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+ (board->reg_shift + 3);
if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
return 1;
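The 8250_pci hunk replaces a division by "8 << reg_shift" with a right shift by "reg_shift + 3". For non-negative values the two are identical, since 8 << s equals 2^(s+3) and dividing by a power of two is a right shift by the exponent. A quick check of the equivalence with made-up resource sizes:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long len = 0x4000, first_offset = 0x10;	/* example values only */

	for (unsigned int reg_shift = 0; reg_shift < 4; reg_shift++) {
		unsigned long by_div   = (len - first_offset) / (8 << reg_shift);
		unsigned long by_shift = (len - first_offset) >> (reg_shift + 3);

		assert(by_div == by_shift);
		printf("reg_shift=%u -> maxnr=%lu\n", reg_shift, by_shift);
	}
	return 0;
}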
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index b79ed0665d51..739bc84f91e9 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -323,8 +323,10 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "USR9180", 0 },
/* U.S. Robotics 56K Voice INT PnP*/
{ "USR9190", 0 },
- /* HP Compaq Tablet PC tc1100 Wacom tablet */
+ /* Wacom tablets */
+ { "WACF004", 0 },
{ "WACF005", 0 },
+ { "WACF006", 0 },
/* Rockwell's (PORALiNK) 33600 INT PNP */
{ "WCI0003", 0 },
	/* Unknown PnP modems */
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 89700141f87e..5cacc5e74a92 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -2573,12 +2573,6 @@ static void flush_to_flip_buffer(struct e100_serial *info)
DFLIP(
if (1) {
-
- if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
- DEBUG_LOG(info->line, "*** TTY_DONT_FLIP set flip.count %i ***\n", tty->flip.count);
- DEBUG_LOG(info->line, "*** recv_cnt %i\n", info->recv_cnt);
- } else {
- }
DEBUG_LOG(info->line, "*** rxtot %i\n", info->icount.rx);
DEBUG_LOG(info->line, "ldisc %lu\n", tty->ldisc.chars_in_buffer(tty));
DEBUG_LOG(info->line, "room %lu\n", tty->ldisc.receive_room(tty));
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 7d823705193c..f8262e6ad8d3 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -589,13 +589,6 @@ void jsm_input(struct jsm_channel *ch)
ld = tty_ldisc_ref(tp);
/*
- * If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
- */
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
- len = 0;
-
- /*
* If we were unable to get a reference to the ld,
* don't flush our buffer, and act like the ld doesn't
* have any space to put the data right now.
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 8cbbb954df2c..8ad242934368 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -60,11 +60,11 @@ struct timer_list mcfrs_timer_struct;
#if defined(CONFIG_HW_FEITH)
#define CONSOLE_BAUD_RATE 38400
#define DEFAULT_CBAUD B38400
-#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB)
+#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB)
#define CONSOLE_BAUD_RATE 115200
#define DEFAULT_CBAUD B115200
#elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \
- defined(CONFIG_senTec) || defined(CONFIG_SNEHA)
+ defined(CONFIG_senTec) || defined(CONFIG_SNEHA) || defined(CONFIG_AVNET)
#define CONSOLE_BAUD_RATE 19200
#define DEFAULT_CBAUD B19200
#endif
@@ -93,7 +93,7 @@ static struct tty_driver *mcfrs_serial_driver;
#undef SERIAL_DEBUG_FLOW
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#define IRQBASE (MCFINT_VECBASE+MCFINT_UART0)
#else
#define IRQBASE 73
@@ -1545,6 +1545,28 @@ static void mcfrs_irqinit(struct mcf_serial *info)
*feci2c_par |= MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2
| MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2;
}
+#elif defined(CONFIG_M532x)
+ volatile unsigned char *uartp;
+ uartp = info->addr;
+ switch (info->line) {
+ case 0:
+ MCF_INTC0_ICR26 = 0x3;
+ MCF_INTC0_CIMR = 26;
+ /* GPIO initialization */
+ MCF_GPIO_PAR_UART |= 0x000F;
+ break;
+ case 1:
+ MCF_INTC0_ICR27 = 0x3;
+ MCF_INTC0_CIMR = 27;
+ /* GPIO initialization */
+ MCF_GPIO_PAR_UART |= 0x0FF0;
+ break;
+ case 2:
+ MCF_INTC0_ICR28 = 0x3;
+ MCF_INTC0_CIMR = 28;
+			/* GPIOs also must be initialized, depends on board */
+ break;
+ }
#else
volatile unsigned char *icrp, *uartp;
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 501316b198e5..ed946311d3a4 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -26,7 +26,7 @@ static DECLARE_RWSEM(ioc3_devices_rwsem);
static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES];
static struct ioc3_submodule *ioc3_ethernet;
-static rwlock_t ioc3_submodules_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ioc3_submodules_lock);
/* NIC probing code */
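This hunk (and the hp680_bl.c one further down) replaces the old static initializer constants RW_LOCK_UNLOCKED / SPIN_LOCK_UNLOCKED with DEFINE_RWLOCK()/DEFINE_SPINLOCK(), which define a correctly initialized lock in one step and, among other things, keep working when lock-debugging options change the structure layout. A sketch of the idea with a toy lock type standing in for rwlock_t:

#include <stdio.h>

/* Toy lock type standing in for rwlock_t; "state" is illustrative only. */
typedef struct { volatile int state; } toy_rwlock_t;

/*
 * Old style: a magic initializer constant that every user spelled out, and
 * that silently breaks if the structure grows extra (debug) fields.
 *
 *	static toy_rwlock_t lock = { 0 };
 *
 * New style: one macro owns the layout of the initializer.
 */
#define TOY_RWLOCK_INIT		{ .state = 0 }
#define DEFINE_TOY_RWLOCK(name)	toy_rwlock_t name = TOY_RWLOCK_INIT

static DEFINE_TOY_RWLOCK(submodules_lock);

int main(void)
{
	printf("lock starts unlocked: %d\n", submodules_lock.state == 0);
	return 0;
}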
diff --git a/drivers/sn/ioc4.c b/drivers/sn/ioc4.c
index 8256a97eb508..8562821e6498 100644
--- a/drivers/sn/ioc4.c
+++ b/drivers/sn/ioc4.c
@@ -438,7 +438,7 @@ static struct pci_device_id ioc4_id_table[] = {
{0}
};
-static struct pci_driver __devinitdata ioc4_driver = {
+static struct pci_driver ioc4_driver = {
.name = "IOC4",
.id_table = ioc4_id_table,
.probe = ioc4_probe,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1cea4a6799fe..ed1cdf6ac8f3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -210,6 +210,7 @@ spi_new_device(struct spi_master *master, struct spi_board_info *chip)
proxy->master = master;
proxy->chip_select = chip->chip_select;
proxy->max_speed_hz = chip->max_speed_hz;
+ proxy->mode = chip->mode;
proxy->irq = chip->irq;
proxy->modalias = chip->modalias;
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 5578a9dd04e8..f6b2948ab288 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5712,7 +5712,7 @@ static int ixj_daa_write(IXJ *j)
return 1;
}
-int ixj_set_tone_off(unsigned short arg, IXJ *j)
+static int ixj_set_tone_off(unsigned short arg, IXJ *j)
{
j->tone_off_time = arg;
if (ixj_WriteDSPCommand(0x6E05, j)) /* Set Tone Off Period */
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
index 2fe7fd19437b..4a22909518f5 100644
--- a/drivers/usb/host/hc_crisv10.c
+++ b/drivers/usb/host/hc_crisv10.c
@@ -411,8 +411,7 @@ static inline void urb_list_move_last(struct urb *urb, int epid)
urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
assert(urb_entry);
- list_del(&urb_entry->list);
- list_add_tail(&urb_entry->list, &urb_list[epid]);
+ list_move_tail(&urb_entry->list, &urb_list[epid]);
}
/* Get the next urb in the list. */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 6b4bc3f2bd86..89bcda5a3298 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1684,9 +1684,13 @@ sl811h_probe(struct platform_device *dev)
if (!addr || !data)
return -ENODEV;
ioaddr = 1;
-
- addr_reg = (void __iomem *) addr->start;
- data_reg = (void __iomem *) data->start;
+ /*
+ * NOTE: 64-bit resource->start is getting truncated
+ * to avoid compiler warning, assuming that ->start
+ * is always 32-bit for this case
+ */
+ addr_reg = (void __iomem *) (unsigned long) addr->start;
+ data_reg = (void __iomem *) (unsigned long) data->start;
} else {
addr_reg = ioremap(addr->start, 1);
if (addr_reg == NULL) {
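The sl811-hcd change narrows resource->start through "unsigned long" before casting to a pointer, as the added comment says, to silence the warning about converting a possibly 64-bit integer to a 32-bit pointer while making the assumed-safe truncation explicit. A tiny sketch of the pattern with a hypothetical address value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x0000000040000000ull;		/* hypothetical MMIO base */

	/*
	 * Casting "start" straight to a pointer can warn on 32-bit builds
	 * (cast from integer of different size); narrowing via unsigned long
	 * first makes the truncation deliberate.
	 */
	void *regs = (void *)(unsigned long)start;

	printf("mapped at %p\n", regs);
	return 0;
}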
diff --git a/drivers/usb/input/fixp-arith.h b/drivers/usb/input/fixp-arith.h
index b44d398de071..ed3d2da0c485 100644
--- a/drivers/usb/input/fixp-arith.h
+++ b/drivers/usb/input/fixp-arith.h
@@ -2,8 +2,6 @@
#define _FIXP_ARITH_H
/*
- * $$
- *
* Simplistic fixed-point arithmetics.
* Hmm, I'm probably duplicating some code :(
*
@@ -31,20 +29,20 @@
#include <linux/types.h>
-// The type representing fixed-point values
+/* The type representing fixed-point values */
typedef s16 fixp_t;
#define FRAC_N 8
#define FRAC_MASK ((1<<FRAC_N)-1)
-// Not to be used directly. Use fixp_{cos,sin}
-static const fixp_t cos_table[45] = {
+/* Not to be used directly. Use fixp_{cos,sin} */
+static const fixp_t cos_table[46] = {
0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
- 0x002C, 0x0023, 0x001A, 0x0011, 0x0008
+ 0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
};
@@ -68,9 +66,8 @@ static inline fixp_t fixp_cos(unsigned int degrees)
int quadrant = (degrees / 90) & 3;
unsigned int i = degrees % 90;
- if (quadrant == 1 || quadrant == 3) {
- i = 89 - i;
- }
+ if (quadrant == 1 || quadrant == 3)
+ i = 90 - i;
i >>= 1;
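The fixp-arith change grows the quarter-wave table from 45 to 46 entries (2-degree steps from 0 to 90 inclusive) and mirrors with "90 - i" instead of "89 - i", so cos(90) lands exactly on the final 0x0000 entry. A rough standalone sketch of the same quarter-table scheme; unlike the header it generates the table at run time, so it is an illustration rather than the kernel code:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

#define PI	3.14159265358979323846
#define FRAC_N	8			/* 8 fractional bits, as in fixp-arith.h */
typedef int16_t fixp_t;

static fixp_t quarter[46];		/* cos(0), cos(2), ..., cos(90) */

static void build_table(void)
{
	for (int i = 0; i <= 45; i++)
		quarter[i] = (fixp_t)lround(cos(2.0 * i * PI / 180.0) * (1 << FRAC_N));
}

static fixp_t fixp_cos_sketch(unsigned int degrees)
{
	int quadrant = (degrees / 90) & 3;
	unsigned int i = degrees % 90;

	if (quadrant == 1 || quadrant == 3)
		i = 90 - i;		/* mirror into the first quadrant */
	i >>= 1;			/* table is in 2-degree steps */

	return (quadrant == 1 || quadrant == 2) ? -quarter[i] : quarter[i];
}

int main(void)
{
	build_table();
	printf("cos(0)   = %d/256\n", fixp_cos_sketch(0));
	printf("cos(90)  = %d/256\n", fixp_cos_sketch(90));	/* exactly 0 with the 46th entry */
	printf("cos(180) = %d/256\n", fixp_cos_sketch(180));
	return 0;
}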
diff --git a/drivers/usb/input/hid-debug.h b/drivers/usb/input/hid-debug.h
index 702c48c2f81b..f04d6d75c098 100644
--- a/drivers/usb/input/hid-debug.h
+++ b/drivers/usb/input/hid-debug.h
@@ -563,7 +563,7 @@ static char *keys[KEY_MAX + 1] = {
[KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
[KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
[KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
- [KEY_HANGUEL] = "Hanguel", [KEY_HANJA] = "Hanja",
+ [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
[KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
[KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
[KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 9432c7302275..d7f3f736a692 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -453,8 +453,7 @@ static void ir_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
tty = port->tty;
/*
- * FIXME: must not do this in IRQ context,
- * must honour TTY_DONT_FLIP
+ * FIXME: must not do this in IRQ context
*/
tty->ldisc.receive_buf(
tty,
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5b06fa366098..56ffc81302fc 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -686,19 +686,16 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
usb_kill_urb(urb);
- list_del(tmp);
- list_add(tmp, &info->rx_urbs_free);
- }
- list_for_each_safe(tmp, tmp2, &info->rx_urb_q) {
- list_del(tmp);
- list_add(tmp, &info->rx_urbs_free);
+ list_move(tmp, &info->rx_urbs_free);
}
+ list_for_each_safe(tmp, tmp2, &info->rx_urb_q)
+ list_move(tmp, &info->rx_urbs_free);
+
list_for_each_safe(tmp, tmp2, &info->tx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
usb_kill_urb(urb);
- list_del(tmp);
- list_add(tmp, &info->tx_urbs_free);
+ list_move(tmp, &info->tx_urbs_free);
}
spin_unlock_irqrestore(&info->lock, flags);
@@ -1080,8 +1077,7 @@ static void whiteheat_write_callback(struct urb *urb, struct pt_regs *regs)
err("%s - Not my urb!", __FUNCTION__);
return;
}
- list_del(&wrap->list);
- list_add(&wrap->list, &info->tx_urbs_free);
+ list_move(&wrap->list, &info->tx_urbs_free);
spin_unlock(&info->lock);
if (urb->status) {
@@ -1371,8 +1367,7 @@ static int start_port_read(struct usb_serial_port *port)
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
usb_kill_urb(urb);
- list_del(tmp);
- list_add(tmp, &info->rx_urbs_free);
+ list_move(tmp, &info->rx_urbs_free);
}
break;
}
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 009fb0953a56..5284abe1b5eb 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -160,10 +160,10 @@ struct us_data {
};
/* Convert between us_data and the corresponding Scsi_Host */
-static struct Scsi_Host inline *us_to_host(struct us_data *us) {
+static inline struct Scsi_Host *us_to_host(struct us_data *us) {
return container_of((void *) us, struct Scsi_Host, hostdata);
}
-static struct us_data inline *host_to_us(struct Scsi_Host *host) {
+static inline struct us_data *host_to_us(struct Scsi_Host *host) {
return (struct us_data *) host->hostdata;
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 168ede7902bd..17de4c84db69 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -4,6 +4,21 @@
menu "Graphics support"
+config FIRMWARE_EDID
+ bool "Enable firmware EDID"
+ default y
+ ---help---
+ This enables access to the EDID transferred from the firmware.
+ On the i386, this is from the Video BIOS. Enable this if DDC/I2C
+ transfers do not work for your driver and if you are using
+ nvidiafb, i810fb or savagefb.
+
+ In general, choosing Y for this option is safe. If you
+ experience extremely long delays while booting before you get
+ something on your display, try setting this to N. Matrox cards in
+ combination with certain motherboards and monitors are known to
+ suffer from this problem.
+
config FB
tristate "Support for frame buffer devices"
---help---
@@ -70,22 +85,6 @@ config FB_MACMODES
depends on FB
default n
-config FB_FIRMWARE_EDID
- bool "Enable firmware EDID"
- depends on FB
- default y
- ---help---
- This enables access to the EDID transferred from the firmware.
- On the i386, this is from the Video BIOS. Enable this if DDC/I2C
- transfers do not work for your driver and if you are using
- nvidiafb, i810fb or savagefb.
-
- In general, choosing Y for this option is safe. If you
- experience extremely long delays while booting before you get
- something on your display, try setting this to N. Matrox cards in
- combination with certain motherboards and monitors are known to
- suffer from this problem.
-
config FB_BACKLIGHT
bool
depends on FB
@@ -551,10 +550,14 @@ config FB_VESA
You will get a boot time penguin logo at no additional cost. Please
read <file:Documentation/fb/vesafb.txt>. If unsure, say Y.
-config VIDEO_SELECT
- bool
- depends on FB_VESA
- default y
+config FB_IMAC
+ bool "Intel-based Macintosh Framebuffer Support"
+ depends on (FB = y) && X86
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the frame buffer device driver for the Intel-based Macintosh
config FB_HGA
tristate "Hercules mono graphics support"
@@ -578,12 +581,6 @@ config FB_HGA_ACCEL
This will compile the Hercules mono graphics with
acceleration functions.
-
-config VIDEO_SELECT
- bool
- depends on (FB = y) && X86
- default y
-
config FB_SGIVW
tristate "SGI Visual Workstation framebuffer support"
depends on FB && X86_VISWS
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 23de3b2c7856..c335e9bc3b20 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -4,15 +4,15 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_VT) += console/
-obj-$(CONFIG_LOGO) += logo/
-obj-$(CONFIG_SYSFS) += backlight/
-
obj-$(CONFIG_FB) += fb.o
fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
modedb.o fbcvt.o
fb-objs := $(fb-y)
+obj-$(CONFIG_VT) += console/
+obj-$(CONFIG_LOGO) += logo/
+obj-$(CONFIG_SYSFS) += backlight/
+
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
@@ -97,6 +97,7 @@ obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_VESA) += vesafb.o
+obj-$(CONFIG_FB_IMAC) += imacfb.o
obj-$(CONFIG_FB_VGA16) += vga16fb.o vgastate.o
obj-$(CONFIG_FB_OF) += offb.o
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index db878fd55fb2..11cf7fcb1d55 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -100,7 +100,7 @@
#ifndef CONFIG_PPC_PMAC
/* default mode */
-static struct fb_var_screeninfo default_var __initdata = {
+static struct fb_var_screeninfo default_var __devinitdata = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
640, 480, 640, 480, 0, 0, 8, 0,
{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -123,7 +123,7 @@ static struct fb_var_screeninfo default_var = {
/* default modedb mode */
/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
-static struct fb_videomode defaultmode __initdata = {
+static struct fb_videomode defaultmode __devinitdata = {
.refresh = 60,
.xres = 640,
.yres = 480,
@@ -335,7 +335,7 @@ static const struct aty128_meminfo sdr_sgram =
static const struct aty128_meminfo ddr_sgram =
{ 4, 4, 3, 3, 2, 3, 1, 16, 31, 16, "64-bit DDR SGRAM" };
-static struct fb_fix_screeninfo aty128fb_fix __initdata = {
+static struct fb_fix_screeninfo aty128fb_fix __devinitdata = {
.id = "ATY Rage128",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -345,15 +345,15 @@ static struct fb_fix_screeninfo aty128fb_fix __initdata = {
.accel = FB_ACCEL_ATI_RAGE128,
};
-static char *mode_option __initdata = NULL;
+static char *mode_option __devinitdata = NULL;
#ifdef CONFIG_PPC_PMAC
-static int default_vmode __initdata = VMODE_1024_768_60;
-static int default_cmode __initdata = CMODE_8;
+static int default_vmode __devinitdata = VMODE_1024_768_60;
+static int default_cmode __devinitdata = CMODE_8;
#endif
-static int default_crt_on __initdata = 0;
-static int default_lcd_on __initdata = 1;
+static int default_crt_on __devinitdata = 0;
+static int default_lcd_on __devinitdata = 1;
#ifdef CONFIG_MTRR
static int mtrr = 1;
@@ -445,9 +445,9 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
static int aty128_decode_var(struct fb_var_screeninfo *var,
struct aty128fb_par *par);
#if 0
-static void __init aty128_get_pllinfo(struct aty128fb_par *par,
+static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
void __iomem *bios);
-static void __init __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par);
+static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par);
#endif
static void aty128_timings(struct aty128fb_par *par);
static void aty128_init_engine(struct aty128fb_par *par);
@@ -573,7 +573,7 @@ static void aty_pll_writeupdate(const struct aty128fb_par *par)
/* write to the scratch register to test r/w functionality */
-static int __init register_test(const struct aty128fb_par *par)
+static int __devinit register_test(const struct aty128fb_par *par)
{
u32 val;
int flag = 0;
@@ -772,7 +772,7 @@ static u32 depth_to_dst(u32 depth)
#ifndef __sparc__
-static void __iomem * __init aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev)
+static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev)
{
u16 dptr;
u8 rom_type;
@@ -856,7 +856,7 @@ static void __iomem * __init aty128_map_ROM(const struct aty128fb_par *par, stru
return NULL;
}
-static void __init aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios)
+static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios)
{
unsigned int bios_hdr;
unsigned int bios_pll;
@@ -903,7 +903,7 @@ static void __iomem * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
#endif /* ndef(__sparc__) */
/* fill in known card constants if pll_block is not available */
-static void __init aty128_timings(struct aty128fb_par *par)
+static void __devinit aty128_timings(struct aty128fb_par *par)
{
#ifdef CONFIG_PPC_OF
/* instead of a table lookup, assume OF has properly
@@ -1645,7 +1645,7 @@ static int aty128fb_sync(struct fb_info *info)
}
#ifndef MODULE
-static int __init aty128fb_setup(char *options)
+static int __devinit aty128fb_setup(char *options)
{
char *this_opt;
@@ -1893,7 +1893,7 @@ static void aty128_early_resume(void *data)
}
#endif /* CONFIG_PPC_PMAC */
-static int __init aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct aty128fb_par *par = info->par;
@@ -2037,7 +2037,7 @@ static int __init aty128_init(struct pci_dev *pdev, const struct pci_device_id *
#ifdef CONFIG_PCI
/* register a card ++ajoshi */
-static int __init aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned long fb_addr, reg_addr;
struct aty128fb_par *par;
@@ -2556,7 +2556,7 @@ static int aty128_pci_resume(struct pci_dev *pdev)
}
-static int __init aty128fb_init(void)
+static int __devinit aty128fb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index c5185f7cf4ba..22e720611bf6 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -316,12 +316,12 @@ static int vram;
static int pll;
static int mclk;
static int xclk;
-static int comp_sync __initdata = -1;
+static int comp_sync __devinitdata = -1;
static char *mode;
#ifdef CONFIG_PPC
-static int default_vmode __initdata = VMODE_CHOOSE;
-static int default_cmode __initdata = CMODE_CHOOSE;
+static int default_vmode __devinitdata = VMODE_CHOOSE;
+static int default_cmode __devinitdata = CMODE_CHOOSE;
module_param_named(vmode, default_vmode, int, 0);
MODULE_PARM_DESC(vmode, "int: video mode for mac");
@@ -330,10 +330,10 @@ MODULE_PARM_DESC(cmode, "int: color mode for mac");
#endif
#ifdef CONFIG_ATARI
-static unsigned int mach64_count __initdata = 0;
-static unsigned long phys_vmembase[FB_MAX] __initdata = { 0, };
-static unsigned long phys_size[FB_MAX] __initdata = { 0, };
-static unsigned long phys_guiregbase[FB_MAX] __initdata = { 0, };
+static unsigned int mach64_count __devinitdata = 0;
+static unsigned long phys_vmembase[FB_MAX] __devinitdata = { 0, };
+static unsigned long phys_size[FB_MAX] __devinitdata = { 0, };
+static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#endif
/* top -> down is an evolution of mach64 chipset, any corrections? */
@@ -583,7 +583,7 @@ static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *p
* Apple monitor sense
*/
-static int __init read_aty_sense(const struct atyfb_par *par)
+static int __devinit read_aty_sense(const struct atyfb_par *par)
{
int sense, i;
@@ -1281,6 +1281,14 @@ static int atyfb_set_par(struct fb_info *info)
par->accel_flags = var->accel_flags; /* hack */
+ if (var->accel_flags) {
+ info->fbops->fb_sync = atyfb_sync;
+ info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ } else {
+ info->fbops->fb_sync = NULL;
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
if (par->blitter_may_be_busy)
wait_for_idle(par);
@@ -2253,7 +2261,7 @@ static void aty_bl_exit(struct atyfb_par *par)
#endif /* CONFIG_FB_ATY_BACKLIGHT */
-static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
+static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
@@ -2313,7 +2321,7 @@ static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
}
#endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */
-static int __init aty_init(struct fb_info *info, const char *name)
+static int __devinit aty_init(struct fb_info *info, const char *name)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
const char *ramname = NULL, *xtal;
@@ -2394,12 +2402,15 @@ static int __init aty_init(struct fb_info *info, const char *name)
break;
}
switch (clk_type) {
+#ifdef CONFIG_ATARI
case CLK_ATI18818_1:
par->pll_ops = &aty_pll_ati18818_1;
break;
+#else
case CLK_IBMRGB514:
par->pll_ops = &aty_pll_ibm514;
break;
+#endif
#if 0 /* dead code */
case CLK_STG1703:
par->pll_ops = &aty_pll_stg1703;
@@ -2604,7 +2615,11 @@ static int __init aty_init(struct fb_info *info, const char *name)
info->fbops = &atyfb_ops;
info->pseudo_palette = pseudo_palette;
- info->flags = FBINFO_FLAG_DEFAULT;
+ info->flags = FBINFO_DEFAULT |
+ FBINFO_HWACCEL_IMAGEBLIT |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_YPAN;
#ifdef CONFIG_PMAC_BACKLIGHT
if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) {
@@ -2733,7 +2748,7 @@ aty_init_exit:
}
#ifdef CONFIG_ATARI
-static int __init store_video_par(char *video_str, unsigned char m64_num)
+static int __devinit store_video_par(char *video_str, unsigned char m64_num)
{
char *p;
unsigned long vmembase, size, guiregbase;
@@ -3764,7 +3779,7 @@ static struct pci_driver atyfb_driver = {
#endif /* CONFIG_PCI */
#ifndef MODULE
-static int __init atyfb_setup(char *options)
+static int __devinit atyfb_setup(char *options)
{
char *this_opt;
@@ -3836,7 +3851,7 @@ static int __init atyfb_setup(char *options)
}
#endif /* MODULE */
-static int __init atyfb_init(void)
+static int __devinit atyfb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index c98f4a442134..1490e5e1c232 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -200,8 +200,6 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
if (!area->width || !area->height)
return;
if (!par->accel_flags) {
- if (par->blitter_may_be_busy)
- wait_for_idle(par);
cfb_copyarea(info, area);
return;
}
@@ -248,8 +246,6 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (!rect->width || !rect->height)
return;
if (!par->accel_flags) {
- if (par->blitter_may_be_busy)
- wait_for_idle(par);
cfb_fillrect(info, rect);
return;
}
@@ -288,14 +284,10 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
return;
if (!par->accel_flags ||
(image->depth != 1 && info->var.bits_per_pixel != image->depth)) {
- if (par->blitter_may_be_busy)
- wait_for_idle(par);
-
cfb_imageblit(info, image);
return;
}
- wait_for_idle(par);
pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
@@ -425,8 +417,6 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
}
}
- wait_for_idle(par);
-
/* restore pix_width */
wait_for_fifo(1, par);
aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
index ad8b7496f853..2a7f381c330f 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
@@ -66,11 +66,6 @@ static const u8 cursor_bits_lookup[16] = {
0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55
};
-static const u8 cursor_mask_lookup[16] = {
- 0xaa, 0x2a, 0x8a, 0x0a, 0xa2, 0x22, 0x82, 0x02,
- 0xa8, 0x28, 0x88, 0x08, 0xa0, 0x20, 0x80, 0x00
-};
-
static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
@@ -130,13 +125,13 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
fg_idx = cursor->image.fg_color;
bg_idx = cursor->image.bg_color;
- fg = (info->cmap.red[fg_idx] << 24) |
- (info->cmap.green[fg_idx] << 16) |
- (info->cmap.blue[fg_idx] << 8) | 15;
+ fg = ((info->cmap.red[fg_idx] & 0xff) << 24) |
+ ((info->cmap.green[fg_idx] & 0xff) << 16) |
+ ((info->cmap.blue[fg_idx] & 0xff) << 8) | 0xff;
- bg = (info->cmap.red[bg_idx] << 24) |
- (info->cmap.green[bg_idx] << 16) |
- (info->cmap.blue[bg_idx] << 8);
+ bg = ((info->cmap.red[bg_idx] & 0xff) << 24) |
+ ((info->cmap.green[bg_idx] & 0xff) << 16) |
+ ((info->cmap.blue[bg_idx] & 0xff) << 8);
wait_for_fifo(2, par);
aty_st_le32(CUR_CLR0, bg, par);
@@ -166,19 +161,17 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
switch (cursor->rop) {
case ROP_XOR:
// Upper 4 bits of mask data
- fb_writeb(cursor_mask_lookup[m >> 4 ] |
- cursor_bits_lookup[(b ^ m) >> 4], dst++);
+ fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++);
// Lower 4 bits of mask
- fb_writeb(cursor_mask_lookup[m & 0x0f ] |
- cursor_bits_lookup[(b ^ m) & 0x0f], dst++);
+ fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f],
+ dst++);
break;
case ROP_COPY:
// Upper 4 bits of mask data
- fb_writeb(cursor_mask_lookup[m >> 4 ] |
- cursor_bits_lookup[(b & m) >> 4], dst++);
+ fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++);
// Lower 4 bits of mask
- fb_writeb(cursor_mask_lookup[m & 0x0f ] |
- cursor_bits_lookup[(b & m) & 0x0f], dst++);
+ fb_writeb(cursor_bits_lookup[(b & m) & 0x0f],
+ dst++);
break;
}
}
@@ -194,7 +187,7 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
-int __init aty_init_cursor(struct fb_info *info)
+int __devinit aty_init_cursor(struct fb_info *info)
{
unsigned long addr;
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 7de66b855d4e..1755dddf1899 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -40,14 +40,14 @@ static int radeon_bl_get_level_brightness(struct radeon_bl_privdata *pdata,
mutex_unlock(&info->bl_mutex);
- if (pdata->negative)
- rlevel = MAX_RADEON_LEVEL - rlevel;
-
if (rlevel < 0)
rlevel = 0;
else if (rlevel > MAX_RADEON_LEVEL)
rlevel = MAX_RADEON_LEVEL;
+ if (pdata->negative)
+ rlevel = MAX_RADEON_LEVEL - rlevel;
+
return rlevel;
}
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index c5ecbb02e01d..68b15645b893 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2379,7 +2379,6 @@ err_release_pci0:
err_release_fb:
framebuffer_release(info);
err_disable:
- pci_disable_device(pdev);
err_out:
return ret;
}
@@ -2436,7 +2435,6 @@ static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
#endif
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
- pci_disable_device(pdev);
}
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 789450bb0bc9..9ef68cd83bb4 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -7,6 +7,8 @@
* Karl Lessard <klessard@sunrisetelecom.com>
* <c.pellegrin@exadron.com>
*
+ * PM support added by Rodolfo Giometti <giometti@linux.it>
+ *
* Copyright 2002 MontaVista Software
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
@@ -602,17 +604,52 @@ int au1100fb_drv_remove(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM
+static u32 sys_clksrc;
+static struct au1100fb_regs fbregs;
+
int au1100fb_drv_suspend(struct device *dev, pm_message_t state)
{
- /* TODO */
+ struct au1100fb_device *fbdev = dev_get_drvdata(dev);
+
+ if (!fbdev)
+ return 0;
+
+ /* Save the clock source state */
+ sys_clksrc = au_readl(SYS_CLKSRC);
+
+ /* Blank the LCD */
+ au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
+
+ /* Stop LCD clocking */
+ au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
+
+ memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
+
return 0;
}
int au1100fb_drv_resume(struct device *dev)
{
- /* TODO */
+ struct au1100fb_device *fbdev = dev_get_drvdata(dev);
+
+ if (!fbdev)
+ return 0;
+
+ memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
+
+ /* Restart LCD clocking */
+ au_writel(sys_clksrc, SYS_CLKSRC);
+
+ /* Unblank the LCD */
+ au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
+
return 0;
}
+#else
+#define au1100fb_drv_suspend NULL
+#define au1100fb_drv_resume NULL
+#endif
static struct device_driver au1100fb_driver = {
.name = "au1100-lcd",
@@ -706,8 +743,7 @@ void __exit au1100fb_cleanup(void)
{
driver_unregister(&au1100fb_driver);
- if (drv_info.opt_mode)
- kfree(drv_info.opt_mode);
+ kfree(drv_info.opt_mode);
}
module_init(au1100fb_init);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index b895eaaa73fd..022f9d3473f5 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -10,7 +10,7 @@ menuconfig BACKLIGHT_LCD_SUPPORT
config BACKLIGHT_CLASS_DEVICE
tristate "Lowlevel Backlight controls"
- depends on BACKLIGHT_LCD_SUPPORT
+ depends on BACKLIGHT_LCD_SUPPORT && FB
default m
help
This framework adds support for low-level control of the LCD
@@ -26,7 +26,7 @@ config BACKLIGHT_DEVICE
config LCD_CLASS_DEVICE
tristate "Lowlevel LCD controls"
- depends on BACKLIGHT_LCD_SUPPORT
+ depends on BACKLIGHT_LCD_SUPPORT && FB
default m
help
This framework adds support for low-level control of LCD.
@@ -50,6 +50,14 @@ config BACKLIGHT_CORGI
If you have a Sharp Zaurus SL-C7xx, SL-Cxx00 or SL-6000x say y to enable the
backlight driver.
+config BACKLIGHT_LOCOMO
+ tristate "Sharp LOCOMO LCD/Backlight Driver"
+ depends on BACKLIGHT_DEVICE && SHARP_LOCOMO
+ default y
+ help
+ If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
+ enable the LCD/backlight driver.
+
config BACKLIGHT_HP680
tristate "HP Jornada 680 Backlight Driver"
depends on BACKLIGHT_DEVICE && SH_HP6XX
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 744210c38e74..65e5553fc849 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
-obj-$(CONFIG_SHARP_LOCOMO) += locomolcd.o
+obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index a71e984c93d4..ffc72ae3ada8 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -27,7 +27,7 @@
static int hp680bl_suspended;
static int current_intensity = 0;
-static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bl_lock);
static struct backlight_device *hp680_backlight_device;
static void hp680bl_send_intensity(struct backlight_device *bd)
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 60831bb23685..bd879b7ec119 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -17,6 +17,8 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
#include <asm/hardware/locomo.h>
#include <asm/irq.h>
@@ -25,7 +27,10 @@
#include "../../../arch/arm/mach-sa1100/generic.h"
+static struct backlight_device *locomolcd_bl_device;
static struct locomo_dev *locomolcd_dev;
+static unsigned long locomolcd_flags;
+#define LOCOMOLCD_SUSPENDED 0x01
static void locomolcd_on(int comadj)
{
@@ -89,12 +94,10 @@ void locomolcd_power(int on)
}
/* read comadj */
- if (comadj == -1) {
- if (machine_is_poodle())
- comadj = 118;
- if (machine_is_collie())
- comadj = 128;
- }
+ if (comadj == -1 && machine_is_collie())
+ comadj = 128;
+ if (comadj == -1 && machine_is_poodle())
+ comadj = 118;
if (on)
locomolcd_on(comadj);
@@ -105,26 +108,100 @@ void locomolcd_power(int on)
}
EXPORT_SYMBOL(locomolcd_power);
-static int poodle_lcd_probe(struct locomo_dev *dev)
+
+static int current_intensity;
+
+static int locomolcd_set_intensity(struct backlight_device *bd)
+{
+ int intensity = bd->props->brightness;
+
+ if (bd->props->power != FB_BLANK_UNBLANK)
+ intensity = 0;
+ if (bd->props->fb_blank != FB_BLANK_UNBLANK)
+ intensity = 0;
+ if (locomolcd_flags & LOCOMOLCD_SUSPENDED)
+ intensity = 0;
+
+ switch (intensity) {
+ /* AC and non-AC are handled differently, but produce same results in sharp code? */
+ case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break;
+ case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break;
+ case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break;
+ case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break;
+ case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break;
+
+ default:
+ return -ENODEV;
+ }
+ current_intensity = intensity;
+ return 0;
+}
+
+static int locomolcd_get_intensity(struct backlight_device *bd)
+{
+ return current_intensity;
+}
+
+static struct backlight_properties locomobl_data = {
+ .owner = THIS_MODULE,
+ .get_brightness = locomolcd_get_intensity,
+ .update_status = locomolcd_set_intensity,
+ .max_brightness = 4,
+};
+
+#ifdef CONFIG_PM
+static int locomolcd_suspend(struct locomo_dev *dev, pm_message_t state)
+{
+ locomolcd_flags |= LOCOMOLCD_SUSPENDED;
+ locomolcd_set_intensity(locomolcd_bl_device);
+ return 0;
+}
+
+static int locomolcd_resume(struct locomo_dev *dev)
+{
+ locomolcd_flags &= ~LOCOMOLCD_SUSPENDED;
+ locomolcd_set_intensity(locomolcd_bl_device);
+ return 0;
+}
+#else
+#define locomolcd_suspend NULL
+#define locomolcd_resume NULL
+#endif
+
+static int locomolcd_probe(struct locomo_dev *dev)
{
unsigned long flags;
local_irq_save(flags);
locomolcd_dev = dev;
+ locomo_gpio_set_dir(dev, LOCOMO_GPIO_FL_VR, 0);
+
/* the poodle_lcd_power function is called for the first time
* from fs_initcall, which is before locomo is activated.
* We need to recall poodle_lcd_power here*/
-#ifdef CONFIG_MACH_POODLE
- locomolcd_power(1);
-#endif
+ if (machine_is_poodle())
+ locomolcd_power(1);
+
local_irq_restore(flags);
+
+ locomolcd_bl_device = backlight_device_register("locomo-bl", NULL, &locomobl_data);
+
+ if (IS_ERR (locomolcd_bl_device))
+ return PTR_ERR (locomolcd_bl_device);
+
+ /* Set up frontlight so that screen is readable */
+ locomobl_data.brightness = 2;
+ locomolcd_set_intensity(locomolcd_bl_device);
+
return 0;
}
-static int poodle_lcd_remove(struct locomo_dev *dev)
+static int locomolcd_remove(struct locomo_dev *dev)
{
unsigned long flags;
+
+ backlight_device_unregister(locomolcd_bl_device);
local_irq_save(flags);
locomolcd_dev = NULL;
local_irq_restore(flags);
@@ -136,19 +213,33 @@ static struct locomo_driver poodle_lcd_driver = {
.name = "locomo-backlight",
},
.devid = LOCOMO_DEVID_BACKLIGHT,
- .probe = poodle_lcd_probe,
- .remove = poodle_lcd_remove,
+ .probe = locomolcd_probe,
+ .remove = locomolcd_remove,
+ .suspend = locomolcd_suspend,
+ .resume = locomolcd_resume,
};
-static int __init poodle_lcd_init(void)
+
+static int __init locomolcd_init(void)
{
int ret = locomo_driver_register(&poodle_lcd_driver);
- if (ret) return ret;
+ if (ret)
+ return ret;
#ifdef CONFIG_SA1100_COLLIE
sa1100fb_lcd_power = locomolcd_power;
#endif
return 0;
}
-device_initcall(poodle_lcd_init);
+static void __exit locomolcd_exit(void)
+{
+ locomo_driver_unregister(&poodle_lcd_driver);
+}
+
+module_init(locomolcd_init);
+module_exit(locomolcd_exit);
+
+MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>, Pavel Machek <pavel@suse.cz>");
+MODULE_DESCRIPTION("Collie LCD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index 8ba6152db2fd..ad8a89bf8eae 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -230,6 +230,7 @@ static inline void fast_imageblit(const struct fb_image *image, struct fb_info *
tab = cfb_tab16;
break;
case 32:
+ default:
tab = cfb_tab32;
break;
}
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 1103010af54a..dda240eb7360 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2227,7 +2227,6 @@ static void cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
release_region(0x3C0, 32);
pci_release_regions(pdev);
framebuffer_release(cinfo->info);
- pci_disable_device(pdev);
}
#endif /* CONFIG_PCI */
@@ -2458,7 +2457,6 @@ err_release_regions:
err_release_fb:
framebuffer_release(info);
err_disable:
- pci_disable_device(pdev);
err_out:
return ret;
}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 47ba1a79adcd..5dc4083552d8 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -125,6 +125,8 @@ static int softback_lines;
static int first_fb_vc;
static int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
+static int fbcon_has_exited;
+
/* font data */
static char fontname[40];
@@ -140,7 +142,6 @@ static const struct consw fb_con;
#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
-static void fbcon_free_font(struct display *);
static int fbcon_set_origin(struct vc_data *);
#define CURSOR_DRAW_DELAY (1)
@@ -194,6 +195,9 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
+static void fbcon_start(void);
+static void fbcon_exit(void);
+static struct class_device *fbcon_class_device;
#ifdef CONFIG_MAC
/*
@@ -252,7 +256,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
if (!ops || ops->currcon < 0 || rotate > 3)
return;
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
vc = vc_cons[i].d;
if (!vc || vc->vc_mode != KD_TEXT ||
registered_fb[con2fb_map[i]] != info)
@@ -389,15 +393,18 @@ static void fb_flashcursor(void *private)
int c;
int mode;
- if (ops->currcon != -1)
+ acquire_console_sem();
+ if (ops && ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
if (!vc || !CON_IS_VISIBLE(vc) ||
fbcon_is_inactive(vc, info) ||
registered_fb[con2fb_map[vc->vc_num]] != info ||
- vc_cons[ops->currcon].d->vc_deccm != 1)
+ vc_cons[ops->currcon].d->vc_deccm != 1) {
+ release_console_sem();
return;
- acquire_console_sem();
+ }
+
p = &fb_display[vc->vc_num];
c = scr_readw((u16 *) vc->vc_pos);
mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
@@ -528,7 +535,7 @@ static int search_fb_in_map(int idx)
{
int i, retval = 0;
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] == idx)
retval = 1;
}
@@ -539,7 +546,7 @@ static int search_for_mapped_con(void)
{
int i, retval = 0;
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != -1)
retval = 1;
}
@@ -561,6 +568,7 @@ static int fbcon_takeover(int show_logo)
err = take_over_console(&fb_con, first_fb_vc, last_fb_vc,
fbcon_is_default);
+
if (err) {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
con2fb_map[i] = -1;
@@ -795,8 +803,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (oldidx == newidx)
return 0;
- if (!info)
- err = -EINVAL;
+ if (!info || fbcon_has_exited)
+ return -EINVAL;
if (!err && !search_for_mapped_con()) {
info_idx = newidx;
@@ -832,6 +840,9 @@ static int set_con2fb_map(int unit, int newidx, int user)
con2fb_init_display(vc, info, unit, show_logo);
}
+ if (!search_fb_in_map(info_idx))
+ info_idx = newidx;
+
release_console_sem();
return err;
}
@@ -1034,6 +1045,7 @@ static const char *fbcon_startup(void)
#endif /* CONFIG_MAC */
fbcon_add_cursor_timer(info);
+ fbcon_has_exited = 0;
return display_desc;
}
@@ -1061,17 +1073,36 @@ static void fbcon_init(struct vc_data *vc, int init)
/* If we are not the first console on this
fb, copy the font from that console */
- t = &fb_display[svc->vc_num];
- if (!vc->vc_font.data) {
- vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
- vc->vc_font.width = (*default_mode)->vc_font.width;
- vc->vc_font.height = (*default_mode)->vc_font.height;
- p->userfont = t->userfont;
- if (p->userfont)
- REFCOUNT(p->fontdata)++;
+ t = &fb_display[fg_console];
+ if (!p->fontdata) {
+ if (t->fontdata) {
+ struct vc_data *fvc = vc_cons[fg_console].d;
+
+ vc->vc_font.data = (void *)(p->fontdata =
+ fvc->vc_font.data);
+ vc->vc_font.width = fvc->vc_font.width;
+ vc->vc_font.height = fvc->vc_font.height;
+ p->userfont = t->userfont;
+
+ if (p->userfont)
+ REFCOUNT(p->fontdata)++;
+ } else {
+ const struct font_desc *font = NULL;
+
+ if (!fontname[0] || !(font = find_font(fontname)))
+ font = get_default_font(info->var.xres,
+ info->var.yres);
+ vc->vc_font.width = font->width;
+ vc->vc_font.height = font->height;
+ vc->vc_font.data = (void *)(p->fontdata = font->data);
+ vc->vc_font.charcount = 256; /* FIXME Need to
+ support more fonts */
+ }
}
+
if (p->userfont)
charcnt = FNTCHARCNT(p->fontdata);
+
vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
if (charcnt == 256) {
@@ -1145,13 +1176,47 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->p = &fb_display[fg_console];
}
+static void fbcon_free_font(struct display *p)
+{
+ if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ p->fontdata = NULL;
+ p->userfont = 0;
+}
+
static void fbcon_deinit(struct vc_data *vc)
{
struct display *p = &fb_display[vc->vc_num];
+ struct fb_info *info;
+ struct fbcon_ops *ops;
+ int idx;
- if (info_idx != -1)
- return;
fbcon_free_font(p);
+ idx = con2fb_map[vc->vc_num];
+
+ if (idx == -1)
+ goto finished;
+
+ info = registered_fb[idx];
+
+ if (!info)
+ goto finished;
+
+ ops = info->fbcon_par;
+
+ if (!ops)
+ goto finished;
+
+ if (CON_IS_VISIBLE(vc))
+ fbcon_del_cursor_timer(info);
+
+ ops->flags &= ~FBCON_FLAGS_INIT;
+finished:
+
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
+ return;
}
/* ====================================================================== */
@@ -2099,12 +2164,11 @@ static int fbcon_switch(struct vc_data *vc)
if (info->fbops->fb_set_par)
info->fbops->fb_set_par(info);
- if (old_info != info) {
+ if (old_info != info)
fbcon_del_cursor_timer(old_info);
- fbcon_add_cursor_timer(info);
- }
}
+ fbcon_add_cursor_timer(info);
set_blitting_type(vc, info);
ops->cursor_reset = 1;
@@ -2222,14 +2286,6 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
return 0;
}
-static void fbcon_free_font(struct display *p)
-{
- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
- kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
- p->fontdata = NULL;
- p->userfont = 0;
-}
-
static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
{
u8 *fontdata = vc->vc_font.data;
@@ -2443,7 +2499,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
FNTSUM(new_data) = csum;
/* Check if the same font is on some other console already */
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
struct vc_data *tmp = vc_cons[i].d;
if (fb_display[i].userfont &&
@@ -2768,7 +2824,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
if (!ops || ops->currcon < 0)
return;
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
vc = vc_cons[i].d;
if (!vc || vc->vc_mode != KD_TEXT ||
registered_fb[con2fb_map[i]] != info)
@@ -2830,22 +2886,57 @@ static int fbcon_mode_deleted(struct fb_info *info,
return found;
}
+static int fbcon_fb_unregistered(int idx)
+{
+ int i;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] == idx)
+ con2fb_map[i] = -1;
+ }
+
+ if (idx == info_idx) {
+ info_idx = -1;
+
+ for (i = 0; i < FB_MAX; i++) {
+ if (registered_fb[i] != NULL) {
+ info_idx = i;
+ break;
+ }
+ }
+ }
+
+ if (info_idx != -1) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] == -1)
+ con2fb_map[i] = info_idx;
+ }
+ }
+
+ if (!num_registered_fb)
+ unregister_con_driver(&fb_con);
+
+ return 0;
+}
+
static int fbcon_fb_registered(int idx)
{
int ret = 0, i;
if (info_idx == -1) {
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map_boot[i] == idx) {
info_idx = idx;
break;
}
}
+
if (info_idx != -1)
ret = fbcon_takeover(1);
} else {
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- if (con2fb_map_boot[i] == idx)
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map_boot[i] == idx &&
+ con2fb_map[i] == -1)
set_con2fb_map(i, idx, 0);
}
}
@@ -2882,7 +2973,7 @@ static void fbcon_new_modelist(struct fb_info *info)
struct fb_var_screeninfo var;
struct fb_videomode *mode;
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (registered_fb[con2fb_map[i]] != info)
continue;
if (!fb_display[i].mode)
@@ -2910,6 +3001,14 @@ static int fbcon_event_notify(struct notifier_block *self,
struct fb_con2fbmap *con2fb;
int ret = 0;
+ /*
+ * ignore all events except driver registration and deregistration
+ * if fbcon is not active
+ */
+ if (fbcon_has_exited && !(action == FB_EVENT_FB_REGISTERED ||
+ action == FB_EVENT_FB_UNREGISTERED))
+ goto done;
+
switch(action) {
case FB_EVENT_SUSPEND:
fbcon_suspended(info);
@@ -2930,6 +3029,9 @@ static int fbcon_event_notify(struct notifier_block *self,
case FB_EVENT_FB_REGISTERED:
ret = fbcon_fb_registered(info->node);
break;
+ case FB_EVENT_FB_UNREGISTERED:
+ ret = fbcon_fb_unregistered(info->node);
+ break;
case FB_EVENT_SET_CONSOLE_MAP:
con2fb = event->data;
ret = set_con2fb_map(con2fb->console - 1,
@@ -2945,16 +3047,9 @@ static int fbcon_event_notify(struct notifier_block *self,
case FB_EVENT_NEW_MODELIST:
fbcon_new_modelist(info);
break;
- case FB_EVENT_SET_CON_ROTATE:
- fbcon_rotate(info, *(int *)event->data);
- break;
- case FB_EVENT_GET_CON_ROTATE:
- ret = fbcon_get_rotate(info);
- break;
- case FB_EVENT_SET_CON_ROTATE_ALL:
- fbcon_rotate_all(info, *(int *)event->data);
}
+done:
return ret;
}
@@ -2992,27 +3087,181 @@ static struct notifier_block fbcon_event_notifier = {
.notifier_call = fbcon_event_notify,
};
-static int __init fb_console_init(void)
+static ssize_t store_rotate(struct class_device *class_device,
+ const char *buf, size_t count)
{
- int i;
+ struct fb_info *info;
+ int rotate, idx;
+ char **last = NULL;
+
+ if (fbcon_has_exited)
+ return count;
acquire_console_sem();
- fb_register_client(&fbcon_event_notifier);
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+
+ info = registered_fb[idx];
+ rotate = simple_strtoul(buf, last, 0);
+ fbcon_rotate(info, rotate);
+err:
release_console_sem();
+ return count;
+}
- for (i = 0; i < MAX_NR_CONSOLES; i++)
- con2fb_map[i] = -1;
+static ssize_t store_rotate_all(struct class_device *class_device,
+ const char *buf, size_t count)
+{
+ struct fb_info *info;
+ int rotate, idx;
+ char **last = NULL;
+
+ if (fbcon_has_exited)
+ return count;
+
+ acquire_console_sem();
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+ info = registered_fb[idx];
+ rotate = simple_strtoul(buf, last, 0);
+ fbcon_rotate_all(info, rotate);
+err:
+ release_console_sem();
+ return count;
+}
+
+static ssize_t show_rotate(struct class_device *class_device, char *buf)
+{
+ struct fb_info *info;
+ int rotate = 0, idx;
+
+ if (fbcon_has_exited)
+ return 0;
+
+ acquire_console_sem();
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+
+ info = registered_fb[idx];
+ rotate = fbcon_get_rotate(info);
+err:
+ release_console_sem();
+ return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
+}
+
+static struct class_device_attribute class_device_attrs[] = {
+ __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
+ __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
+};
+
+static int fbcon_init_class_device(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+ class_device_create_file(fbcon_class_device,
+ &class_device_attrs[i]);
+ return 0;
+}
+
+static void fbcon_start(void)
+{
if (num_registered_fb) {
+ int i;
+
+ acquire_console_sem();
+
for (i = 0; i < FB_MAX; i++) {
if (registered_fb[i] != NULL) {
info_idx = i;
break;
}
}
+
+ release_console_sem();
fbcon_takeover(0);
}
+}
+
+static void fbcon_exit(void)
+{
+ struct fb_info *info;
+ int i, j, mapped;
+
+ if (fbcon_has_exited)
+ return;
+
+#ifdef CONFIG_ATARI
+ free_irq(IRQ_AUTO_4, fbcon_vbl_handler);
+#endif
+#ifdef CONFIG_MAC
+ if (MACH_IS_MAC && vbl_detected)
+ free_irq(IRQ_MAC_VBL, fbcon_vbl_handler);
+#endif
+
+ kfree((void *)softback_buf);
+ softback_buf = 0UL;
+
+ for (i = 0; i < FB_MAX; i++) {
+ mapped = 0;
+ info = registered_fb[i];
+
+ if (info == NULL)
+ continue;
+
+ for (j = first_fb_vc; j <= last_fb_vc; j++) {
+ if (con2fb_map[j] == i)
+ mapped = 1;
+ }
+
+ if (mapped) {
+ if (info->fbops->fb_release)
+ info->fbops->fb_release(info, 0);
+ module_put(info->fbops->owner);
+
+ if (info->fbcon_par) {
+ fbcon_del_cursor_timer(info);
+ kfree(info->fbcon_par);
+ info->fbcon_par = NULL;
+ }
+ if (info->queue.func == fb_flashcursor)
+ info->queue.func = NULL;
+ }
+ }
+
+ fbcon_has_exited = 1;
+}
+
+static int __init fb_console_init(void)
+{
+ int i;
+
+ acquire_console_sem();
+ fb_register_client(&fbcon_event_notifier);
+ fbcon_class_device =
+ class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
+
+ if (IS_ERR(fbcon_class_device)) {
+ printk(KERN_WARNING "Unable to create class_device "
+ "for fbcon; errno = %ld\n",
+ PTR_ERR(fbcon_class_device));
+ fbcon_class_device = NULL;
+ } else
+ fbcon_init_class_device();
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ con2fb_map[i] = -1;
+
+ release_console_sem();
+ fbcon_start();
return 0;
}
@@ -3020,12 +3269,24 @@ module_init(fb_console_init);
#ifdef MODULE
+static void __exit fbcon_deinit_class_device(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+ class_device_remove_file(fbcon_class_device,
+ &class_device_attrs[i]);
+}
+
static void __exit fb_console_exit(void)
{
acquire_console_sem();
fb_unregister_client(&fbcon_event_notifier);
+ fbcon_deinit_class_device();
+ class_device_destroy(fb_class, MKDEV(0, 0));
+ fbcon_exit();
release_console_sem();
- give_up_console(&fb_con);
+ unregister_con_driver(&fb_con);
}
module_exit(fb_console_exit);
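
The rotate and rotate_all attributes added above replace the FB_EVENT_SET/GET_CON_ROTATE notifier events deleted later in this patch (see the fbcon_event_notify and fbsysfs hunks). Assuming fb_class is the usual framebuffer "graphics" class, the new controls would surface as /sys/class/graphics/fbcon/rotate and .../rotate_all; a small userspace sketch, with the path assumed and error handling kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Path assumes fb_class is registered as "graphics"; adjust if not. */
        int fd = open("/sys/class/graphics/fbcon/rotate", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1\n", 2) < 0)    /* 0..3, the range fbcon_rotate_all() accepts */
                perror("write");
        close(fd);
        return 0;
}
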
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index c38c3d8e7a74..3487a636370a 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -175,6 +175,7 @@ extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
extern void fbcon_set_bitops(struct fbcon_ops *ops);
extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);
+extern struct class *fb_class;
#define FBCON_ATTRIBUTE_UNDERLINE 1
#define FBCON_ATTRIBUTE_REVERSE 2
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 7f939d066a5a..c89f90edf8ac 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -308,7 +308,7 @@ static void __init mda_initialize(void)
outb_p(0x00, mda_gfx_port);
}
-static const char __init *mdacon_startup(void)
+static const char *mdacon_startup(void)
{
mda_num_columns = 80;
mda_num_lines = 25;
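
Dropping __init from mdacon_startup() (and, below, from the promcon, sticon and vgacon startup hooks) follows from the constraint the newport hunk spells out explicitly: once console drivers can be unbound and rebound at run time, take_over_console() may invoke the startup hook long after the init sections have been freed, so it can no longer live in __init memory.
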
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e99fe30e568c..03041311711b 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -51,6 +51,7 @@ static int topscan;
static int xcurs_correction = 29;
static int newport_xsize;
static int newport_ysize;
+static int newport_has_init;
static int newport_set_def_font(int unit, struct console_font *op);
@@ -283,6 +284,15 @@ static void newport_get_revisions(void)
xcurs_correction = 21;
}
+static void newport_exit(void)
+{
+ int i;
+
+ /* free memory used by user font */
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ newport_set_def_font(i, NULL);
+}
+
/* Can't be __init, take_over_console may call it later */
static const char *newport_startup(void)
{
@@ -290,8 +300,10 @@ static const char *newport_startup(void)
if (!sgi_gfxaddr)
return NULL;
- npregs = (struct newport_regs *) /* ioremap cannot fail */
- ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
+
+ if (!npregs)
+ npregs = (struct newport_regs *)/* ioremap cannot fail */
+ ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
npregs->cset.config = NPORT_CFG_GD0;
if (newport_wait(npregs))
@@ -307,11 +319,11 @@ static const char *newport_startup(void)
newport_reset();
newport_get_revisions();
newport_get_screensize();
+ newport_has_init = 1;
return "SGI Newport";
out_unmap:
- iounmap((void *)npregs);
return NULL;
}
@@ -324,11 +336,10 @@ static void newport_init(struct vc_data *vc, int init)
static void newport_deinit(struct vc_data *c)
{
- int i;
-
- /* free memory used by user font */
- for (i = 0; i < MAX_NR_CONSOLES; i++)
- newport_set_def_font(i, NULL);
+ if (!con_is_bound(&newport_con) && newport_has_init) {
+ newport_exit();
+ newport_has_init = 0;
+ }
}
static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
@@ -728,16 +739,23 @@ const struct consw newport_con = {
#ifdef MODULE
static int __init newport_console_init(void)
{
+
+ if (!sgi_gfxaddr)
+ return NULL;
+
+ if (!npregs)
+ npregs = (struct newport_regs *)/* ioremap cannot fail */
+ ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
+
return take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
}
+module_init(newport_console_init);
static void __exit newport_console_exit(void)
{
give_up_console(&newport_con);
iounmap((void *)npregs);
}
-
-module_init(newport_console_init);
module_exit(newport_console_exit);
#endif
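
The newport changes keep the register mapping across rebinds: npregs is ioremapped only once (at module init or on the first startup call), deinit frees user fonts only when the driver is no longer bound, and the mapping itself is torn down only at module exit. A condensed sketch of that idiom; example_phys, example_con and example_exit() are hypothetical and the consw plumbing is elided:

static void __iomem *example_regs;      /* mapping survives unbind/rebind */
static unsigned long example_phys;      /* hypothetical register base     */
static int example_has_init;

static const char *example_startup(void)
{
        if (!example_regs)              /* map once, reuse on later rebinds */
                example_regs = ioremap(example_phys, 0x1000);
        if (!example_regs)
                return NULL;

        example_has_init = 1;
        return "Example console";
}

static void example_deinit(struct vc_data *c)
{
        /* free per-console state only once nothing is bound to us;
         * the mapping itself is released at module exit */
        if (!con_is_bound(&example_con) && example_has_init) {
                example_exit();
                example_has_init = 0;
        }
}
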
diff --git a/drivers/video/console/promcon.c b/drivers/video/console/promcon.c
index 04f42fcaac59..d6e6ad537f9f 100644
--- a/drivers/video/console/promcon.c
+++ b/drivers/video/console/promcon.c
@@ -109,7 +109,7 @@ promcon_end(struct vc_data *conp, char *b)
return b - p;
}
-const char __init *promcon_startup(void)
+const char *promcon_startup(void)
{
const char *display_desc = "PROM";
int node;
@@ -133,7 +133,7 @@ const char __init *promcon_startup(void)
return display_desc;
}
-static void __init
+static void
promcon_init_unimap(struct vc_data *conp)
{
mm_segment_t old_fs = get_fs();
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index fd5940f41271..45c4f227e56e 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -75,7 +75,7 @@ static inline void cursor_undrawn(void)
cursor_drawn = 0;
}
-static const char *__init sticon_startup(void)
+static const char *sticon_startup(void)
{
return "STI console";
}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index e64d42e2449e..01401cd63ac0 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -114,6 +114,7 @@ static int vga_512_chars;
static int vga_video_font_height;
static int vga_scan_lines;
static unsigned int vga_rolled_over = 0;
+static int vga_init_done;
static int __init no_scroll(char *str)
{
@@ -190,7 +191,7 @@ static void vgacon_scrollback_init(int pitch)
}
}
-static void __init vgacon_scrollback_startup(void)
+static void vgacon_scrollback_startup(void)
{
vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
* 1024);
@@ -355,7 +356,7 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
}
#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
-static const char __init *vgacon_startup(void)
+static const char *vgacon_startup(void)
{
const char *display_desc = NULL;
u16 saved1, saved2;
@@ -389,7 +390,7 @@ static const char __init *vgacon_startup(void)
vga_video_port_val = VGA_CRT_DM;
if ((ORIG_VIDEO_EGA_BX & 0xff) != 0x10) {
static struct resource ega_console_resource =
- { "ega", 0x3B0, 0x3BF };
+ { .name = "ega", .start = 0x3B0, .end = 0x3BF };
vga_video_type = VIDEO_TYPE_EGAM;
vga_vram_size = 0x8000;
display_desc = "EGA+";
@@ -397,9 +398,9 @@ static const char __init *vgacon_startup(void)
&ega_console_resource);
} else {
static struct resource mda1_console_resource =
- { "mda", 0x3B0, 0x3BB };
+ { .name = "mda", .start = 0x3B0, .end = 0x3BB };
static struct resource mda2_console_resource =
- { "mda", 0x3BF, 0x3BF };
+ { .name = "mda", .start = 0x3BF, .end = 0x3BF };
vga_video_type = VIDEO_TYPE_MDA;
vga_vram_size = 0x2000;
display_desc = "*MDA";
@@ -422,14 +423,14 @@ static const char __init *vgacon_startup(void)
if (!ORIG_VIDEO_ISVGA) {
static struct resource ega_console_resource
- = { "ega", 0x3C0, 0x3DF };
+ = { .name = "ega", .start = 0x3C0, .end = 0x3DF };
vga_video_type = VIDEO_TYPE_EGAC;
display_desc = "EGA";
request_resource(&ioport_resource,
&ega_console_resource);
} else {
static struct resource vga_console_resource
- = { "vga+", 0x3C0, 0x3DF };
+ = { .name = "vga+", .start = 0x3C0, .end = 0x3DF };
vga_video_type = VIDEO_TYPE_VGAC;
display_desc = "VGA+";
request_resource(&ioport_resource,
@@ -473,7 +474,7 @@ static const char __init *vgacon_startup(void)
}
} else {
static struct resource cga_console_resource =
- { "cga", 0x3D4, 0x3D5 };
+ { .name = "cga", .start = 0x3D4, .end = 0x3D5 };
vga_video_type = VIDEO_TYPE_CGA;
vga_vram_size = 0x2000;
display_desc = "*CGA";
@@ -523,7 +524,12 @@ static const char __init *vgacon_startup(void)
vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH;
vgacon_yres = vga_scan_lines;
- vgacon_scrollback_startup();
+
+ if (!vga_init_done) {
+ vgacon_scrollback_startup();
+ vga_init_done = 1;
+ }
+
return display_desc;
}
@@ -531,10 +537,20 @@ static void vgacon_init(struct vc_data *c, int init)
{
unsigned long p;
- /* We cannot be loaded as a module, therefore init is always 1 */
+ /*
+ * We cannot be loaded as a module, therefore init is always 1,
+ * but vgacon_init can be called more than once, and init will
+ * not be 1.
+ */
c->vc_can_do_color = vga_can_do_color;
- c->vc_cols = vga_video_num_columns;
- c->vc_rows = vga_video_num_lines;
+
+ /* set dimensions manually if init != 0 since vc_resize() will fail */
+ if (init) {
+ c->vc_cols = vga_video_num_columns;
+ c->vc_rows = vga_video_num_lines;
+ } else
+ vc_resize(c, vga_video_num_columns, vga_video_num_lines);
+
c->vc_scan_lines = vga_scan_lines;
c->vc_font.height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
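
The vgacon struct resource initializers above are converted from positional to designated form; designated initializers stay correct regardless of how the fields of struct resource are ordered and make the intent explicit. The two spellings of the first EGA resource, side by side (only one would appear in real code):

/* positional form, as removed above */
static struct resource ega_console_resource = { "ega", 0x3B0, 0x3BF };

/* designated form, as added above */
static struct resource ega_console_resource = {
        .name  = "ega",
        .start = 0x3B0,
        .end   = 0x3BF,
};
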
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index 082759447bf6..f0a621ecc288 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -605,11 +605,6 @@ static void clearfb16(struct fb_info *info)
fb_writeb(0, dst);
}
-static void epson1355fb_platform_release(struct device *device)
-{
- dev_err(device, "This driver is broken, please bug the authors so they will fix it.\n");
-}
-
static int epson1355fb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -733,13 +728,7 @@ static struct platform_driver epson1355fb_driver = {
},
};
-static struct platform_device epson1355fb_device = {
- .name = "epson1355fb",
- .id = 0,
- .dev = {
- .release = epson1355fb_platform_release,
- }
-};
+static struct platform_device *epson1355fb_device;
int __init epson1355fb_init(void)
{
@@ -749,11 +738,21 @@ int __init epson1355fb_init(void)
return -ENODEV;
ret = platform_driver_register(&epson1355fb_driver);
+
if (!ret) {
- ret = platform_device_register(&epson1355fb_device);
- if (ret)
+ epson1355fb_device = platform_device_alloc("epson1355fb", 0);
+
+ if (epson1355fb_device)
+ ret = platform_device_add(epson1355fb_device);
+ else
+ ret = -ENOMEM;
+
+ if (ret) {
+ platform_device_put(epson1355fb_device);
platform_driver_unregister(&epson1355fb_driver);
+ }
}
+
return ret;
}
@@ -762,7 +761,7 @@ module_init(epson1355fb_init);
#ifdef MODULE
static void __exit epson1355fb_exit(void)
{
- platform_device_unregister(&epson1355fb_device);
+ platform_device_unregister(epson1355fb_device);
platform_driver_unregister(&epson1355fb_driver);
}
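
epson1355fb switches from a static struct platform_device, which needed the dummy .release callback removed above, to the dynamic allocation API, where the driver core supplies the release path. The allocate/add/put/unregister pattern used in the hunk, reduced to a skeleton with a hypothetical driver name:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver = {
        .driver = { .name = "example" },        /* probe/remove elided */
};

static struct platform_device *example_device;

static int __init example_init(void)
{
        int ret = platform_driver_register(&example_driver);

        if (ret)
                return ret;

        example_device = platform_device_alloc("example", 0);
        if (example_device)
                ret = platform_device_add(example_device);
        else
                ret = -ENOMEM;

        if (ret) {
                platform_device_put(example_device);    /* mirrors the error path above */
                platform_driver_unregister(&example_driver);
        }
        return ret;
}

static void __exit example_exit(void)
{
        platform_device_unregister(example_device);
        platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
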
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index ac90883dc3aa..b5498999c4ec 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -376,4 +376,3 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
return 0;
}
-EXPORT_SYMBOL(fb_find_mode_cvt);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 372aa1776827..31143afe7c95 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -34,7 +34,6 @@
#endif
#include <linux/devfs_fs_kernel.h>
#include <linux/err.h>
-#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/efi.h>
@@ -162,7 +161,6 @@ char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size
}
#ifdef CONFIG_LOGO
-#include <linux/linux_logo.h>
static inline unsigned safe_shift(unsigned d, int n)
{
@@ -336,11 +334,11 @@ static void fb_rotate_logo_ud(const u8 *in, u8 *out, u32 width, u32 height)
static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height)
{
- int i, j, w = width - 1;
+ int i, j, h = height - 1;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
- out[height * j + w - i] = *in++;
+ out[height * j + h - i] = *in++;
}
static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height)
@@ -358,24 +356,24 @@ static void fb_rotate_logo(struct fb_info *info, u8 *dst,
u32 tmp;
if (rotate == FB_ROTATE_UD) {
- image->dx = info->var.xres - image->width;
- image->dy = info->var.yres - image->height;
fb_rotate_logo_ud(image->data, dst, image->width,
image->height);
+ image->dx = info->var.xres - image->width;
+ image->dy = info->var.yres - image->height;
} else if (rotate == FB_ROTATE_CW) {
- tmp = image->width;
- image->width = image->height;
- image->height = tmp;
- image->dx = info->var.xres - image->height;
fb_rotate_logo_cw(image->data, dst, image->width,
image->height);
- } else if (rotate == FB_ROTATE_CCW) {
tmp = image->width;
image->width = image->height;
image->height = tmp;
- image->dy = info->var.yres - image->width;
+ image->dx = info->var.xres - image->width;
+ } else if (rotate == FB_ROTATE_CCW) {
fb_rotate_logo_ccw(image->data, dst, image->width,
image->height);
+ tmp = image->width;
+ image->width = image->height;
+ image->height = tmp;
+ image->dy = info->var.yres - image->height;
}
image->data = dst;
@@ -435,7 +433,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
depth = info->var.green.length;
}
- if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) {
+ if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR && depth > 4) {
/* assume console colormap */
depth = 4;
}
@@ -1278,8 +1276,8 @@ static struct file_operations fb_fops = {
#endif
};
-static struct class *fb_class;
-
+struct class *fb_class;
+EXPORT_SYMBOL(fb_class);
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
@@ -1355,6 +1353,7 @@ register_framebuffer(struct fb_info *fb_info)
int
unregister_framebuffer(struct fb_info *fb_info)
{
+ struct fb_event event;
int i;
i = fb_info->node;
@@ -1362,13 +1361,17 @@ unregister_framebuffer(struct fb_info *fb_info)
return -EINVAL;
devfs_remove("fb/%d", i);
- if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
+ if (fb_info->pixmap.addr &&
+ (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
fb_destroy_modelist(&fb_info->modelist);
registered_fb[i]=NULL;
num_registered_fb--;
fb_cleanup_class_device(fb_info);
class_device_destroy(fb_class, MKDEV(FB_MAJOR, i));
+ event.info = fb_info;
+ blocking_notifier_call_chain(&fb_notifier_list,
+ FB_EVENT_FB_UNREGISTERED, &event);
return 0;
}
@@ -1491,28 +1494,6 @@ int fb_new_modelist(struct fb_info *info)
return err;
}
-/**
- * fb_con_duit - user<->fbcon passthrough
- * @info: struct fb_info
- * @event: notification event to be passed to fbcon
- * @data: private data
- *
- * DESCRIPTION
- * This function is an fbcon-user event passing channel
- * which bypasses fbdev. This is hopefully temporary
- * until a user interface for fbcon is created
- */
-int fb_con_duit(struct fb_info *info, int event, void *data)
-{
- struct fb_event evnt;
-
- evnt.info = info;
- evnt.data = data;
-
- return blocking_notifier_call_chain(&fb_notifier_list, event, &evnt);
-}
-EXPORT_SYMBOL(fb_con_duit);
-
static char *video_options[FB_MAX];
static int ofonly;
@@ -1622,6 +1603,5 @@ EXPORT_SYMBOL(fb_set_suspend);
EXPORT_SYMBOL(fb_register_client);
EXPORT_SYMBOL(fb_unregister_client);
EXPORT_SYMBOL(fb_get_options);
-EXPORT_SYMBOL(fb_new_modelist);
MODULE_LICENSE("GPL");
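
unregister_framebuffer() now fires FB_EVENT_FB_UNREGISTERED on fb_notifier_list, which drives the new fbcon_fb_unregistered() path above (remapping consoles to a surviving device, or unregistering the console driver when none is left). Any other client of the exported fb_register_client() could watch the same event; a hedged sketch with hypothetical names:

#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_fb_notify(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        struct fb_event *event = data;

        if (action == FB_EVENT_FB_UNREGISTERED)
                printk(KERN_INFO "fb%d went away\n", event->info->node);

        return 0;
}

static struct notifier_block example_fb_nb = {
        .notifier_call = example_fb_notify,
};

/* module init:  fb_register_client(&example_fb_nb);   */
/* module exit:  fb_unregister_client(&example_fb_nb); */
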
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 53beeb4a9998..3ccfff715a51 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -29,9 +29,9 @@
#include <linux/tty.h>
#include <linux/fb.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <video/edid.h>
#ifdef CONFIG_PPC_OF
-#include <linux/pci.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
@@ -605,6 +605,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
DPRINTK(" Monitor Operating Limits: ");
+
for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
if (edid_is_limits_block(block)) {
specs->hfmin = H_MIN_RATE * 1000;
@@ -618,11 +619,12 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
break;
}
}
-
+
/* estimate monitor limits based on modes supported */
if (retval) {
- struct fb_videomode *modes;
+ struct fb_videomode *modes, *mode;
int num_modes, i, hz, hscan, pixclock;
+ int vtotal, htotal;
modes = fb_create_modedb(edid, &num_modes);
if (!modes) {
@@ -632,20 +634,38 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
retval = 0;
for (i = 0; i < num_modes; i++) {
- hz = modes[i].refresh;
+ mode = &modes[i];
pixclock = PICOS2KHZ(modes[i].pixclock) * 1000;
- hscan = (modes[i].yres * 105 * hz + 5000)/100;
+ htotal = mode->xres + mode->right_margin + mode->hsync_len
+ + mode->left_margin;
+ vtotal = mode->yres + mode->lower_margin + mode->vsync_len
+ + mode->upper_margin;
+
+ if (mode->vmode & FB_VMODE_INTERLACED)
+ vtotal /= 2;
+
+ if (mode->vmode & FB_VMODE_DOUBLE)
+ vtotal *= 2;
+
+ hscan = (pixclock + htotal / 2) / htotal;
+ hscan = (hscan + 500) / 1000 * 1000;
+ hz = (hscan + vtotal / 2) / vtotal;
if (specs->dclkmax == 0 || specs->dclkmax < pixclock)
specs->dclkmax = pixclock;
+
if (specs->dclkmin == 0 || specs->dclkmin > pixclock)
specs->dclkmin = pixclock;
+
if (specs->hfmax == 0 || specs->hfmax < hscan)
specs->hfmax = hscan;
+
if (specs->hfmin == 0 || specs->hfmin > hscan)
specs->hfmin = hscan;
+
if (specs->vfmax == 0 || specs->vfmax < hz)
specs->vfmax = hz;
+
if (specs->vfmin == 0 || specs->vfmin > hz)
specs->vfmin = hz;
}
@@ -1281,8 +1301,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
-EINVAL : 0;
}
-#if defined(CONFIG_FB_FIRMWARE_EDID) && defined(__i386__)
-#include <linux/pci.h>
+#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
/*
* We need to ensure that the EDID block is only returned for
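
The rewritten monitor-limit estimation in fbmon.c replaces the old "yres * 105% * refresh" guess with rates derived from the full timings: htotal and vtotal are summed from the margins and sync lengths, vtotal is halved for interlaced and doubled for doublescan modes, hscan is pixclock/htotal rounded to the nearest kHz, and the vertical rate is hscan/vtotal. A worked example using the common VESA 1024x768@60 timings (illustrative values, not taken from the patch):

#include <stdio.h>

int main(void)
{
        unsigned int xres = 1024, right = 24, hslen = 136, left = 160;
        unsigned int yres = 768, lower = 3, vslen = 6, upper = 29;
        unsigned int pixclock = 65000000;                        /* Hz    */

        unsigned int htotal = xres + right + hslen + left;       /* 1344  */
        unsigned int vtotal = yres + lower + vslen + upper;      /*  806  */

        unsigned int hscan = (pixclock + htotal / 2) / htotal;   /* 48363 */
        hscan = (hscan + 500) / 1000 * 1000;                     /* 48000 */
        unsigned int hz = (hscan + vtotal / 2) / vtotal;         /*    60 */

        printf("hscan=%u Hz, refresh=%u Hz\n", hscan, hz);
        return 0;
}
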
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 3ceb8c1b392e..4f78f234473d 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -100,13 +100,22 @@ static int mode_string(char *buf, unsigned int offset,
const struct fb_videomode *mode)
{
char m = 'U';
+ char v = 'p';
+
if (mode->flag & FB_MODE_IS_DETAILED)
m = 'D';
if (mode->flag & FB_MODE_IS_VESA)
m = 'V';
if (mode->flag & FB_MODE_IS_STANDARD)
m = 'S';
- return snprintf(&buf[offset], PAGE_SIZE - offset, "%c:%dx%d-%d\n", m, mode->xres, mode->yres, mode->refresh);
+
+ if (mode->vmode & FB_VMODE_INTERLACED)
+ v = 'i';
+ if (mode->vmode & FB_VMODE_DOUBLE)
+ v = 'd';
+
+ return snprintf(&buf[offset], PAGE_SIZE - offset, "%c:%dx%d%c-%d\n",
+ m, mode->xres, mode->yres, v, mode->refresh);
}
static ssize_t store_mode(struct class_device *class_device, const char * buf,
@@ -238,45 +247,6 @@ static ssize_t show_rotate(struct class_device *class_device, char *buf)
return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.rotate);
}
-static ssize_t store_con_rotate(struct class_device *class_device,
- const char *buf, size_t count)
-{
- struct fb_info *fb_info = class_get_devdata(class_device);
- int rotate;
- char **last = NULL;
-
- acquire_console_sem();
- rotate = simple_strtoul(buf, last, 0);
- fb_con_duit(fb_info, FB_EVENT_SET_CON_ROTATE, &rotate);
- release_console_sem();
- return count;
-}
-
-static ssize_t store_con_rotate_all(struct class_device *class_device,
- const char *buf, size_t count)
-{
- struct fb_info *fb_info = class_get_devdata(class_device);
- int rotate;
- char **last = NULL;
-
- acquire_console_sem();
- rotate = simple_strtoul(buf, last, 0);
- fb_con_duit(fb_info, FB_EVENT_SET_CON_ROTATE_ALL, &rotate);
- release_console_sem();
- return count;
-}
-
-static ssize_t show_con_rotate(struct class_device *class_device, char *buf)
-{
- struct fb_info *fb_info = class_get_devdata(class_device);
- int rotate;
-
- acquire_console_sem();
- rotate = fb_con_duit(fb_info, FB_EVENT_GET_CON_ROTATE, NULL);
- release_console_sem();
- return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
-}
-
static ssize_t store_virtual(struct class_device *class_device,
const char * buf, size_t count)
{
@@ -493,8 +463,6 @@ static struct class_device_attribute class_device_attrs[] = {
__ATTR(name, S_IRUGO, show_name, NULL),
__ATTR(stride, S_IRUGO, show_stride, NULL),
__ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
- __ATTR(con_rotate, S_IRUGO|S_IWUSR, show_con_rotate, store_con_rotate),
- __ATTR(con_rotate_all, S_IWUSR, NULL, store_con_rotate_all),
__ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate),
#ifdef CONFIG_FB_BACKLIGHT
__ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve),
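
With the extra vmode character, mode_string() entries read back through the fb sysfs mode attributes now encode the scan type as well, e.g. "V:1024x768p-60" for a progressive VESA mode, with 'i' substituted for interlaced and 'd' for doublescan timings; the old format was simply "V:1024x768-60".
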
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index 20e69156d728..4d3a8871d3d1 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -376,8 +376,6 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
}
- pci_disable_device(pdev);
-
if (info)
framebuffer_release(info);
return ret;
@@ -399,7 +397,6 @@ static void gx1fb_remove(struct pci_dev *pdev)
iounmap(par->dc_regs);
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 89c34b15f5d4..5ef12a3dfa50 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -354,8 +354,6 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
pci_release_region(pdev, 2);
}
- pci_disable_device(pdev);
-
if (info)
framebuffer_release(info);
return ret;
@@ -377,7 +375,6 @@ static void gxfb_remove(struct pci_dev *pdev)
iounmap(par->dc_regs);
pci_release_region(pdev, 2);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 44aa2ffff973..a1f7d80f0ac1 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2110,9 +2110,6 @@ static void i810fb_release_resource(struct fb_info *info,
if (par->res_flags & MMIO_REQ)
release_mem_region(par->mmio_start_phys, MMIO_SIZE);
- if (par->res_flags & PCI_DEVICE_ENABLED)
- pci_disable_device(par->dev);
-
framebuffer_release(info);
}
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
new file mode 100644
index 000000000000..7b1c168c834d
--- /dev/null
+++ b/drivers/video/imacfb.c
@@ -0,0 +1,345 @@
+/*
+ * framebuffer driver for Intel-based Macs
+ *
+ * (c) 2006 Edgar Hucek <gimli@dark-green.com>
+ * Original imac driver written by Gerd Knorr <kraxel@goldbach.in-berlin.de>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+
+#include <asm/io.h>
+
+#include <video/vga.h>
+
+typedef enum _MAC_TYPE {
+ M_I17,
+ M_I20,
+ M_MINI,
+ M_MACBOOK,
+ M_NEW
+} MAC_TYPE;
+
+/* --------------------------------------------------------------------- */
+
+static struct fb_var_screeninfo imacfb_defined __initdata = {
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .right_margin = 32,
+ .upper_margin = 16,
+ .lower_margin = 4,
+ .vsync_len = 4,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static struct fb_fix_screeninfo imacfb_fix __initdata = {
+ .id = "IMAC VGA",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .accel = FB_ACCEL_NONE,
+ .visual = FB_VISUAL_TRUECOLOR,
+};
+
+static int inverse;
+static int model = M_NEW;
+static int manual_height;
+static int manual_width;
+
+#define DEFAULT_FB_MEM 1024*1024*16
+
+/* --------------------------------------------------------------------- */
+
+static int imacfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ /*
+ * Set a single color register. The values supplied are
+ * already rounded down to the hardware's capabilities
+ * (according to the entries in the `var' structure). Return
+ * != 0 for invalid regno.
+ */
+
+ if (regno >= info->cmap.len)
+ return 1;
+
+ if (regno < 16) {
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ ((u32 *)(info->pseudo_palette))[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ }
+ return 0;
+}
+
+static struct fb_ops imacfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = imacfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int __init imacfb_setup(char *options)
+{
+ char *this_opt;
+
+ if (!options || !*options)
+ return 0;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!*this_opt) continue;
+
+ if (!strcmp(this_opt, "inverse"))
+ inverse = 1;
+ else if (!strcmp(this_opt, "i17"))
+ model = M_I17;
+ else if (!strcmp(this_opt, "i20"))
+ model = M_I20;
+ else if (!strcmp(this_opt, "mini"))
+ model = M_MINI;
+ else if (!strcmp(this_opt, "macbook"))
+ model = M_MACBOOK;
+ else if (!strncmp(this_opt, "height:", 7))
+ manual_height = simple_strtoul(this_opt+7, NULL, 0);
+ else if (!strncmp(this_opt, "width:", 6))
+ manual_width = simple_strtoul(this_opt+6, NULL, 0);
+ }
+ return 0;
+}
+
+static int __init imacfb_probe(struct platform_device *dev)
+{
+ struct fb_info *info;
+ int err;
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
+
+ screen_info.lfb_depth = 32;
+ screen_info.lfb_size = DEFAULT_FB_MEM / 0x10000;
+ screen_info.pages=1;
+ screen_info.blue_size = 8;
+ screen_info.blue_pos = 0;
+ screen_info.green_size = 8;
+ screen_info.green_pos = 8;
+ screen_info.red_size = 8;
+ screen_info.red_pos = 16;
+ screen_info.rsvd_size = 8;
+ screen_info.rsvd_pos = 24;
+
+ switch (model) {
+ case M_I17:
+ screen_info.lfb_width = 1440;
+ screen_info.lfb_height = 900;
+ screen_info.lfb_linelength = 1472 * 4;
+ screen_info.lfb_base = 0x80010000;
+ break;
+ case M_NEW:
+ case M_I20:
+ screen_info.lfb_width = 1680;
+ screen_info.lfb_height = 1050;
+ screen_info.lfb_linelength = 1728 * 4;
+ screen_info.lfb_base = 0x80010000;
+ break;
+ case M_MINI:
+ screen_info.lfb_width = 1024;
+ screen_info.lfb_height = 768;
+ screen_info.lfb_linelength = 2048 * 4;
+ screen_info.lfb_base = 0x80000000;
+ break;
+ case M_MACBOOK:
+ screen_info.lfb_width = 1280;
+ screen_info.lfb_height = 800;
+ screen_info.lfb_linelength = 2048 * 4;
+ screen_info.lfb_base = 0x80000000;
+ break;
+ }
+
+ /* if the user wants to manually specify height/width,
+ we will override the defaults */
+ /* TODO: eventually get auto-detection working */
+ if (manual_height > 0)
+ screen_info.lfb_height = manual_height;
+ if (manual_width > 0)
+ screen_info.lfb_width = manual_width;
+
+ imacfb_fix.smem_start = screen_info.lfb_base;
+ imacfb_defined.bits_per_pixel = screen_info.lfb_depth;
+ imacfb_defined.xres = screen_info.lfb_width;
+ imacfb_defined.yres = screen_info.lfb_height;
+ imacfb_fix.line_length = screen_info.lfb_linelength;
+
+ /* size_vmode -- that is the amount of memory needed for the
+ * used video mode, i.e. the minimum amount of
+ * memory we need. */
+ size_vmode = imacfb_defined.yres * imacfb_fix.line_length;
+
+ /* size_total -- all video memory we have. Used for
+ * entries, resource allocation and bounds
+ * checking. */
+ size_total = screen_info.lfb_size * 65536;
+ if (size_total < size_vmode)
+ size_total = size_vmode;
+
+ /* size_remap -- the amount of video memory we are going to
+ * use for imacfb. With modern cards it is not an
+ * option to simply use size_total, as that
+ * wastes plenty of kernel address space. */
+ size_remap = size_vmode * 2;
+ if (size_remap < size_vmode)
+ size_remap = size_vmode;
+ if (size_remap > size_total)
+ size_remap = size_total;
+ imacfb_fix.smem_len = size_remap;
+
+#ifndef __i386__
+ screen_info.imacpm_seg = 0;
+#endif
+
+ if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) {
+ printk(KERN_WARNING
+ "imacfb: cannot reserve video memory at 0x%lx\n",
+ imacfb_fix.smem_start);
+ /* We cannot make this fatal. Sometimes this comes from magic
+ spaces our resource handlers simply don't know about */
+ }
+
+ info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
+ if (!info) {
+ err = -ENOMEM;
+ goto err_release_mem;
+ }
+ info->pseudo_palette = info->par;
+ info->par = NULL;
+
+ info->screen_base = ioremap(imacfb_fix.smem_start, imacfb_fix.smem_len);
+ if (!info->screen_base) {
+ printk(KERN_ERR "imacfb: abort, cannot ioremap video memory "
+ "0x%x @ 0x%lx\n",
+ imacfb_fix.smem_len, imacfb_fix.smem_start);
+ err = -EIO;
+ goto err_unmap;
+ }
+
+ printk(KERN_INFO "imacfb: framebuffer at 0x%lx, mapped to 0x%p, "
+ "using %dk, total %dk\n",
+ imacfb_fix.smem_start, info->screen_base,
+ size_remap/1024, size_total/1024);
+ printk(KERN_INFO "imacfb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ imacfb_defined.xres, imacfb_defined.yres,
+ imacfb_defined.bits_per_pixel, imacfb_fix.line_length,
+ screen_info.pages);
+
+ imacfb_defined.xres_virtual = imacfb_defined.xres;
+ imacfb_defined.yres_virtual = imacfb_fix.smem_len /
+ imacfb_fix.line_length;
+ printk(KERN_INFO "imacfb: scrolling: redraw\n");
+ imacfb_defined.yres_virtual = imacfb_defined.yres;
+
+ /* some dummy values for timing to make fbset happy */
+ imacfb_defined.pixclock = 10000000 / imacfb_defined.xres *
+ 1000 / imacfb_defined.yres;
+ imacfb_defined.left_margin = (imacfb_defined.xres / 8) & 0xf8;
+ imacfb_defined.hsync_len = (imacfb_defined.xres / 8) & 0xf8;
+
+ imacfb_defined.red.offset = screen_info.red_pos;
+ imacfb_defined.red.length = screen_info.red_size;
+ imacfb_defined.green.offset = screen_info.green_pos;
+ imacfb_defined.green.length = screen_info.green_size;
+ imacfb_defined.blue.offset = screen_info.blue_pos;
+ imacfb_defined.blue.length = screen_info.blue_size;
+ imacfb_defined.transp.offset = screen_info.rsvd_pos;
+ imacfb_defined.transp.length = screen_info.rsvd_size;
+
+ printk(KERN_INFO "imacfb: %s: "
+ "size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
+ "Truecolor",
+ screen_info.rsvd_size,
+ screen_info.red_size,
+ screen_info.green_size,
+ screen_info.blue_size,
+ screen_info.rsvd_pos,
+ screen_info.red_pos,
+ screen_info.green_pos,
+ screen_info.blue_pos);
+
+ imacfb_fix.ypanstep = 0;
+ imacfb_fix.ywrapstep = 0;
+
+ /* request failure does not faze us, as vgacon probably has this
+ * region already (FIXME) */
+ request_region(0x3c0, 32, "imacfb");
+
+ info->fbops = &imacfb_ops;
+ info->var = imacfb_defined;
+ info->fix = imacfb_fix;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+ if (register_framebuffer(info)<0) {
+ err = -EINVAL;
+ goto err_fb_dealoc;
+ }
+ printk(KERN_INFO "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
+ return 0;
+
+err_fb_dealoc:
+ fb_dealloc_cmap(&info->cmap);
+err_unmap:
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+err_release_mem:
+ release_mem_region(imacfb_fix.smem_start, size_total);
+ return err;
+}
+
+static struct platform_driver imacfb_driver = {
+ .probe = imacfb_probe,
+ .driver = {
+ .name = "imacfb",
+ },
+};
+
+static struct platform_device imacfb_device = {
+ .name = "imacfb",
+};
+
+static int __init imacfb_init(void)
+{
+ int ret;
+ char *option = NULL;
+
+ /* ignore error return of fb_get_options */
+ fb_get_options("imacfb", &option);
+ imacfb_setup(option);
+ ret = platform_driver_register(&imacfb_driver);
+
+ if (!ret) {
+ ret = platform_device_register(&imacfb_device);
+ if (ret)
+ platform_driver_unregister(&imacfb_driver);
+ }
+ return ret;
+}
+module_init(imacfb_init);
+
+MODULE_LICENSE("GPL");
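
The new imacfb driver takes its options through fb_get_options("imacfb", ...), i.e. the usual video= kernel parameter, so something like video=imacfb:i17 or video=imacfb:height:900,width:1440 would be the expected way to select a model or override the panel size; the keywords parsed by imacfb_setup() are inverse, i17, i20, mini, macbook, height:<n> and width:<n>.
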
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index c0385c6f7db5..d21321ca7c39 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -327,7 +327,6 @@ int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
}
return -EINVAL;
}
-EXPORT_SYMBOL(mac_var_to_vmode);
/**
* mac_map_monitor_sense - Convert monitor sense to vmode
@@ -371,8 +370,9 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
*
*/
-int __init mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
- const char *mode_option, unsigned int default_bpp)
+int __devinit mac_find_mode(struct fb_var_screeninfo *var,
+ struct fb_info *info, const char *mode_option,
+ unsigned int default_bpp)
{
const struct fb_videomode *db = NULL;
unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index 232f5a09a499..babeb81f467d 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,9 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
int *cmode);
extern int mac_map_monitor_sense(int sense);
-extern int __init mac_find_mode(struct fb_var_screeninfo *var,
- struct fb_info *info, const char *mode_option,
- unsigned int default_bpp);
+extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
+ struct fb_info *info,
+ const char *mode_option,
+ unsigned int default_bpp);
/*
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 26a1c618a205..ff5454601e22 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -259,6 +259,10 @@ static const struct fb_videomode modedb[] = {
/* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
NULL, 60, 1152, 768, 15386, 158, 26, 29, 3, 136, 6,
FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+ }, {
+ /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
+ NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
+ 0, FB_VMODE_NONINTERLACED
},
};
@@ -787,8 +791,9 @@ struct fb_videomode *fb_find_best_mode(struct fb_var_screeninfo *var,
if (diff > d) {
diff = d;
best = mode;
- } else if (diff == d && mode->refresh > best->refresh)
- best = mode;
+ } else if (diff == d && best &&
+ mode->refresh > best->refresh)
+ best = mode;
}
}
return best;
@@ -1016,8 +1021,6 @@ EXPORT_SYMBOL(fb_videomode_to_var);
EXPORT_SYMBOL(fb_var_to_videomode);
EXPORT_SYMBOL(fb_mode_is_equal);
EXPORT_SYMBOL(fb_add_videomode);
-EXPORT_SYMBOL(fb_delete_videomode);
-EXPORT_SYMBOL(fb_destroy_modelist);
EXPORT_SYMBOL(fb_match_mode);
EXPORT_SYMBOL(fb_find_best_mode);
EXPORT_SYMBOL(fb_find_nearest_mode);
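
The added 1366x768 entry can be checked against its own comment: with a 13806 ps pixel clock and the listed margin and sync values, the horizontal rate works out to roughly 47.4 kHz and the refresh to 60 Hz. A small stand-alone check:

#include <stdio.h>

int main(void)
{
        /* The new modedb entry above: 1366x768, pixclock 13806 ps,
         * margins 120/10 and 14/3, sync lengths 32/5. */
        double clk = 1e12 / 13806.0;                 /* ~72.43 MHz */
        unsigned int htotal = 1366 + 120 + 10 + 32;  /* 1528       */
        unsigned int vtotal = 768 + 14 + 3 + 5;      /*  790       */
        double hsync = clk / htotal;                 /* ~47.4 kHz  */
        double vsync = hsync / vtotal;               /* ~60 Hz     */

        printf("hsync=%.3f kHz, refresh=%.1f Hz\n", hsync / 1000.0, vsync);
        return 0;
}
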
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 24b12f71d5a8..2f156b724d1c 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -1333,17 +1333,22 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
* run "setterm -powersave powerdown" to take advantage
*/
struct neofb_par *par = info->par;
- int seqflags, lcdflags, dpmsflags, reg;
-
+ int seqflags, lcdflags, dpmsflags, reg, tmpdisp;
/*
- * Reload the value stored in the register, if sensible. It might have
- * been changed via FN keystroke.
+ * Read back the register bits related to display configuration. They might
+ * have been changed underneath the driver via Fn key stroke.
+ */
+ neoUnlock();
+ tmpdisp = vga_rgfx(NULL, 0x20) & 0x03;
+ neoLock(&par->state);
+
+ /* In case we blank the screen, we want to store the possibly new
+ * configuration in the driver. During un-blank, we re-apply this setting,
+ * since the LCD bit will be cleared in order to switch off the backlight.
*/
if (par->PanelDispCntlRegRead) {
- neoUnlock();
- par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
- neoLock(&par->state);
+ par->PanelDispCntlReg1 = tmpdisp;
}
par->PanelDispCntlRegRead = !blank_mode;
@@ -1378,12 +1383,21 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
break;
case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */
seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
- lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */
+ /*
+ * During a blank operation with the LID shut, we might store "LCD off"
+ * by mistake. Due to timing issues, the BIOS may switch the lights
+ * back on, and we turn it back off once we "unblank".
+ *
+ * So here is an attempt to implement ">=" - if we are in the process
+ * of unblanking, and the LCD bit is unset in the driver but set in the
+ * register, we must keep it.
+ */
+ lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
dpmsflags = 0x00; /* no hsync/vsync suppression */
break;
case FB_BLANK_UNBLANK: /* unblank */
seqflags = 0; /* Enable sequencer */
- lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */
+ lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
dpmsflags = 0x00; /* no hsync/vsync suppression */
#ifdef CONFIG_TOSHIBA
/* Do we still need this ? */
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index 99c3a8e6a237..9ed640d35728 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -886,7 +886,10 @@ void NVCalcStateExt(struct nvidia_par *par,
case NV_ARCH_20:
case NV_ARCH_30:
default:
- if (((par->Chipset & 0xffff) == 0x01A0) ||
+ if ((par->Chipset & 0xfff0) == 0x0240) {
+ state->arbitration0 = 256;
+ state->arbitration1 = 0x0480;
+ } else if (((par->Chipset & 0xffff) == 0x01A0) ||
((par->Chipset & 0xffff) == 0x01f0)) {
nForceUpdateArbitrationSettings(VClk,
pixelDepth * 8,
@@ -1235,6 +1238,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
break;
case 0x0160:
case 0x01D0:
+ case 0x0240:
NV_WR32(par->PMC, 0x1700,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PMC, 0x1704, 0);
@@ -1359,7 +1363,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
if(((par->Chipset & 0xfff0)
!= 0x0160) &&
((par->Chipset & 0xfff0)
- != 0x0220))
+ != 0x0220) &&
+ ((par->Chipset & 0xfff0)
+ != 0x240))
NV_WR32(par->PGRAPH,
0x6900 + i*4,
NV_RD32(par->PFB,
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 03a7c1e9ce38..7b5cffb27851 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -67,359 +67,10 @@
#define MAX_CURS 32
static struct pci_device_id nvidiafb_pci_tbl[] = {
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UTNT2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_VTNT2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UVTNT2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_ITNT2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_IGEFORCE2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_DDC,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6600,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0252,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0313,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0316,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0317,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x031D,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x031E,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x031F,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0329,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x032F,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0345,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0349,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x034B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x034F,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x00c0,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_6800A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_6800A_LE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_GO_6800,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_QUADRO_FX_GO1400,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x00cd,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_QUADRO_FX_1400,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0142,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0143,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0144,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0145,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0146,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0147,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0148,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0149,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x014b,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x14c,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x014d,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0160,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0162,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0163,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0165,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0169,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x016b,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x016c,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x016d,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x016e,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0210,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x021d,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x021e,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0220,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0221,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0222,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NVIDIA, 0x0228,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0,} /* terminate list */
+ {PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0},
+ { 0, }
};
-
MODULE_DEVICE_TABLE(pci, nvidiafb_pci_tbl);
/* command line data, set in nvidiafb_setup() */
@@ -1465,10 +1116,10 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
struct nvidia_par *par = info->par;
u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
- printk("nvidiafb: PCI id - %x\n", id);
+ printk(KERN_INFO PFX "Device ID: %x \n", id);
+
if ((id & 0xfff0) == 0x00f0) {
/* pci-e */
- printk("nvidiafb: PCI-E card\n");
id = NV_RD32(par->REGS, 0x1800);
if ((id & 0x0000ffff) == 0x000010DE)
@@ -1476,9 +1127,9 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
else if ((id & 0xffff0000) == 0xDE100000) /* wrong endian */
id = 0x10DE0000 | ((id << 8) & 0x0000ff00) |
((id >> 8) & 0x000000ff);
+ printk(KERN_INFO PFX "Subsystem ID: %x \n", id);
}
- printk("nvidiafb: Actual id - %x\n", id);
return id;
}
@@ -1520,6 +1171,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
case 0x0210:
case 0x0220:
case 0x0230:
+ case 0x0240:
case 0x0290:
case 0x0390:
arch = NV_ARCH_40;
@@ -1567,7 +1219,7 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
if (pci_request_regions(pd, "nvidiafb")) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
- goto err_out_request;
+ goto err_out_enable;
}
par->FlatPanel = flatpanel;
@@ -1596,7 +1248,6 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
}
par->Chipset = nvidia_get_chipset(info);
- printk(KERN_INFO PFX "nVidia device/chipset %X\n", par->Chipset);
par->Architecture = nvidia_get_arch(info);
if (par->Architecture == 0) {
@@ -1687,10 +1338,8 @@ err_out_free_base1:
nvidia_delete_i2c_busses(par);
err_out_arch:
iounmap(par->REGS);
-err_out_free_base0:
+ err_out_free_base0:
pci_release_regions(pd);
-err_out_request:
- pci_disable_device(pd);
err_out_enable:
kfree(info->pixmap.addr);
err_out_kfree:
@@ -1720,7 +1369,6 @@ static void __exit nvidiafb_remove(struct pci_dev *pd)
nvidia_delete_i2c_busses(par);
iounmap(par->REGS);
pci_release_regions(pd);
- pci_disable_device(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
pci_set_drvdata(pd, NULL);
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index d4384ab1df65..12af58c5cf1f 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2152,7 +2152,6 @@ err_iounmap_ctrl_base:
err_release_region:
pci_release_regions(pd);
err_disable_device:
- pci_disable_device(pd);
err_free_pixmap:
kfree(info->pixmap.addr);
err_framebuffer_release:
@@ -2187,7 +2186,6 @@ static void __exit rivafb_remove(struct pci_dev *pd)
if (par->riva.Architecture == NV_ARCH_03)
iounmap(par->riva.PRAMIN);
pci_release_regions(pd);
- pci_disable_device(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
pci_set_drvdata(pd, NULL);
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 9451932fbaf2..fbc411850686 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -641,6 +641,7 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
int ret;
int irq;
int i;
+ u32 lcdcon1;
mach_info = pdev->dev.platform_data;
if (mach_info == NULL) {
@@ -672,6 +673,11 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
memcpy(&info->regs, &mach_info->regs, sizeof(info->regs));
+ /* Stop the video and unset ENVID if set */
+ info->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID;
+ lcdcon1 = readl(S3C2410_LCDCON1);
+ writel(lcdcon1 & ~S3C2410_LCDCON1_ENVID, S3C2410_LCDCON1);
+
info->mach_info = pdev->dev.platform_data;
fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -794,15 +800,14 @@ dealloc_fb:
* shutdown the lcd controller
*/
-static void s3c2410fb_stop_lcd(void)
+static void s3c2410fb_stop_lcd(struct s3c2410fb_info *fbi)
{
unsigned long flags;
- unsigned long tmp;
local_irq_save(flags);
- tmp = readl(S3C2410_LCDCON1);
- writel(tmp & ~S3C2410_LCDCON1_ENVID, S3C2410_LCDCON1);
+ fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID;
+ writel(fbi->regs.lcdcon1, S3C2410_LCDCON1);
local_irq_restore(flags);
}
@@ -816,7 +821,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
struct s3c2410fb_info *info = fbinfo->par;
int irq;
- s3c2410fb_stop_lcd();
+ s3c2410fb_stop_lcd(info);
msleep(1);
s3c2410fb_unmap_video_memory(info);
@@ -844,7 +849,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
struct fb_info *fbinfo = platform_get_drvdata(dev);
struct s3c2410fb_info *info = fbinfo->par;
- s3c2410fb_stop_lcd();
+ s3c2410fb_stop_lcd(info);
/* sleep before disabling the clock, we need to ensure
* the LCD DMA engine is not going to get back on the bus
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 58cfdfb41833..e648a6c0f6d9 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -147,7 +147,27 @@ struct xtimings {
int interlaced;
};
+struct savage_reg {
+ unsigned char MiscOutReg; /* Misc */
+ unsigned char CRTC[25]; /* Crtc Controller */
+ unsigned char Sequencer[5]; /* Video Sequencer */
+ unsigned char Graphics[9]; /* Video Graphics */
+ unsigned char Attribute[21]; /* Video Attribute */
+ unsigned int mode, refresh;
+ unsigned char SR08, SR0E, SR0F;
+ unsigned char SR10, SR11, SR12, SR13, SR15, SR18, SR29, SR30;
+ unsigned char SR54[8];
+ unsigned char Clock;
+ unsigned char CR31, CR32, CR33, CR34, CR36, CR3A, CR3B, CR3C;
+ unsigned char CR40, CR41, CR42, CR43, CR45;
+ unsigned char CR50, CR51, CR53, CR55, CR58, CR5B, CR5D, CR5E;
+ unsigned char CR60, CR63, CR65, CR66, CR67, CR68, CR69, CR6D, CR6F;
+ unsigned char CR86, CR88;
+ unsigned char CR90, CR91, CRB0;
+ unsigned int STREAMS[22]; /* yuck, streams regs */
+ unsigned int MMPR0, MMPR1, MMPR2, MMPR3;
+};
/* --------------------------------------------------------------------- */
#define NR_PALETTE 256
@@ -167,6 +187,8 @@ struct savagefb_par {
struct pci_dev *pcidev;
savage_chipset chip;
struct savagefb_i2c_chan chan;
+ struct savage_reg state;
+ struct savage_reg save;
unsigned char *edid;
u32 pseudo_palette[16];
int paletteEnabled;
@@ -179,6 +201,7 @@ struct savagefb_par {
int minClock;
int numClocks;
int clock[4];
+ int MCLK, REFCLK, LCDclk;
struct {
u8 __iomem *vbase;
u32 pbase;
@@ -196,7 +219,6 @@ struct savagefb_par {
volatile u32 __iomem *bci_base;
unsigned int bci_ptr;
-
u32 cob_offset;
u32 cob_size;
int cob_index;
@@ -204,7 +226,6 @@ struct savagefb_par {
void (*SavageWaitIdle) (struct savagefb_par *par);
void (*SavageWaitFifo) (struct savagefb_par *par, int space);
- int MCLK, REFCLK, LCDclk;
int HorizScaleFactor;
/* Panels size */
@@ -217,26 +238,6 @@ struct savagefb_par {
int depth;
int vwidth;
-
- unsigned char MiscOutReg; /* Misc */
- unsigned char CRTC[25]; /* Crtc Controller */
- unsigned char Sequencer[5]; /* Video Sequencer */
- unsigned char Graphics[9]; /* Video Graphics */
- unsigned char Attribute[21]; /* Video Atribute */
-
- unsigned int mode, refresh;
- unsigned char SR08, SR0E, SR0F;
- unsigned char SR10, SR11, SR12, SR13, SR15, SR18, SR29, SR30;
- unsigned char SR54[8];
- unsigned char Clock;
- unsigned char CR31, CR32, CR33, CR34, CR36, CR3A, CR3B, CR3C;
- unsigned char CR40, CR41, CR42, CR43, CR45;
- unsigned char CR50, CR51, CR53, CR55, CR58, CR5B, CR5D, CR5E;
- unsigned char CR60, CR63, CR65, CR66, CR67, CR68, CR69, CR6D, CR6F;
- unsigned char CR86, CR88;
- unsigned char CR90, CR91, CRB0;
- unsigned int STREAMS[22]; /* yuck, streams regs */
- unsigned int MMPR0, MMPR1, MMPR2, MMPR3;
};
#define BCI_BD_BW_DISABLE 0x10000000
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 0da624e6524f..78883cf66a4d 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -86,15 +86,15 @@ MODULE_DESCRIPTION("FBDev driver for S3 Savage PCI/AGP Chips");
/* --------------------------------------------------------------------- */
-static void vgaHWSeqReset (struct savagefb_par *par, int start)
+static void vgaHWSeqReset(struct savagefb_par *par, int start)
{
if (start)
- VGAwSEQ (0x00, 0x01, par); /* Synchronous Reset */
+ VGAwSEQ(0x00, 0x01, par); /* Synchronous Reset */
else
- VGAwSEQ (0x00, 0x03, par); /* End Reset */
+ VGAwSEQ(0x00, 0x03, par); /* End Reset */
}
-static void vgaHWProtect (struct savagefb_par *par, int on)
+static void vgaHWProtect(struct savagefb_par *par, int on)
{
unsigned char tmp;
@@ -102,10 +102,10 @@ static void vgaHWProtect (struct savagefb_par *par, int on)
/*
* Turn off screen and disable sequencer.
*/
- tmp = VGArSEQ (0x01, par);
+ tmp = VGArSEQ(0x01, par);
- vgaHWSeqReset (par, 1); /* start synchronous reset */
- VGAwSEQ (0x01, tmp | 0x20, par);/* disable the display */
+ vgaHWSeqReset(par, 1); /* start synchronous reset */
+ VGAwSEQ(0x01, tmp | 0x20, par);/* disable the display */
VGAenablePalette(par);
} else {
@@ -113,75 +113,76 @@ static void vgaHWProtect (struct savagefb_par *par, int on)
* Reenable sequencer, then turn on screen.
*/
- tmp = VGArSEQ (0x01, par);
+ tmp = VGArSEQ(0x01, par);
- VGAwSEQ (0x01, tmp & ~0x20, par);/* reenable display */
- vgaHWSeqReset (par, 0); /* clear synchronous reset */
+ VGAwSEQ(0x01, tmp & ~0x20, par);/* reenable display */
+ vgaHWSeqReset(par, 0); /* clear synchronous reset */
VGAdisablePalette(par);
}
}
-static void vgaHWRestore (struct savagefb_par *par)
+static void vgaHWRestore(struct savagefb_par *par, struct savage_reg *reg)
{
int i;
- VGAwMISC (par->MiscOutReg, par);
+ VGAwMISC(reg->MiscOutReg, par);
for (i = 1; i < 5; i++)
- VGAwSEQ (i, par->Sequencer[i], par);
+ VGAwSEQ(i, reg->Sequencer[i], par);
/* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 or
CRTC[17] */
- VGAwCR (17, par->CRTC[17] & ~0x80, par);
+ VGAwCR(17, reg->CRTC[17] & ~0x80, par);
for (i = 0; i < 25; i++)
- VGAwCR (i, par->CRTC[i], par);
+ VGAwCR(i, reg->CRTC[i], par);
for (i = 0; i < 9; i++)
- VGAwGR (i, par->Graphics[i], par);
+ VGAwGR(i, reg->Graphics[i], par);
VGAenablePalette(par);
for (i = 0; i < 21; i++)
- VGAwATTR (i, par->Attribute[i], par);
+ VGAwATTR(i, reg->Attribute[i], par);
VGAdisablePalette(par);
}
-static void vgaHWInit (struct fb_var_screeninfo *var,
- struct savagefb_par *par,
- struct xtimings *timings)
+static void vgaHWInit(struct fb_var_screeninfo *var,
+ struct savagefb_par *par,
+ struct xtimings *timings,
+ struct savage_reg *reg)
{
- par->MiscOutReg = 0x23;
+ reg->MiscOutReg = 0x23;
if (!(timings->sync & FB_SYNC_HOR_HIGH_ACT))
- par->MiscOutReg |= 0x40;
+ reg->MiscOutReg |= 0x40;
if (!(timings->sync & FB_SYNC_VERT_HIGH_ACT))
- par->MiscOutReg |= 0x80;
+ reg->MiscOutReg |= 0x80;
/*
* Time Sequencer
*/
- par->Sequencer[0x00] = 0x00;
- par->Sequencer[0x01] = 0x01;
- par->Sequencer[0x02] = 0x0F;
- par->Sequencer[0x03] = 0x00; /* Font select */
- par->Sequencer[0x04] = 0x0E; /* Misc */
+ reg->Sequencer[0x00] = 0x00;
+ reg->Sequencer[0x01] = 0x01;
+ reg->Sequencer[0x02] = 0x0F;
+ reg->Sequencer[0x03] = 0x00; /* Font select */
+ reg->Sequencer[0x04] = 0x0E; /* Misc */
/*
* CRTC Controller
*/
- par->CRTC[0x00] = (timings->HTotal >> 3) - 5;
- par->CRTC[0x01] = (timings->HDisplay >> 3) - 1;
- par->CRTC[0x02] = (timings->HSyncStart >> 3) - 1;
- par->CRTC[0x03] = (((timings->HSyncEnd >> 3) - 1) & 0x1f) | 0x80;
- par->CRTC[0x04] = (timings->HSyncStart >> 3);
- par->CRTC[0x05] = ((((timings->HSyncEnd >> 3) - 1) & 0x20) << 2) |
+ reg->CRTC[0x00] = (timings->HTotal >> 3) - 5;
+ reg->CRTC[0x01] = (timings->HDisplay >> 3) - 1;
+ reg->CRTC[0x02] = (timings->HSyncStart >> 3) - 1;
+ reg->CRTC[0x03] = (((timings->HSyncEnd >> 3) - 1) & 0x1f) | 0x80;
+ reg->CRTC[0x04] = (timings->HSyncStart >> 3);
+ reg->CRTC[0x05] = ((((timings->HSyncEnd >> 3) - 1) & 0x20) << 2) |
(((timings->HSyncEnd >> 3)) & 0x1f);
- par->CRTC[0x06] = (timings->VTotal - 2) & 0xFF;
- par->CRTC[0x07] = (((timings->VTotal - 2) & 0x100) >> 8) |
+ reg->CRTC[0x06] = (timings->VTotal - 2) & 0xFF;
+ reg->CRTC[0x07] = (((timings->VTotal - 2) & 0x100) >> 8) |
(((timings->VDisplay - 1) & 0x100) >> 7) |
((timings->VSyncStart & 0x100) >> 6) |
(((timings->VSyncStart - 1) & 0x100) >> 5) |
@@ -189,27 +190,27 @@ static void vgaHWInit (struct fb_var_screeninfo *var,
(((timings->VTotal - 2) & 0x200) >> 4) |
(((timings->VDisplay - 1) & 0x200) >> 3) |
((timings->VSyncStart & 0x200) >> 2);
- par->CRTC[0x08] = 0x00;
- par->CRTC[0x09] = (((timings->VSyncStart - 1) & 0x200) >> 4) | 0x40;
+ reg->CRTC[0x08] = 0x00;
+ reg->CRTC[0x09] = (((timings->VSyncStart - 1) & 0x200) >> 4) | 0x40;
if (timings->dblscan)
- par->CRTC[0x09] |= 0x80;
-
- par->CRTC[0x0a] = 0x00;
- par->CRTC[0x0b] = 0x00;
- par->CRTC[0x0c] = 0x00;
- par->CRTC[0x0d] = 0x00;
- par->CRTC[0x0e] = 0x00;
- par->CRTC[0x0f] = 0x00;
- par->CRTC[0x10] = timings->VSyncStart & 0xff;
- par->CRTC[0x11] = (timings->VSyncEnd & 0x0f) | 0x20;
- par->CRTC[0x12] = (timings->VDisplay - 1) & 0xff;
- par->CRTC[0x13] = var->xres_virtual >> 4;
- par->CRTC[0x14] = 0x00;
- par->CRTC[0x15] = (timings->VSyncStart - 1) & 0xff;
- par->CRTC[0x16] = (timings->VSyncEnd - 1) & 0xff;
- par->CRTC[0x17] = 0xc3;
- par->CRTC[0x18] = 0xff;
+ reg->CRTC[0x09] |= 0x80;
+
+ reg->CRTC[0x0a] = 0x00;
+ reg->CRTC[0x0b] = 0x00;
+ reg->CRTC[0x0c] = 0x00;
+ reg->CRTC[0x0d] = 0x00;
+ reg->CRTC[0x0e] = 0x00;
+ reg->CRTC[0x0f] = 0x00;
+ reg->CRTC[0x10] = timings->VSyncStart & 0xff;
+ reg->CRTC[0x11] = (timings->VSyncEnd & 0x0f) | 0x20;
+ reg->CRTC[0x12] = (timings->VDisplay - 1) & 0xff;
+ reg->CRTC[0x13] = var->xres_virtual >> 4;
+ reg->CRTC[0x14] = 0x00;
+ reg->CRTC[0x15] = (timings->VSyncStart - 1) & 0xff;
+ reg->CRTC[0x16] = (timings->VSyncEnd - 1) & 0xff;
+ reg->CRTC[0x17] = 0xc3;
+ reg->CRTC[0x18] = 0xff;
/*
* are these unnecessary?
@@ -220,38 +221,38 @@ static void vgaHWInit (struct fb_var_screeninfo *var,
/*
* Graphics Display Controller
*/
- par->Graphics[0x00] = 0x00;
- par->Graphics[0x01] = 0x00;
- par->Graphics[0x02] = 0x00;
- par->Graphics[0x03] = 0x00;
- par->Graphics[0x04] = 0x00;
- par->Graphics[0x05] = 0x40;
- par->Graphics[0x06] = 0x05; /* only map 64k VGA memory !!!! */
- par->Graphics[0x07] = 0x0F;
- par->Graphics[0x08] = 0xFF;
-
-
- par->Attribute[0x00] = 0x00; /* standard colormap translation */
- par->Attribute[0x01] = 0x01;
- par->Attribute[0x02] = 0x02;
- par->Attribute[0x03] = 0x03;
- par->Attribute[0x04] = 0x04;
- par->Attribute[0x05] = 0x05;
- par->Attribute[0x06] = 0x06;
- par->Attribute[0x07] = 0x07;
- par->Attribute[0x08] = 0x08;
- par->Attribute[0x09] = 0x09;
- par->Attribute[0x0a] = 0x0A;
- par->Attribute[0x0b] = 0x0B;
- par->Attribute[0x0c] = 0x0C;
- par->Attribute[0x0d] = 0x0D;
- par->Attribute[0x0e] = 0x0E;
- par->Attribute[0x0f] = 0x0F;
- par->Attribute[0x10] = 0x41;
- par->Attribute[0x11] = 0xFF;
- par->Attribute[0x12] = 0x0F;
- par->Attribute[0x13] = 0x00;
- par->Attribute[0x14] = 0x00;
+ reg->Graphics[0x00] = 0x00;
+ reg->Graphics[0x01] = 0x00;
+ reg->Graphics[0x02] = 0x00;
+ reg->Graphics[0x03] = 0x00;
+ reg->Graphics[0x04] = 0x00;
+ reg->Graphics[0x05] = 0x40;
+ reg->Graphics[0x06] = 0x05; /* only map 64k VGA memory !!!! */
+ reg->Graphics[0x07] = 0x0F;
+ reg->Graphics[0x08] = 0xFF;
+
+
+ reg->Attribute[0x00] = 0x00; /* standard colormap translation */
+ reg->Attribute[0x01] = 0x01;
+ reg->Attribute[0x02] = 0x02;
+ reg->Attribute[0x03] = 0x03;
+ reg->Attribute[0x04] = 0x04;
+ reg->Attribute[0x05] = 0x05;
+ reg->Attribute[0x06] = 0x06;
+ reg->Attribute[0x07] = 0x07;
+ reg->Attribute[0x08] = 0x08;
+ reg->Attribute[0x09] = 0x09;
+ reg->Attribute[0x0a] = 0x0A;
+ reg->Attribute[0x0b] = 0x0B;
+ reg->Attribute[0x0c] = 0x0C;
+ reg->Attribute[0x0d] = 0x0D;
+ reg->Attribute[0x0e] = 0x0E;
+ reg->Attribute[0x0f] = 0x0F;
+ reg->Attribute[0x10] = 0x41;
+ reg->Attribute[0x11] = 0xFF;
+ reg->Attribute[0x12] = 0x0F;
+ reg->Attribute[0x13] = 0x00;
+ reg->Attribute[0x14] = 0x00;
}
/* -------------------- Hardware specific routines ------------------------- */
@@ -304,15 +305,15 @@ savage2000_waitidle(struct savagefb_par *par)
while ((savage_in32(0x48C60, par) & 0x009fffff));
}
-
+#ifdef CONFIG_FB_SAVAGE_ACCEL
static void
-SavageSetup2DEngine (struct savagefb_par *par)
+SavageSetup2DEngine(struct savagefb_par *par)
{
unsigned long GlobalBitmapDescriptor;
GlobalBitmapDescriptor = 1 | 8 | BCI_BD_BW_DISABLE;
- BCI_BD_SET_BPP (GlobalBitmapDescriptor, par->depth);
- BCI_BD_SET_STRIDE (GlobalBitmapDescriptor, par->vwidth);
+ BCI_BD_SET_BPP(GlobalBitmapDescriptor, par->depth);
+ BCI_BD_SET_STRIDE(GlobalBitmapDescriptor, par->vwidth);
switch(par->chip) {
case S3_SAVAGE3D:
@@ -361,32 +362,48 @@ SavageSetup2DEngine (struct savagefb_par *par)
vga_out8(0x3d5, 0x0c, par);
/* Set stride to use GBD. */
- vga_out8 (0x3d4, 0x50, par);
- vga_out8 (0x3d5, vga_in8(0x3d5, par) | 0xC1, par);
+ vga_out8(0x3d4, 0x50, par);
+ vga_out8(0x3d5, vga_in8(0x3d5, par) | 0xC1, par);
/* Enable 2D engine. */
- vga_out8 (0x3d4, 0x40, par);
- vga_out8 (0x3d5, 0x01, par);
+ vga_out8(0x3d4, 0x40, par);
+ vga_out8(0x3d5, 0x01, par);
- savage_out32 (MONO_PAT_0, ~0, par);
- savage_out32 (MONO_PAT_1, ~0, par);
+ savage_out32(MONO_PAT_0, ~0, par);
+ savage_out32(MONO_PAT_1, ~0, par);
/* Setup plane masks */
- savage_out32 (0x8128, ~0, par); /* enable all write planes */
- savage_out32 (0x812C, ~0, par); /* enable all read planes */
- savage_out16 (0x8134, 0x27, par);
- savage_out16 (0x8136, 0x07, par);
+ savage_out32(0x8128, ~0, par); /* enable all write planes */
+ savage_out32(0x812C, ~0, par); /* enable all read planes */
+ savage_out16(0x8134, 0x27, par);
+ savage_out16(0x8136, 0x07, par);
/* Now set the GBD */
par->bci_ptr = 0;
- par->SavageWaitFifo (par, 4);
+ par->SavageWaitFifo(par, 4);
- BCI_SEND( BCI_CMD_SETREG | (1 << 16) | BCI_GBD1 );
- BCI_SEND( 0 );
- BCI_SEND( BCI_CMD_SETREG | (1 << 16) | BCI_GBD2 );
- BCI_SEND( GlobalBitmapDescriptor );
+ BCI_SEND(BCI_CMD_SETREG | (1 << 16) | BCI_GBD1);
+ BCI_SEND(0);
+ BCI_SEND(BCI_CMD_SETREG | (1 << 16) | BCI_GBD2);
+ BCI_SEND(GlobalBitmapDescriptor);
}
+static void savagefb_set_clip(struct fb_info *info)
+{
+ struct savagefb_par *par = info->par;
+ int cmd;
+
+ cmd = BCI_CMD_NOP | BCI_CMD_CLIP_NEW;
+ par->bci_ptr = 0;
+ par->SavageWaitFifo(par, 3);
+ BCI_SEND(cmd);
+ BCI_SEND(BCI_CLIP_TL(0, 0));
+ BCI_SEND(BCI_CLIP_BR(0xfff, 0xfff));
+}
+#else
+static void SavageSetup2DEngine(struct savagefb_par *par) {}
+
+#endif
static void SavageCalcClock(long freq, int min_m, int min_n1, int max_n1,
int min_n2, int max_n2, long freq_min,
@@ -398,11 +415,11 @@ static void SavageCalcClock(long freq, int min_m, int min_n1, int max_n1,
unsigned char n1, n2, best_n1=16+2, best_n2=2, best_m=125+2;
if (freq < freq_min / (1 << max_n2)) {
- printk (KERN_ERR "invalid frequency %ld Khz\n", freq);
+ printk(KERN_ERR "invalid frequency %ld Khz\n", freq);
freq = freq_min / (1 << max_n2);
}
if (freq > freq_max / (1 << min_n2)) {
- printk (KERN_ERR "invalid frequency %ld Khz\n", freq);
+ printk(KERN_ERR "invalid frequency %ld Khz\n", freq);
freq = freq_max / (1 << min_n2);
}
@@ -453,12 +470,12 @@ static int common_calc_clock(long freq, int min_m, int min_n1, int max_n1,
BASE_FREQ;
if (m < min_m + 2 || m > 127+2)
continue;
- if((m * BASE_FREQ >= freq_min * n1) &&
- (m * BASE_FREQ <= freq_max * n1)) {
+ if ((m * BASE_FREQ >= freq_min * n1) &&
+ (m * BASE_FREQ <= freq_max * n1)) {
diff = freq * (1 << n2) * n1 - BASE_FREQ * m;
- if(diff < 0)
+ if (diff < 0)
diff = -diff;
- if(diff < best_diff) {
+ if (diff < best_diff) {
best_diff = diff;
best_m = m;
best_n1 = n1;
@@ -468,7 +485,7 @@ static int common_calc_clock(long freq, int min_m, int min_n1, int max_n1,
}
}
- if(max_n1 == 63)
+ if (max_n1 == 63)
*ndiv = (best_n1 - 2) | (best_n2 << 6);
else
*ndiv = (best_n1 - 2) | (best_n2 << 5);
@@ -488,23 +505,23 @@ static void SavagePrintRegs(void)
int vgaCRReg = 0x3d5;
printk(KERN_DEBUG "SR x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE "
- "xF" );
+ "xF");
- for( i = 0; i < 0x70; i++ ) {
- if( !(i % 16) )
- printk(KERN_DEBUG "\nSR%xx ", i >> 4 );
- vga_out8( 0x3c4, i, par);
- printk(KERN_DEBUG " %02x", vga_in8(0x3c5, par) );
+ for (i = 0; i < 0x70; i++) {
+ if (!(i % 16))
+ printk(KERN_DEBUG "\nSR%xx ", i >> 4);
+ vga_out8(0x3c4, i, par);
+ printk(KERN_DEBUG " %02x", vga_in8(0x3c5, par));
}
printk(KERN_DEBUG "\n\nCR x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC "
- "xD xE xF" );
+ "xD xE xF");
- for( i = 0; i < 0xB7; i++ ) {
- if( !(i % 16) )
- printk(KERN_DEBUG "\nCR%xx ", i >> 4 );
- vga_out8( vgaCRIndex, i, par);
- printk(KERN_DEBUG " %02x", vga_in8(vgaCRReg, par) );
+ for (i = 0; i < 0xB7; i++) {
+ if (!(i % 16))
+ printk(KERN_DEBUG "\nCR%xx ", i >> 4);
+ vga_out8(vgaCRIndex, i, par);
+ printk(KERN_DEBUG " %02x", vga_in8(vgaCRReg, par));
}
printk(KERN_DEBUG "\n\n");
@@ -513,156 +530,309 @@ static void SavagePrintRegs(void)
/* --------------------------------------------------------------------- */
-static void savage_get_default_par(struct savagefb_par *par)
+static void savage_get_default_par(struct savagefb_par *par, struct savage_reg *reg)
{
unsigned char cr3a, cr53, cr66;
- vga_out16 (0x3d4, 0x4838, par);
- vga_out16 (0x3d4, 0xa039, par);
- vga_out16 (0x3c4, 0x0608, par);
-
- vga_out8 (0x3d4, 0x66, par);
- cr66 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr66 | 0x80, par);
- vga_out8 (0x3d4, 0x3a, par);
- cr3a = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr3a | 0x80, par);
- vga_out8 (0x3d4, 0x53, par);
- cr53 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr53 & 0x7f, par);
-
- vga_out8 (0x3d4, 0x66, par);
- vga_out8 (0x3d5, cr66, par);
- vga_out8 (0x3d4, 0x3a, par);
- vga_out8 (0x3d5, cr3a, par);
-
- vga_out8 (0x3d4, 0x66, par);
- vga_out8 (0x3d5, cr66, par);
- vga_out8 (0x3d4, 0x3a, par);
- vga_out8 (0x3d5, cr3a, par);
+ vga_out16(0x3d4, 0x4838, par);
+ vga_out16(0x3d4, 0xa039, par);
+ vga_out16(0x3c4, 0x0608, par);
+
+ vga_out8(0x3d4, 0x66, par);
+ cr66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr66 | 0x80, par);
+ vga_out8(0x3d4, 0x3a, par);
+ cr3a = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr3a | 0x80, par);
+ vga_out8(0x3d4, 0x53, par);
+ cr53 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr53 & 0x7f, par);
+
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
+
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
/* unlock extended seq regs */
- vga_out8 (0x3c4, 0x08, par);
- par->SR08 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c5, 0x06, par);
+ vga_out8(0x3c4, 0x08, par);
+ reg->SR08 = vga_in8(0x3c5, par);
+ vga_out8(0x3c5, 0x06, par);
/* now save all the extended regs we need */
- vga_out8 (0x3d4, 0x31, par);
- par->CR31 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x32, par);
- par->CR32 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x34, par);
- par->CR34 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x36, par);
- par->CR36 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x3a, par);
- par->CR3A = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x40, par);
- par->CR40 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x42, par);
- par->CR42 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x45, par);
- par->CR45 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x50, par);
- par->CR50 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x51, par);
- par->CR51 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x53, par);
- par->CR53 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x58, par);
- par->CR58 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x60, par);
- par->CR60 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x66, par);
- par->CR66 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x67, par);
- par->CR67 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x68, par);
- par->CR68 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x69, par);
- par->CR69 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x6f, par);
- par->CR6F = vga_in8 (0x3d5, par);
-
- vga_out8 (0x3d4, 0x33, par);
- par->CR33 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x86, par);
- par->CR86 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x88, par);
- par->CR88 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x90, par);
- par->CR90 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x91, par);
- par->CR91 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0xb0, par);
- par->CRB0 = vga_in8 (0x3d5, par) | 0x80;
+ vga_out8(0x3d4, 0x31, par);
+ reg->CR31 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x32, par);
+ reg->CR32 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x34, par);
+ reg->CR34 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x36, par);
+ reg->CR36 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x3a, par);
+ reg->CR3A = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x40, par);
+ reg->CR40 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x42, par);
+ reg->CR42 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x45, par);
+ reg->CR45 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x50, par);
+ reg->CR50 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x51, par);
+ reg->CR51 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x53, par);
+ reg->CR53 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x58, par);
+ reg->CR58 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x60, par);
+ reg->CR60 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x66, par);
+ reg->CR66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x67, par);
+ reg->CR67 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x68, par);
+ reg->CR68 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x69, par);
+ reg->CR69 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x6f, par);
+ reg->CR6F = vga_in8(0x3d5, par);
+
+ vga_out8(0x3d4, 0x33, par);
+ reg->CR33 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x86, par);
+ reg->CR86 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x88, par);
+ reg->CR88 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x90, par);
+ reg->CR90 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x91, par);
+ reg->CR91 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0xb0, par);
+ reg->CRB0 = vga_in8(0x3d5, par) | 0x80;
+
+ /* extended mode timing regs */
+ vga_out8(0x3d4, 0x3b, par);
+ reg->CR3B = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x3c, par);
+ reg->CR3C = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x43, par);
+ reg->CR43 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x5d, par);
+ reg->CR5D = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x5e, par);
+ reg->CR5E = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x65, par);
+ reg->CR65 = vga_in8(0x3d5, par);
+
+ /* save seq extended regs for DCLK PLL programming */
+ vga_out8(0x3c4, 0x0e, par);
+ reg->SR0E = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x0f, par);
+ reg->SR0F = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x10, par);
+ reg->SR10 = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x11, par);
+ reg->SR11 = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x12, par);
+ reg->SR12 = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x13, par);
+ reg->SR13 = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x29, par);
+ reg->SR29 = vga_in8(0x3c5, par);
+
+ vga_out8(0x3c4, 0x15, par);
+ reg->SR15 = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x30, par);
+ reg->SR30 = vga_in8(0x3c5, par);
+ vga_out8(0x3c4, 0x18, par);
+ reg->SR18 = vga_in8(0x3c5, par);
+
+ /* Save flat panel expansion registers. */
+ if (par->chip == S3_SAVAGE_MX) {
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ vga_out8(0x3c4, 0x54+i, par);
+ reg->SR54[i] = vga_in8(0x3c5, par);
+ }
+ }
+
+ vga_out8(0x3d4, 0x66, par);
+ cr66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr66 | 0x80, par);
+ vga_out8(0x3d4, 0x3a, par);
+ cr3a = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr3a | 0x80, par);
+
+ /* now save MIU regs */
+ if (par->chip != S3_SAVAGE_MX) {
+ reg->MMPR0 = savage_in32(FIFO_CONTROL_REG, par);
+ reg->MMPR1 = savage_in32(MIU_CONTROL_REG, par);
+ reg->MMPR2 = savage_in32(STREAMS_TIMEOUT_REG, par);
+ reg->MMPR3 = savage_in32(MISC_TIMEOUT_REG, par);
+ }
+
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
+}
+
+static void savage_set_default_par(struct savagefb_par *par,
+ struct savage_reg *reg)
+{
+ unsigned char cr3a, cr53, cr66;
+
+ vga_out16(0x3d4, 0x4838, par);
+ vga_out16(0x3d4, 0xa039, par);
+ vga_out16(0x3c4, 0x0608, par);
+
+ vga_out8(0x3d4, 0x66, par);
+ cr66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr66 | 0x80, par);
+ vga_out8(0x3d4, 0x3a, par);
+ cr3a = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr3a | 0x80, par);
+ vga_out8(0x3d4, 0x53, par);
+ cr53 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr53 & 0x7f, par);
+
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
+
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
+
+ /* unlock extended seq regs */
+ vga_out8(0x3c4, 0x08, par);
+ vga_out8(0x3c5, reg->SR08, par);
+ vga_out8(0x3c5, 0x06, par);
+
+ /* now restore all the extended regs we need */
+ vga_out8(0x3d4, 0x31, par);
+ vga_out8(0x3d5, reg->CR31, par);
+ vga_out8(0x3d4, 0x32, par);
+ vga_out8(0x3d5, reg->CR32, par);
+ vga_out8(0x3d4, 0x34, par);
+ vga_out8(0x3d5, reg->CR34, par);
+ vga_out8(0x3d4, 0x36, par);
+ vga_out8(0x3d5, reg->CR36, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, reg->CR3A, par);
+ vga_out8(0x3d4, 0x40, par);
+ vga_out8(0x3d5, reg->CR40, par);
+ vga_out8(0x3d4, 0x42, par);
+ vga_out8(0x3d5, reg->CR42, par);
+ vga_out8(0x3d4, 0x45, par);
+ vga_out8(0x3d5, reg->CR45, par);
+ vga_out8(0x3d4, 0x50, par);
+ vga_out8(0x3d5, reg->CR50, par);
+ vga_out8(0x3d4, 0x51, par);
+ vga_out8(0x3d5, reg->CR51, par);
+ vga_out8(0x3d4, 0x53, par);
+ vga_out8(0x3d5, reg->CR53, par);
+ vga_out8(0x3d4, 0x58, par);
+ vga_out8(0x3d5, reg->CR58, par);
+ vga_out8(0x3d4, 0x60, par);
+ vga_out8(0x3d5, reg->CR60, par);
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, reg->CR66, par);
+ vga_out8(0x3d4, 0x67, par);
+ vga_out8(0x3d5, reg->CR67, par);
+ vga_out8(0x3d4, 0x68, par);
+ vga_out8(0x3d5, reg->CR68, par);
+ vga_out8(0x3d4, 0x69, par);
+ vga_out8(0x3d5, reg->CR69, par);
+ vga_out8(0x3d4, 0x6f, par);
+ vga_out8(0x3d5, reg->CR6F, par);
+
+ vga_out8(0x3d4, 0x33, par);
+ vga_out8(0x3d5, reg->CR33, par);
+ vga_out8(0x3d4, 0x86, par);
+ vga_out8(0x3d5, reg->CR86, par);
+ vga_out8(0x3d4, 0x88, par);
+ vga_out8(0x3d5, reg->CR88, par);
+ vga_out8(0x3d4, 0x90, par);
+ vga_out8(0x3d5, reg->CR90, par);
+ vga_out8(0x3d4, 0x91, par);
+ vga_out8(0x3d5, reg->CR91, par);
+ vga_out8(0x3d4, 0xb0, par);
+ vga_out8(0x3d5, reg->CRB0, par);
/* extended mode timing regs */
- vga_out8 (0x3d4, 0x3b, par);
- par->CR3B = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x3c, par);
- par->CR3C = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x43, par);
- par->CR43 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x5d, par);
- par->CR5D = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x5e, par);
- par->CR5E = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x65, par);
- par->CR65 = vga_in8 (0x3d5, par);
+ vga_out8(0x3d4, 0x3b, par);
+ vga_out8(0x3d5, reg->CR3B, par);
+ vga_out8(0x3d4, 0x3c, par);
+ vga_out8(0x3d5, reg->CR3C, par);
+ vga_out8(0x3d4, 0x43, par);
+ vga_out8(0x3d5, reg->CR43, par);
+ vga_out8(0x3d4, 0x5d, par);
+ vga_out8(0x3d5, reg->CR5D, par);
+ vga_out8(0x3d4, 0x5e, par);
+ vga_out8(0x3d5, reg->CR5E, par);
+ vga_out8(0x3d4, 0x65, par);
+ vga_out8(0x3d5, reg->CR65, par);
/* save seq extended regs for DCLK PLL programming */
- vga_out8 (0x3c4, 0x0e, par);
- par->SR0E = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x0f, par);
- par->SR0F = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x10, par);
- par->SR10 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x11, par);
- par->SR11 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x12, par);
- par->SR12 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x13, par);
- par->SR13 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x29, par);
- par->SR29 = vga_in8 (0x3c5, par);
-
- vga_out8 (0x3c4, 0x15, par);
- par->SR15 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x30, par);
- par->SR30 = vga_in8 (0x3c5, par);
- vga_out8 (0x3c4, 0x18, par);
- par->SR18 = vga_in8 (0x3c5, par);
+ vga_out8(0x3c4, 0x0e, par);
+ vga_out8(0x3c5, reg->SR0E, par);
+ vga_out8(0x3c4, 0x0f, par);
+ vga_out8(0x3c5, reg->SR0F, par);
+ vga_out8(0x3c4, 0x10, par);
+ vga_out8(0x3c5, reg->SR10, par);
+ vga_out8(0x3c4, 0x11, par);
+ vga_out8(0x3c5, reg->SR11, par);
+ vga_out8(0x3c4, 0x12, par);
+ vga_out8(0x3c5, reg->SR12, par);
+ vga_out8(0x3c4, 0x13, par);
+ vga_out8(0x3c5, reg->SR13, par);
+ vga_out8(0x3c4, 0x29, par);
+ vga_out8(0x3c5, reg->SR29, par);
+
+ vga_out8(0x3c4, 0x15, par);
+ vga_out8(0x3c5, reg->SR15, par);
+ vga_out8(0x3c4, 0x30, par);
+ vga_out8(0x3c5, reg->SR30, par);
+ vga_out8(0x3c4, 0x18, par);
+ vga_out8(0x3c5, reg->SR18, par);
/* Save flat panel expansion registers. */
if (par->chip == S3_SAVAGE_MX) {
int i;
for (i = 0; i < 8; i++) {
- vga_out8 (0x3c4, 0x54+i, par);
- par->SR54[i] = vga_in8 (0x3c5, par);
+ vga_out8(0x3c4, 0x54+i, par);
+ vga_out8(0x3c5, reg->SR54[i], par);
}
}
- vga_out8 (0x3d4, 0x66, par);
- cr66 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr66 | 0x80, par);
- vga_out8 (0x3d4, 0x3a, par);
- cr3a = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr3a | 0x80, par);
+ vga_out8(0x3d4, 0x66, par);
+ cr66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr66 | 0x80, par);
+ vga_out8(0x3d4, 0x3a, par);
+ cr3a = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr3a | 0x80, par);
/* now save MIU regs */
if (par->chip != S3_SAVAGE_MX) {
- par->MMPR0 = savage_in32(FIFO_CONTROL_REG, par);
- par->MMPR1 = savage_in32(MIU_CONTROL_REG, par);
- par->MMPR2 = savage_in32(STREAMS_TIMEOUT_REG, par);
- par->MMPR3 = savage_in32(MISC_TIMEOUT_REG, par);
+ savage_out32(FIFO_CONTROL_REG, reg->MMPR0, par);
+ savage_out32(MIU_CONTROL_REG, reg->MMPR1, par);
+ savage_out32(STREAMS_TIMEOUT_REG, reg->MMPR2, par);
+ savage_out32(MISC_TIMEOUT_REG, reg->MMPR3, par);
}
- vga_out8 (0x3d4, 0x3a, par);
- vga_out8 (0x3d5, cr3a, par);
- vga_out8 (0x3d4, 0x66, par);
- vga_out8 (0x3d5, cr66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
}
static void savage_update_var(struct fb_var_screeninfo *var, struct fb_videomode *modedb)
@@ -683,8 +853,8 @@ static void savage_update_var(struct fb_var_screeninfo *var, struct fb_videomode
var->vmode = modedb->vmode;
}
-static int savagefb_check_var (struct fb_var_screeninfo *var,
- struct fb_info *info)
+static int savagefb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct savagefb_par *par = info->par;
int memlen, vramlen, mode_valid = 0;
@@ -750,10 +920,10 @@ static int savagefb_check_var (struct fb_var_screeninfo *var,
if (par->SavagePanelWidth &&
(var->xres > par->SavagePanelWidth ||
var->yres > par->SavagePanelHeight)) {
- printk (KERN_INFO "Mode (%dx%d) larger than the LCD panel "
- "(%dx%d)\n", var->xres, var->yres,
- par->SavagePanelWidth,
- par->SavagePanelHeight);
+ printk(KERN_INFO "Mode (%dx%d) larger than the LCD panel "
+ "(%dx%d)\n", var->xres, var->yres,
+ par->SavagePanelWidth,
+ par->SavagePanelHeight);
return -1;
}
@@ -788,8 +958,9 @@ static int savagefb_check_var (struct fb_var_screeninfo *var,
}
-static int savagefb_decode_var (struct fb_var_screeninfo *var,
- struct savagefb_par *par)
+static int savagefb_decode_var(struct fb_var_screeninfo *var,
+ struct savagefb_par *par,
+ struct savage_reg *reg)
{
struct xtimings timings;
int width, dclk, i, j; /*, refresh; */
@@ -799,7 +970,7 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
DBG("savagefb_decode_var");
- memset (&timings, 0, sizeof(timings));
+ memset(&timings, 0, sizeof(timings));
if (!pixclock) pixclock = 10000; /* 10ns = 100MHz */
timings.Clock = 1000000000 / pixclock;
@@ -831,39 +1002,39 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
* This will allocate the datastructure and initialize all of the
* generic VGA registers.
*/
- vgaHWInit (var, par, &timings);
+ vgaHWInit(var, par, &timings, reg);
/* We need to set CR67 whether or not we use the BIOS. */
dclk = timings.Clock;
- par->CR67 = 0x00;
+ reg->CR67 = 0x00;
- switch( var->bits_per_pixel ) {
+ switch(var->bits_per_pixel) {
case 8:
- if( (par->chip == S3_SAVAGE2000) && (dclk >= 230000) )
- par->CR67 = 0x10; /* 8bpp, 2 pixels/clock */
+ if ((par->chip == S3_SAVAGE2000) && (dclk >= 230000))
+ reg->CR67 = 0x10; /* 8bpp, 2 pixels/clock */
else
- par->CR67 = 0x00; /* 8bpp, 1 pixel/clock */
+ reg->CR67 = 0x00; /* 8bpp, 1 pixel/clock */
break;
case 15:
- if ( S3_SAVAGE_MOBILE_SERIES(par->chip) ||
- ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)) )
- par->CR67 = 0x30; /* 15bpp, 2 pixel/clock */
+ if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
+ ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)))
+ reg->CR67 = 0x30; /* 15bpp, 2 pixel/clock */
else
- par->CR67 = 0x20; /* 15bpp, 1 pixels/clock */
+ reg->CR67 = 0x20; /* 15bpp, 1 pixels/clock */
break;
case 16:
- if( S3_SAVAGE_MOBILE_SERIES(par->chip) ||
- ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)) )
- par->CR67 = 0x50; /* 16bpp, 2 pixel/clock */
+ if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
+ ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)))
+ reg->CR67 = 0x50; /* 16bpp, 2 pixel/clock */
else
- par->CR67 = 0x40; /* 16bpp, 1 pixels/clock */
+ reg->CR67 = 0x40; /* 16bpp, 1 pixels/clock */
break;
case 24:
- par->CR67 = 0x70;
+ reg->CR67 = 0x70;
break;
case 32:
- par->CR67 = 0xd0;
+ reg->CR67 = 0xd0;
break;
}
@@ -872,61 +1043,61 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
* match. Fall back to traditional register-crunching.
*/
- vga_out8 (0x3d4, 0x3a, par);
- tmp = vga_in8 (0x3d5, par);
+ vga_out8(0x3d4, 0x3a, par);
+ tmp = vga_in8(0x3d5, par);
if (1 /*FIXME:psav->pci_burst*/)
- par->CR3A = (tmp & 0x7f) | 0x15;
+ reg->CR3A = (tmp & 0x7f) | 0x15;
else
- par->CR3A = tmp | 0x95;
+ reg->CR3A = tmp | 0x95;
- par->CR53 = 0x00;
- par->CR31 = 0x8c;
- par->CR66 = 0x89;
+ reg->CR53 = 0x00;
+ reg->CR31 = 0x8c;
+ reg->CR66 = 0x89;
- vga_out8 (0x3d4, 0x58, par);
- par->CR58 = vga_in8 (0x3d5, par) & 0x80;
- par->CR58 |= 0x13;
+ vga_out8(0x3d4, 0x58, par);
+ reg->CR58 = vga_in8(0x3d5, par) & 0x80;
+ reg->CR58 |= 0x13;
- par->SR15 = 0x03 | 0x80;
- par->SR18 = 0x00;
- par->CR43 = par->CR45 = par->CR65 = 0x00;
+ reg->SR15 = 0x03 | 0x80;
+ reg->SR18 = 0x00;
+ reg->CR43 = reg->CR45 = reg->CR65 = 0x00;
- vga_out8 (0x3d4, 0x40, par);
- par->CR40 = vga_in8 (0x3d5, par) & ~0x01;
+ vga_out8(0x3d4, 0x40, par);
+ reg->CR40 = vga_in8(0x3d5, par) & ~0x01;
- par->MMPR0 = 0x010400;
- par->MMPR1 = 0x00;
- par->MMPR2 = 0x0808;
- par->MMPR3 = 0x08080810;
+ reg->MMPR0 = 0x010400;
+ reg->MMPR1 = 0x00;
+ reg->MMPR2 = 0x0808;
+ reg->MMPR3 = 0x08080810;
- SavageCalcClock (dclk, 1, 1, 127, 0, 4, 180000, 360000, &m, &n, &r);
+ SavageCalcClock(dclk, 1, 1, 127, 0, 4, 180000, 360000, &m, &n, &r);
/* m = 107; n = 4; r = 2; */
if (par->MCLK <= 0) {
- par->SR10 = 255;
- par->SR11 = 255;
+ reg->SR10 = 255;
+ reg->SR11 = 255;
} else {
- common_calc_clock (par->MCLK, 1, 1, 31, 0, 3, 135000, 270000,
- &par->SR11, &par->SR10);
- /* par->SR10 = 80; // MCLK == 286000 */
- /* par->SR11 = 125; */
+ common_calc_clock(par->MCLK, 1, 1, 31, 0, 3, 135000, 270000,
+ &reg->SR11, &reg->SR10);
+ /* reg->SR10 = 80; // MCLK == 286000 */
+ /* reg->SR11 = 125; */
}
- par->SR12 = (r << 6) | (n & 0x3f);
- par->SR13 = m & 0xff;
- par->SR29 = (r & 4) | (m & 0x100) >> 5 | (n & 0x40) >> 2;
+ reg->SR12 = (r << 6) | (n & 0x3f);
+ reg->SR13 = m & 0xff;
+ reg->SR29 = (r & 4) | (m & 0x100) >> 5 | (n & 0x40) >> 2;
if (var->bits_per_pixel < 24)
- par->MMPR0 -= 0x8000;
+ reg->MMPR0 -= 0x8000;
else
- par->MMPR0 -= 0x4000;
+ reg->MMPR0 -= 0x4000;
if (timings.interlaced)
- par->CR42 = 0x20;
+ reg->CR42 = 0x20;
else
- par->CR42 = 0x00;
+ reg->CR42 = 0x00;
- par->CR34 = 0x10; /* display fifo */
+ reg->CR34 = 0x10; /* display fifo */
i = ((((timings.HTotal >> 3) - 5) & 0x100) >> 8) |
((((timings.HDisplay >> 3) - 1) & 0x100) >> 7) |
@@ -938,77 +1109,77 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
if ((timings.HSyncEnd >> 3) - (timings.HSyncStart >> 3) > 32)
i |= 0x20;
- j = (par->CRTC[0] + ((i & 0x01) << 8) +
- par->CRTC[4] + ((i & 0x10) << 4) + 1) / 2;
+ j = (reg->CRTC[0] + ((i & 0x01) << 8) +
+ reg->CRTC[4] + ((i & 0x10) << 4) + 1) / 2;
- if (j - (par->CRTC[4] + ((i & 0x10) << 4)) < 4) {
- if (par->CRTC[4] + ((i & 0x10) << 4) + 4 <=
- par->CRTC[0] + ((i & 0x01) << 8))
- j = par->CRTC[4] + ((i & 0x10) << 4) + 4;
+ if (j - (reg->CRTC[4] + ((i & 0x10) << 4)) < 4) {
+ if (reg->CRTC[4] + ((i & 0x10) << 4) + 4 <=
+ reg->CRTC[0] + ((i & 0x01) << 8))
+ j = reg->CRTC[4] + ((i & 0x10) << 4) + 4;
else
- j = par->CRTC[0] + ((i & 0x01) << 8) + 1;
+ j = reg->CRTC[0] + ((i & 0x01) << 8) + 1;
}
- par->CR3B = j & 0xff;
+ reg->CR3B = j & 0xff;
i |= (j & 0x100) >> 2;
- par->CR3C = (par->CRTC[0] + ((i & 0x01) << 8)) / 2;
- par->CR5D = i;
- par->CR5E = (((timings.VTotal - 2) & 0x400) >> 10) |
+ reg->CR3C = (reg->CRTC[0] + ((i & 0x01) << 8)) / 2;
+ reg->CR5D = i;
+ reg->CR5E = (((timings.VTotal - 2) & 0x400) >> 10) |
(((timings.VDisplay - 1) & 0x400) >> 9) |
(((timings.VSyncStart) & 0x400) >> 8) |
(((timings.VSyncStart) & 0x400) >> 6) | 0x40;
width = (var->xres_virtual * ((var->bits_per_pixel+7) / 8)) >> 3;
- par->CR91 = par->CRTC[19] = 0xff & width;
- par->CR51 = (0x300 & width) >> 4;
- par->CR90 = 0x80 | (width >> 8);
- par->MiscOutReg |= 0x0c;
+ reg->CR91 = reg->CRTC[19] = 0xff & width;
+ reg->CR51 = (0x300 & width) >> 4;
+ reg->CR90 = 0x80 | (width >> 8);
+ reg->MiscOutReg |= 0x0c;
/* Set frame buffer description. */
if (var->bits_per_pixel <= 8)
- par->CR50 = 0;
+ reg->CR50 = 0;
else if (var->bits_per_pixel <= 16)
- par->CR50 = 0x10;
+ reg->CR50 = 0x10;
else
- par->CR50 = 0x30;
+ reg->CR50 = 0x30;
if (var->xres_virtual <= 640)
- par->CR50 |= 0x40;
+ reg->CR50 |= 0x40;
else if (var->xres_virtual == 800)
- par->CR50 |= 0x80;
+ reg->CR50 |= 0x80;
else if (var->xres_virtual == 1024)
- par->CR50 |= 0x00;
+ reg->CR50 |= 0x00;
else if (var->xres_virtual == 1152)
- par->CR50 |= 0x01;
+ reg->CR50 |= 0x01;
else if (var->xres_virtual == 1280)
- par->CR50 |= 0xc0;
+ reg->CR50 |= 0xc0;
else if (var->xres_virtual == 1600)
- par->CR50 |= 0x81;
+ reg->CR50 |= 0x81;
else
- par->CR50 |= 0xc1; /* Use GBD */
+ reg->CR50 |= 0xc1; /* Use GBD */
- if( par->chip == S3_SAVAGE2000 )
- par->CR33 = 0x08;
+ if (par->chip == S3_SAVAGE2000)
+ reg->CR33 = 0x08;
else
- par->CR33 = 0x20;
+ reg->CR33 = 0x20;
- par->CRTC[0x17] = 0xeb;
+ reg->CRTC[0x17] = 0xeb;
- par->CR67 |= 1;
+ reg->CR67 |= 1;
vga_out8(0x3d4, 0x36, par);
- par->CR36 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x68, par);
- par->CR68 = vga_in8 (0x3d5, par);
- par->CR69 = 0;
- vga_out8 (0x3d4, 0x6f, par);
- par->CR6F = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x86, par);
- par->CR86 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d4, 0x88, par);
- par->CR88 = vga_in8 (0x3d5, par) | 0x08;
- vga_out8 (0x3d4, 0xb0, par);
- par->CRB0 = vga_in8 (0x3d5, par) | 0x80;
+ reg->CR36 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x68, par);
+ reg->CR68 = vga_in8(0x3d5, par);
+ reg->CR69 = 0;
+ vga_out8(0x3d4, 0x6f, par);
+ reg->CR6F = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x86, par);
+ reg->CR86 = vga_in8(0x3d5, par);
+ vga_out8(0x3d4, 0x88, par);
+ reg->CR88 = vga_in8(0x3d5, par) | 0x08;
+ vga_out8(0x3d4, 0xb0, par);
+ reg->CRB0 = vga_in8(0x3d5, par) | 0x80;
return 0;
}
@@ -1037,11 +1208,11 @@ static int savagefb_setcolreg(unsigned regno,
switch (info->var.bits_per_pixel) {
case 8:
- vga_out8 (0x3c8, regno, par);
+ vga_out8(0x3c8, regno, par);
- vga_out8 (0x3c9, red >> 10, par);
- vga_out8 (0x3c9, green >> 10, par);
- vga_out8 (0x3c9, blue >> 10, par);
+ vga_out8(0x3c9, red >> 10, par);
+ vga_out8(0x3c9, green >> 10, par);
+ vga_out8(0x3c9, blue >> 10, par);
break;
case 16:
@@ -1075,21 +1246,21 @@ static int savagefb_setcolreg(unsigned regno,
return 0;
}
-static void savagefb_set_par_int (struct savagefb_par *par)
+static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *reg)
{
unsigned char tmp, cr3a, cr66, cr67;
- DBG ("savagefb_set_par_int");
+ DBG("savagefb_set_par_int");
- par->SavageWaitIdle (par);
+ par->SavageWaitIdle(par);
- vga_out8 (0x3c2, 0x23, par);
+ vga_out8(0x3c2, 0x23, par);
- vga_out16 (0x3d4, 0x4838, par);
- vga_out16 (0x3d4, 0xa539, par);
- vga_out16 (0x3c4, 0x0608, par);
+ vga_out16(0x3d4, 0x4838, par);
+ vga_out16(0x3d4, 0xa539, par);
+ vga_out16(0x3c4, 0x0608, par);
- vgaHWProtect (par, 1);
+ vgaHWProtect(par, 1);
/*
* Some Savage/MX and /IX systems go nuts when trying to exit the
@@ -1099,203 +1270,202 @@ static void savagefb_set_par_int (struct savagefb_par *par)
*/
VerticalRetraceWait(par);
- vga_out8 (0x3d4, 0x67, par);
- cr67 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr67/*par->CR67*/ & ~0x0c, par); /* no STREAMS yet */
+ vga_out8(0x3d4, 0x67, par);
+ cr67 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr67/*par->CR67*/ & ~0x0c, par); /* no STREAMS yet */
- vga_out8 (0x3d4, 0x23, par);
- vga_out8 (0x3d5, 0x00, par);
- vga_out8 (0x3d4, 0x26, par);
- vga_out8 (0x3d5, 0x00, par);
+ vga_out8(0x3d4, 0x23, par);
+ vga_out8(0x3d5, 0x00, par);
+ vga_out8(0x3d4, 0x26, par);
+ vga_out8(0x3d5, 0x00, par);
/* restore extended regs */
- vga_out8 (0x3d4, 0x66, par);
- vga_out8 (0x3d5, par->CR66, par);
- vga_out8 (0x3d4, 0x3a, par);
- vga_out8 (0x3d5, par->CR3A, par);
- vga_out8 (0x3d4, 0x31, par);
- vga_out8 (0x3d5, par->CR31, par);
- vga_out8 (0x3d4, 0x32, par);
- vga_out8 (0x3d5, par->CR32, par);
- vga_out8 (0x3d4, 0x58, par);
- vga_out8 (0x3d5, par->CR58, par);
- vga_out8 (0x3d4, 0x53, par);
- vga_out8 (0x3d5, par->CR53 & 0x7f, par);
-
- vga_out16 (0x3c4, 0x0608, par);
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, reg->CR66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, reg->CR3A, par);
+ vga_out8(0x3d4, 0x31, par);
+ vga_out8(0x3d5, reg->CR31, par);
+ vga_out8(0x3d4, 0x32, par);
+ vga_out8(0x3d5, reg->CR32, par);
+ vga_out8(0x3d4, 0x58, par);
+ vga_out8(0x3d5, reg->CR58, par);
+ vga_out8(0x3d4, 0x53, par);
+ vga_out8(0x3d5, reg->CR53 & 0x7f, par);
+
+ vga_out16(0x3c4, 0x0608, par);
/* Restore DCLK registers. */
- vga_out8 (0x3c4, 0x0e, par);
- vga_out8 (0x3c5, par->SR0E, par);
- vga_out8 (0x3c4, 0x0f, par);
- vga_out8 (0x3c5, par->SR0F, par);
- vga_out8 (0x3c4, 0x29, par);
- vga_out8 (0x3c5, par->SR29, par);
- vga_out8 (0x3c4, 0x15, par);
- vga_out8 (0x3c5, par->SR15, par);
+ vga_out8(0x3c4, 0x0e, par);
+ vga_out8(0x3c5, reg->SR0E, par);
+ vga_out8(0x3c4, 0x0f, par);
+ vga_out8(0x3c5, reg->SR0F, par);
+ vga_out8(0x3c4, 0x29, par);
+ vga_out8(0x3c5, reg->SR29, par);
+ vga_out8(0x3c4, 0x15, par);
+ vga_out8(0x3c5, reg->SR15, par);
/* Restore flat panel expansion registers. */
- if( par->chip == S3_SAVAGE_MX ) {
+ if (par->chip == S3_SAVAGE_MX) {
int i;
- for( i = 0; i < 8; i++ ) {
- vga_out8 (0x3c4, 0x54+i, par);
- vga_out8 (0x3c5, par->SR54[i], par);
+ for (i = 0; i < 8; i++) {
+ vga_out8(0x3c4, 0x54+i, par);
+ vga_out8(0x3c5, reg->SR54[i], par);
}
}
- vgaHWRestore (par);
+ vgaHWRestore(par, reg);
/* extended mode timing registers */
- vga_out8 (0x3d4, 0x53, par);
- vga_out8 (0x3d5, par->CR53, par);
- vga_out8 (0x3d4, 0x5d, par);
- vga_out8 (0x3d5, par->CR5D, par);
- vga_out8 (0x3d4, 0x5e, par);
- vga_out8 (0x3d5, par->CR5E, par);
- vga_out8 (0x3d4, 0x3b, par);
- vga_out8 (0x3d5, par->CR3B, par);
- vga_out8 (0x3d4, 0x3c, par);
- vga_out8 (0x3d5, par->CR3C, par);
- vga_out8 (0x3d4, 0x43, par);
- vga_out8 (0x3d5, par->CR43, par);
- vga_out8 (0x3d4, 0x65, par);
- vga_out8 (0x3d5, par->CR65, par);
+ vga_out8(0x3d4, 0x53, par);
+ vga_out8(0x3d5, reg->CR53, par);
+ vga_out8(0x3d4, 0x5d, par);
+ vga_out8(0x3d5, reg->CR5D, par);
+ vga_out8(0x3d4, 0x5e, par);
+ vga_out8(0x3d5, reg->CR5E, par);
+ vga_out8(0x3d4, 0x3b, par);
+ vga_out8(0x3d5, reg->CR3B, par);
+ vga_out8(0x3d4, 0x3c, par);
+ vga_out8(0x3d5, reg->CR3C, par);
+ vga_out8(0x3d4, 0x43, par);
+ vga_out8(0x3d5, reg->CR43, par);
+ vga_out8(0x3d4, 0x65, par);
+ vga_out8(0x3d5, reg->CR65, par);
/* restore the desired video mode with cr67 */
- vga_out8 (0x3d4, 0x67, par);
+ vga_out8(0x3d4, 0x67, par);
/* following part not present in X11 driver */
- cr67 = vga_in8 (0x3d5, par) & 0xf;
- vga_out8 (0x3d5, 0x50 | cr67, par);
- udelay (10000);
- vga_out8 (0x3d4, 0x67, par);
+ cr67 = vga_in8(0x3d5, par) & 0xf;
+ vga_out8(0x3d5, 0x50 | cr67, par);
+ udelay(10000);
+ vga_out8(0x3d4, 0x67, par);
/* end of part */
- vga_out8 (0x3d5, par->CR67 & ~0x0c, par);
+ vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
/* other mode timing and extended regs */
- vga_out8 (0x3d4, 0x34, par);
- vga_out8 (0x3d5, par->CR34, par);
- vga_out8 (0x3d4, 0x40, par);
- vga_out8 (0x3d5, par->CR40, par);
- vga_out8 (0x3d4, 0x42, par);
- vga_out8 (0x3d5, par->CR42, par);
- vga_out8 (0x3d4, 0x45, par);
- vga_out8 (0x3d5, par->CR45, par);
- vga_out8 (0x3d4, 0x50, par);
- vga_out8 (0x3d5, par->CR50, par);
- vga_out8 (0x3d4, 0x51, par);
- vga_out8 (0x3d5, par->CR51, par);
+ vga_out8(0x3d4, 0x34, par);
+ vga_out8(0x3d5, reg->CR34, par);
+ vga_out8(0x3d4, 0x40, par);
+ vga_out8(0x3d5, reg->CR40, par);
+ vga_out8(0x3d4, 0x42, par);
+ vga_out8(0x3d5, reg->CR42, par);
+ vga_out8(0x3d4, 0x45, par);
+ vga_out8(0x3d5, reg->CR45, par);
+ vga_out8(0x3d4, 0x50, par);
+ vga_out8(0x3d5, reg->CR50, par);
+ vga_out8(0x3d4, 0x51, par);
+ vga_out8(0x3d5, reg->CR51, par);
/* memory timings */
- vga_out8 (0x3d4, 0x36, par);
- vga_out8 (0x3d5, par->CR36, par);
- vga_out8 (0x3d4, 0x60, par);
- vga_out8 (0x3d5, par->CR60, par);
- vga_out8 (0x3d4, 0x68, par);
- vga_out8 (0x3d5, par->CR68, par);
- vga_out8 (0x3d4, 0x69, par);
- vga_out8 (0x3d5, par->CR69, par);
- vga_out8 (0x3d4, 0x6f, par);
- vga_out8 (0x3d5, par->CR6F, par);
-
- vga_out8 (0x3d4, 0x33, par);
- vga_out8 (0x3d5, par->CR33, par);
- vga_out8 (0x3d4, 0x86, par);
- vga_out8 (0x3d5, par->CR86, par);
- vga_out8 (0x3d4, 0x88, par);
- vga_out8 (0x3d5, par->CR88, par);
- vga_out8 (0x3d4, 0x90, par);
- vga_out8 (0x3d5, par->CR90, par);
- vga_out8 (0x3d4, 0x91, par);
- vga_out8 (0x3d5, par->CR91, par);
+ vga_out8(0x3d4, 0x36, par);
+ vga_out8(0x3d5, reg->CR36, par);
+ vga_out8(0x3d4, 0x60, par);
+ vga_out8(0x3d5, reg->CR60, par);
+ vga_out8(0x3d4, 0x68, par);
+ vga_out8(0x3d5, reg->CR68, par);
+ vga_out8(0x3d4, 0x69, par);
+ vga_out8(0x3d5, reg->CR69, par);
+ vga_out8(0x3d4, 0x6f, par);
+ vga_out8(0x3d5, reg->CR6F, par);
+
+ vga_out8(0x3d4, 0x33, par);
+ vga_out8(0x3d5, reg->CR33, par);
+ vga_out8(0x3d4, 0x86, par);
+ vga_out8(0x3d5, reg->CR86, par);
+ vga_out8(0x3d4, 0x88, par);
+ vga_out8(0x3d5, reg->CR88, par);
+ vga_out8(0x3d4, 0x90, par);
+ vga_out8(0x3d5, reg->CR90, par);
+ vga_out8(0x3d4, 0x91, par);
+ vga_out8(0x3d5, reg->CR91, par);
if (par->chip == S3_SAVAGE4) {
- vga_out8 (0x3d4, 0xb0, par);
- vga_out8 (0x3d5, par->CRB0, par);
+ vga_out8(0x3d4, 0xb0, par);
+ vga_out8(0x3d5, reg->CRB0, par);
}
- vga_out8 (0x3d4, 0x32, par);
- vga_out8 (0x3d5, par->CR32, par);
+ vga_out8(0x3d4, 0x32, par);
+ vga_out8(0x3d5, reg->CR32, par);
/* unlock extended seq regs */
- vga_out8 (0x3c4, 0x08, par);
- vga_out8 (0x3c5, 0x06, par);
+ vga_out8(0x3c4, 0x08, par);
+ vga_out8(0x3c5, 0x06, par);
/* Restore extended sequencer regs for MCLK. SR10 == 255 indicates
* that we should leave the default SR10 and SR11 values there.
*/
- if (par->SR10 != 255) {
- vga_out8 (0x3c4, 0x10, par);
- vga_out8 (0x3c5, par->SR10, par);
- vga_out8 (0x3c4, 0x11, par);
- vga_out8 (0x3c5, par->SR11, par);
+ if (reg->SR10 != 255) {
+ vga_out8(0x3c4, 0x10, par);
+ vga_out8(0x3c5, reg->SR10, par);
+ vga_out8(0x3c4, 0x11, par);
+ vga_out8(0x3c5, reg->SR11, par);
}
/* restore extended seq regs for dclk */
- vga_out8 (0x3c4, 0x0e, par);
- vga_out8 (0x3c5, par->SR0E, par);
- vga_out8 (0x3c4, 0x0f, par);
- vga_out8 (0x3c5, par->SR0F, par);
- vga_out8 (0x3c4, 0x12, par);
- vga_out8 (0x3c5, par->SR12, par);
- vga_out8 (0x3c4, 0x13, par);
- vga_out8 (0x3c5, par->SR13, par);
- vga_out8 (0x3c4, 0x29, par);
- vga_out8 (0x3c5, par->SR29, par);
-
- vga_out8 (0x3c4, 0x18, par);
- vga_out8 (0x3c5, par->SR18, par);
+ vga_out8(0x3c4, 0x0e, par);
+ vga_out8(0x3c5, reg->SR0E, par);
+ vga_out8(0x3c4, 0x0f, par);
+ vga_out8(0x3c5, reg->SR0F, par);
+ vga_out8(0x3c4, 0x12, par);
+ vga_out8(0x3c5, reg->SR12, par);
+ vga_out8(0x3c4, 0x13, par);
+ vga_out8(0x3c5, reg->SR13, par);
+ vga_out8(0x3c4, 0x29, par);
+ vga_out8(0x3c5, reg->SR29, par);
+ vga_out8(0x3c4, 0x18, par);
+ vga_out8(0x3c5, reg->SR18, par);
/* load new m, n pll values for dclk & mclk */
- vga_out8 (0x3c4, 0x15, par);
- tmp = vga_in8 (0x3c5, par) & ~0x21;
+ vga_out8(0x3c4, 0x15, par);
+ tmp = vga_in8(0x3c5, par) & ~0x21;
- vga_out8 (0x3c5, tmp | 0x03, par);
- vga_out8 (0x3c5, tmp | 0x23, par);
- vga_out8 (0x3c5, tmp | 0x03, par);
- vga_out8 (0x3c5, par->SR15, par);
- udelay (100);
+ vga_out8(0x3c5, tmp | 0x03, par);
+ vga_out8(0x3c5, tmp | 0x23, par);
+ vga_out8(0x3c5, tmp | 0x03, par);
+ vga_out8(0x3c5, reg->SR15, par);
+ udelay(100);
- vga_out8 (0x3c4, 0x30, par);
- vga_out8 (0x3c5, par->SR30, par);
- vga_out8 (0x3c4, 0x08, par);
- vga_out8 (0x3c5, par->SR08, par);
+ vga_out8(0x3c4, 0x30, par);
+ vga_out8(0x3c5, reg->SR30, par);
+ vga_out8(0x3c4, 0x08, par);
+ vga_out8(0x3c5, reg->SR08, par);
/* now write out cr67 in full, possibly starting STREAMS */
VerticalRetraceWait(par);
- vga_out8 (0x3d4, 0x67, par);
- vga_out8 (0x3d5, par->CR67, par);
+ vga_out8(0x3d4, 0x67, par);
+ vga_out8(0x3d5, reg->CR67, par);
- vga_out8 (0x3d4, 0x66, par);
- cr66 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr66 | 0x80, par);
- vga_out8 (0x3d4, 0x3a, par);
- cr3a = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr3a | 0x80, par);
+ vga_out8(0x3d4, 0x66, par);
+ cr66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr66 | 0x80, par);
+ vga_out8(0x3d4, 0x3a, par);
+ cr3a = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr3a | 0x80, par);
if (par->chip != S3_SAVAGE_MX) {
VerticalRetraceWait(par);
- savage_out32 (FIFO_CONTROL_REG, par->MMPR0, par);
- par->SavageWaitIdle (par);
- savage_out32 (MIU_CONTROL_REG, par->MMPR1, par);
- par->SavageWaitIdle (par);
- savage_out32 (STREAMS_TIMEOUT_REG, par->MMPR2, par);
- par->SavageWaitIdle (par);
- savage_out32 (MISC_TIMEOUT_REG, par->MMPR3, par);
+ savage_out32(FIFO_CONTROL_REG, reg->MMPR0, par);
+ par->SavageWaitIdle(par);
+ savage_out32(MIU_CONTROL_REG, reg->MMPR1, par);
+ par->SavageWaitIdle(par);
+ savage_out32(STREAMS_TIMEOUT_REG, reg->MMPR2, par);
+ par->SavageWaitIdle(par);
+ savage_out32(MISC_TIMEOUT_REG, reg->MMPR3, par);
}
- vga_out8 (0x3d4, 0x66, par);
- vga_out8 (0x3d5, cr66, par);
- vga_out8 (0x3d4, 0x3a, par);
- vga_out8 (0x3d5, cr3a, par);
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66, par);
+ vga_out8(0x3d4, 0x3a, par);
+ vga_out8(0x3d5, cr3a, par);
- SavageSetup2DEngine (par);
- vgaHWProtect (par, 0);
+ SavageSetup2DEngine(par);
+ vgaHWProtect(par, 0);
}
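
Editor's note: the hunks above are the core of this savagefb change. Every vga_out8()/savage_out32() now reads its value from a separate register-state argument (reg->CRxx, reg->SRxx, reg->MMPRx) instead of from the par structure itself. A rough sketch of what such a state container and the reworked entry point could look like, with a hypothetical name and a field list inferred only from the registers written above:

        struct savage_state_sketch {    /* hypothetical; mirrors the fields used above */
                unsigned char CR32, CR33, CR34, CR36, CR3B, CR3C, CR40, CR42, CR43;
                unsigned char CR45, CR50, CR51, CR53, CR5D, CR5E, CR60, CR65, CR67;
                unsigned char CR68, CR69, CR6F, CR86, CR88, CR90, CR91, CRB0;
                unsigned char SR08, SR0E, SR0F, SR10, SR11, SR12, SR13, SR15, SR18;
                unsigned char SR29, SR30;
                unsigned int  MMPR0, MMPR1, MMPR2, MMPR3;  /* FIFO/streams timeouts */
        };

        /* the mode-programming routine then takes the state to apply explicitly,
         * so it can replay either the current mode (par->state) or a saved copy
         * (par->save), which is what the save/restore and suspend hunks below rely on */
        void savagefb_set_par_int(struct savagefb_par *par,
                                  struct savage_state_sketch *reg);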
-static void savagefb_update_start (struct savagefb_par *par,
- struct fb_var_screeninfo *var)
+static void savagefb_update_start(struct savagefb_par *par,
+ struct fb_var_screeninfo *var)
{
int base;
@@ -1305,8 +1475,8 @@ static void savagefb_update_start (struct savagefb_par *par,
/* now program the start address registers */
vga_out16(0x3d4, (base & 0x00ff00) | 0x0c, par);
vga_out16(0x3d4, ((base & 0x00ff) << 8) | 0x0d, par);
- vga_out8 (0x3d4, 0x69, par);
- vga_out8 (0x3d5, (base & 0x7f0000) >> 16, par);
+ vga_out8(0x3d4, 0x69, par);
+ vga_out8(0x3d5, (base & 0x7f0000) >> 16, par);
}
@@ -1325,29 +1495,14 @@ static void savagefb_set_fix(struct fb_info *info)
}
-#if defined(CONFIG_FB_SAVAGE_ACCEL)
-static void savagefb_set_clip(struct fb_info *info)
-{
- struct savagefb_par *par = info->par;
- int cmd;
-
- cmd = BCI_CMD_NOP | BCI_CMD_CLIP_NEW;
- par->bci_ptr = 0;
- par->SavageWaitFifo(par,3);
- BCI_SEND(cmd);
- BCI_SEND(BCI_CLIP_TL(0, 0));
- BCI_SEND(BCI_CLIP_BR(0xfff, 0xfff));
-}
-#endif
-
-static int savagefb_set_par (struct fb_info *info)
+static int savagefb_set_par(struct fb_info *info)
{
struct savagefb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
int err;
DBG("savagefb_set_par");
- err = savagefb_decode_var (var, par);
+ err = savagefb_decode_var(var, par, &par->state);
if (err)
return err;
@@ -1366,8 +1521,8 @@ static int savagefb_set_par (struct fb_info *info)
par->maxClock = par->dacSpeedBpp;
par->minClock = 10000;
- savagefb_set_par_int (par);
- fb_set_cmap (&info->cmap, info);
+ savagefb_set_par_int(par, &par->state);
+ fb_set_cmap(&info->cmap, info);
savagefb_set_fix(info);
savagefb_set_clip(info);
@@ -1378,12 +1533,12 @@ static int savagefb_set_par (struct fb_info *info)
/*
* Pan or Wrap the Display
*/
-static int savagefb_pan_display (struct fb_var_screeninfo *var,
- struct fb_info *info)
+static int savagefb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct savagefb_par *par = info->par;
- savagefb_update_start (par, var);
+ savagefb_update_start(par, var);
return 0;
}
@@ -1440,6 +1595,22 @@ static int savagefb_blank(int blank, struct fb_info *info)
return (blank == FB_BLANK_NORMAL) ? 1 : 0;
}
+static void savagefb_save_state(struct fb_info *info)
+{
+ struct savagefb_par *par = info->par;
+
+ savage_get_default_par(par, &par->save);
+}
+
+static void savagefb_restore_state(struct fb_info *info)
+{
+ struct savagefb_par *par = info->par;
+
+ savagefb_blank(FB_BLANK_POWERDOWN, info);
+ savage_set_default_par(par, &par->save);
+ savagefb_blank(FB_BLANK_UNBLANK, info);
+}
+
static struct fb_ops savagefb_ops = {
.owner = THIS_MODULE,
.fb_check_var = savagefb_check_var,
@@ -1447,6 +1618,8 @@ static struct fb_ops savagefb_ops = {
.fb_setcolreg = savagefb_setcolreg,
.fb_pan_display = savagefb_pan_display,
.fb_blank = savagefb_blank,
+ .fb_save_state = savagefb_save_state,
+ .fb_restore_state = savagefb_restore_state,
#if defined(CONFIG_FB_SAVAGE_ACCEL)
.fb_fillrect = savagefb_fillrect,
.fb_copyarea = savagefb_copyarea,
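
Editor's note: with the register state split out, the driver can expose the generic fb_save_state/fb_restore_state hooks wired into savagefb_ops above. A minimal sketch of how a caller above the driver might use them (hypothetical caller; the real call sites live in the framebuffer console code, which this patch does not show):

        #include <linux/fb.h>

        static void example_save_before_takeover(struct fb_info *info)
        {
                /* snapshot the hardware state before something else
                 * (e.g. a user-space server) reprograms the card */
                if (info->fbops->fb_save_state)
                        info->fbops->fb_save_state(info);
        }

        static void example_restore_after_takeover(struct fb_info *info)
        {
                /* put the saved mode back; savagefb blanks around the
                 * restore to avoid visible glitches, as in the hunk above */
                if (info->fbops->fb_restore_state)
                        info->fbops->fb_restore_state(info);
        }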
@@ -1479,59 +1652,59 @@ static struct fb_var_screeninfo __devinitdata savagefb_var800x600x8 = {
.vmode = FB_VMODE_NONINTERLACED
};
-static void savage_enable_mmio (struct savagefb_par *par)
+static void savage_enable_mmio(struct savagefb_par *par)
{
unsigned char val;
- DBG ("savage_enable_mmio\n");
+ DBG("savage_enable_mmio\n");
- val = vga_in8 (0x3c3, par);
- vga_out8 (0x3c3, val | 0x01, par);
- val = vga_in8 (0x3cc, par);
- vga_out8 (0x3c2, val | 0x01, par);
+ val = vga_in8(0x3c3, par);
+ vga_out8(0x3c3, val | 0x01, par);
+ val = vga_in8(0x3cc, par);
+ vga_out8(0x3c2, val | 0x01, par);
if (par->chip >= S3_SAVAGE4) {
- vga_out8 (0x3d4, 0x40, par);
- val = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, val | 1, par);
+ vga_out8(0x3d4, 0x40, par);
+ val = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, val | 1, par);
}
}
-static void savage_disable_mmio (struct savagefb_par *par)
+static void savage_disable_mmio(struct savagefb_par *par)
{
unsigned char val;
- DBG ("savage_disable_mmio\n");
+ DBG("savage_disable_mmio\n");
- if(par->chip >= S3_SAVAGE4 ) {
- vga_out8 (0x3d4, 0x40, par);
- val = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, val | 1, par);
+ if (par->chip >= S3_SAVAGE4) {
+ vga_out8(0x3d4, 0x40, par);
+ val = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, val | 1, par);
}
}
-static int __devinit savage_map_mmio (struct fb_info *info)
+static int __devinit savage_map_mmio(struct fb_info *info)
{
struct savagefb_par *par = info->par;
- DBG ("savage_map_mmio");
+ DBG("savage_map_mmio");
- if (S3_SAVAGE3D_SERIES (par->chip))
- par->mmio.pbase = pci_resource_start (par->pcidev, 0) +
+ if (S3_SAVAGE3D_SERIES(par->chip))
+ par->mmio.pbase = pci_resource_start(par->pcidev, 0) +
SAVAGE_NEWMMIO_REGBASE_S3;
else
- par->mmio.pbase = pci_resource_start (par->pcidev, 0) +
+ par->mmio.pbase = pci_resource_start(par->pcidev, 0) +
SAVAGE_NEWMMIO_REGBASE_S4;
par->mmio.len = SAVAGE_NEWMMIO_REGSIZE;
- par->mmio.vbase = ioremap (par->mmio.pbase, par->mmio.len);
+ par->mmio.vbase = ioremap(par->mmio.pbase, par->mmio.len);
if (!par->mmio.vbase) {
- printk ("savagefb: unable to map memory mapped IO\n");
+ printk("savagefb: unable to map memory mapped IO\n");
return -ENOMEM;
} else
- printk (KERN_INFO "savagefb: mapped io at %p\n",
+ printk(KERN_INFO "savagefb: mapped io at %p\n",
par->mmio.vbase);
info->fix.mmio_start = par->mmio.pbase;
@@ -1540,15 +1713,15 @@ static int __devinit savage_map_mmio (struct fb_info *info)
par->bci_base = (u32 __iomem *)(par->mmio.vbase + BCI_BUFFER_OFFSET);
par->bci_ptr = 0;
- savage_enable_mmio (par);
+ savage_enable_mmio(par);
return 0;
}
-static void savage_unmap_mmio (struct fb_info *info)
+static void savage_unmap_mmio(struct fb_info *info)
{
struct savagefb_par *par = info->par;
- DBG ("savage_unmap_mmio");
+ DBG("savage_unmap_mmio");
savage_disable_mmio(par);
@@ -1558,46 +1731,46 @@ static void savage_unmap_mmio (struct fb_info *info)
}
}
-static int __devinit savage_map_video (struct fb_info *info,
- int video_len)
+static int __devinit savage_map_video(struct fb_info *info,
+ int video_len)
{
struct savagefb_par *par = info->par;
int resource;
DBG("savage_map_video");
- if (S3_SAVAGE3D_SERIES (par->chip))
+ if (S3_SAVAGE3D_SERIES(par->chip))
resource = 0;
else
resource = 1;
- par->video.pbase = pci_resource_start (par->pcidev, resource);
+ par->video.pbase = pci_resource_start(par->pcidev, resource);
par->video.len = video_len;
- par->video.vbase = ioremap (par->video.pbase, par->video.len);
+ par->video.vbase = ioremap(par->video.pbase, par->video.len);
if (!par->video.vbase) {
- printk ("savagefb: unable to map screen memory\n");
+ printk("savagefb: unable to map screen memory\n");
return -ENOMEM;
} else
- printk (KERN_INFO "savagefb: mapped framebuffer at %p, "
- "pbase == %x\n", par->video.vbase, par->video.pbase);
+ printk(KERN_INFO "savagefb: mapped framebuffer at %p, "
+ "pbase == %x\n", par->video.vbase, par->video.pbase);
info->fix.smem_start = par->video.pbase;
info->fix.smem_len = par->video.len - par->cob_size;
info->screen_base = par->video.vbase;
#ifdef CONFIG_MTRR
- par->video.mtrr = mtrr_add (par->video.pbase, video_len,
- MTRR_TYPE_WRCOMB, 1);
+ par->video.mtrr = mtrr_add(par->video.pbase, video_len,
+ MTRR_TYPE_WRCOMB, 1);
#endif
/* Clear framebuffer, it's all white in memory after boot */
- memset_io (par->video.vbase, 0, par->video.len);
+ memset_io(par->video.vbase, 0, par->video.len);
return 0;
}
-static void savage_unmap_video (struct fb_info *info)
+static void savage_unmap_video(struct fb_info *info)
{
struct savagefb_par *par = info->par;
@@ -1605,16 +1778,16 @@ static void savage_unmap_video (struct fb_info *info)
if (par->video.vbase) {
#ifdef CONFIG_MTRR
- mtrr_del (par->video.mtrr, par->video.pbase, par->video.len);
+ mtrr_del(par->video.mtrr, par->video.pbase, par->video.len);
#endif
- iounmap (par->video.vbase);
+ iounmap(par->video.vbase);
par->video.vbase = NULL;
info->screen_base = NULL;
}
}
-static int savage_init_hw (struct savagefb_par *par)
+static int savage_init_hw(struct savagefb_par *par)
{
unsigned char config1, m, n, n1, n2, sr8, cr3f, cr66 = 0, tmp;
@@ -1656,7 +1829,7 @@ static int savage_init_hw (struct savagefb_par *par)
switch (par->chip) {
case S3_SAVAGE3D:
- videoRam = RamSavage3D[ (config1 & 0xC0) >> 6 ] * 1024;
+ videoRam = RamSavage3D[(config1 & 0xC0) >> 6 ] * 1024;
break;
case S3_SAVAGE4:
@@ -1667,22 +1840,22 @@ static int savage_init_hw (struct savagefb_par *par)
* can do it different...
*/
vga_out8(0x3d4, 0x68, par); /* memory control 1 */
- if( (vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6) )
+ if ((vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6))
RamSavage4[1] = 8;
/*FALLTHROUGH*/
case S3_SAVAGE2000:
- videoRam = RamSavage4[ (config1 & 0xE0) >> 5 ] * 1024;
+ videoRam = RamSavage4[(config1 & 0xE0) >> 5] * 1024;
break;
case S3_SAVAGE_MX:
case S3_SUPERSAVAGE:
- videoRam = RamSavageMX[ (config1 & 0x0E) >> 1 ] * 1024;
+ videoRam = RamSavageMX[(config1 & 0x0E) >> 1] * 1024;
break;
case S3_PROSAVAGE:
- videoRam = RamSavageNB[ (config1 & 0xE0) >> 5 ] * 1024;
+ videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024;
break;
default:
@@ -1693,31 +1866,31 @@ static int savage_init_hw (struct savagefb_par *par)
videoRambytes = videoRam * 1024;
- printk (KERN_INFO "savagefb: probed videoram: %dk\n", videoRam);
+ printk(KERN_INFO "savagefb: probed videoram: %dk\n", videoRam);
/* reset graphics engine to avoid memory corruption */
- vga_out8 (0x3d4, 0x66, par);
- cr66 = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr66 | 0x02, par);
- udelay (10000);
+ vga_out8(0x3d4, 0x66, par);
+ cr66 = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr66 | 0x02, par);
+ udelay(10000);
- vga_out8 (0x3d4, 0x66, par);
- vga_out8 (0x3d5, cr66 & ~0x02, par); /* clear reset flag */
- udelay (10000);
+ vga_out8(0x3d4, 0x66, par);
+ vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
+ udelay(10000);
/*
* reset memory interface, 3D engine, AGP master, PCI master,
* master engine unit, motion compensation/LPB
*/
- vga_out8 (0x3d4, 0x3f, par);
- cr3f = vga_in8 (0x3d5, par);
- vga_out8 (0x3d5, cr3f | 0x08, par);
- udelay (10000);
+ vga_out8(0x3d4, 0x3f, par);
+ cr3f = vga_in8(0x3d5, par);
+ vga_out8(0x3d5, cr3f | 0x08, par);
+ udelay(10000);
- vga_out8 (0x3d4, 0x3f, par);
- vga_out8 (0x3d5, cr3f & ~0x08, par); /* clear reset flags */
- udelay (10000);
+ vga_out8(0x3d4, 0x3f, par);
+ vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
+ udelay(10000);
/* Savage ramdac speeds */
par->numClocks = 4;
@@ -1740,7 +1913,7 @@ static int savage_init_hw (struct savagefb_par *par)
n1 = n & 0x1f;
n2 = (n >> 5) & 0x03;
par->MCLK = ((1431818 * (m+2)) / (n1+2) / (1 << n2) + 50) / 100;
- printk (KERN_INFO "savagefb: Detected current MCLK value of %d kHz\n",
+ printk(KERN_INFO "savagefb: Detected current MCLK value of %d kHz\n",
par->MCLK);
/* check for DVI/flat panel */
@@ -1769,12 +1942,12 @@ static int savage_init_hw (struct savagefb_par *par)
	/* Check LCD panel information */
if (par->display_type == DISP_LCD) {
- unsigned char cr6b = VGArCR( 0x6b, par);
+ unsigned char cr6b = VGArCR(0x6b, par);
- int panelX = (VGArSEQ (0x61, par) +
- ((VGArSEQ (0x66, par) & 0x02) << 7) + 1) * 8;
- int panelY = (VGArSEQ (0x69, par) +
- ((VGArSEQ (0x6e, par) & 0x70) << 4) + 1);
+ int panelX = (VGArSEQ(0x61, par) +
+ ((VGArSEQ(0x66, par) & 0x02) << 7) + 1) * 8;
+ int panelY = (VGArSEQ(0x69, par) +
+ ((VGArSEQ(0x6e, par) & 0x70) << 4) + 1);
char * sTechnology = "Unknown";
@@ -1796,26 +1969,26 @@ static int savage_init_hw (struct savagefb_par *par)
ActiveDUO = 0x80
};
- if ((VGArSEQ (0x39, par) & 0x03) == 0) {
+ if ((VGArSEQ(0x39, par) & 0x03) == 0) {
sTechnology = "TFT";
- } else if ((VGArSEQ (0x30, par) & 0x01) == 0) {
+ } else if ((VGArSEQ(0x30, par) & 0x01) == 0) {
sTechnology = "DSTN";
} else {
sTechnology = "STN";
}
- printk (KERN_INFO "savagefb: %dx%d %s LCD panel detected %s\n",
- panelX, panelY, sTechnology,
- cr6b & ActiveLCD ? "and active" : "but not active");
+ printk(KERN_INFO "savagefb: %dx%d %s LCD panel detected %s\n",
+ panelX, panelY, sTechnology,
+ cr6b & ActiveLCD ? "and active" : "but not active");
- if( cr6b & ActiveLCD ) {
+ if (cr6b & ActiveLCD) {
/*
* If the LCD is active and panel expansion is enabled,
* we probably want to kill the HW cursor.
*/
- printk (KERN_INFO "savagefb: Limiting video mode to "
- "%dx%d\n", panelX, panelY );
+ printk(KERN_INFO "savagefb: Limiting video mode to "
+ "%dx%d\n", panelX, panelY);
par->SavagePanelWidth = panelX;
par->SavagePanelHeight = panelY;
@@ -1824,9 +1997,10 @@ static int savage_init_hw (struct savagefb_par *par)
par->display_type = DISP_CRT;
}
- savage_get_default_par (par);
+ savage_get_default_par(par, &par->state);
+ par->save = par->state;
- if( S3_SAVAGE4_SERIES(par->chip) ) {
+ if (S3_SAVAGE4_SERIES(par->chip)) {
/*
* The Savage4 and ProSavage have COB coherency bugs which
* render the buffer useless. We disable it.
@@ -1845,9 +2019,9 @@ static int savage_init_hw (struct savagefb_par *par)
return videoRambytes;
}
-static int __devinit savage_init_fb_info (struct fb_info *info,
- struct pci_dev *dev,
- const struct pci_device_id *id)
+static int __devinit savage_init_fb_info(struct fb_info *info,
+ struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct savagefb_par *par = info->par;
int err = 0;
@@ -1863,63 +2037,63 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
switch (info->fix.accel) {
case FB_ACCEL_SUPERSAVAGE:
par->chip = S3_SUPERSAVAGE;
- snprintf (info->fix.id, 16, "SuperSavage");
+ snprintf(info->fix.id, 16, "SuperSavage");
break;
case FB_ACCEL_SAVAGE4:
par->chip = S3_SAVAGE4;
- snprintf (info->fix.id, 16, "Savage4");
+ snprintf(info->fix.id, 16, "Savage4");
break;
case FB_ACCEL_SAVAGE3D:
par->chip = S3_SAVAGE3D;
- snprintf (info->fix.id, 16, "Savage3D");
+ snprintf(info->fix.id, 16, "Savage3D");
break;
case FB_ACCEL_SAVAGE3D_MV:
par->chip = S3_SAVAGE3D;
- snprintf (info->fix.id, 16, "Savage3D-MV");
+ snprintf(info->fix.id, 16, "Savage3D-MV");
break;
case FB_ACCEL_SAVAGE2000:
par->chip = S3_SAVAGE2000;
- snprintf (info->fix.id, 16, "Savage2000");
+ snprintf(info->fix.id, 16, "Savage2000");
break;
case FB_ACCEL_SAVAGE_MX_MV:
par->chip = S3_SAVAGE_MX;
- snprintf (info->fix.id, 16, "Savage/MX-MV");
+ snprintf(info->fix.id, 16, "Savage/MX-MV");
break;
case FB_ACCEL_SAVAGE_MX:
par->chip = S3_SAVAGE_MX;
- snprintf (info->fix.id, 16, "Savage/MX");
+ snprintf(info->fix.id, 16, "Savage/MX");
break;
case FB_ACCEL_SAVAGE_IX_MV:
par->chip = S3_SAVAGE_MX;
- snprintf (info->fix.id, 16, "Savage/IX-MV");
+ snprintf(info->fix.id, 16, "Savage/IX-MV");
break;
case FB_ACCEL_SAVAGE_IX:
par->chip = S3_SAVAGE_MX;
- snprintf (info->fix.id, 16, "Savage/IX");
+ snprintf(info->fix.id, 16, "Savage/IX");
break;
case FB_ACCEL_PROSAVAGE_PM:
par->chip = S3_PROSAVAGE;
- snprintf (info->fix.id, 16, "ProSavagePM");
+ snprintf(info->fix.id, 16, "ProSavagePM");
break;
case FB_ACCEL_PROSAVAGE_KM:
par->chip = S3_PROSAVAGE;
- snprintf (info->fix.id, 16, "ProSavageKM");
+ snprintf(info->fix.id, 16, "ProSavageKM");
break;
case FB_ACCEL_S3TWISTER_P:
par->chip = S3_PROSAVAGE;
- snprintf (info->fix.id, 16, "TwisterP");
+ snprintf(info->fix.id, 16, "TwisterP");
break;
case FB_ACCEL_S3TWISTER_K:
par->chip = S3_PROSAVAGE;
- snprintf (info->fix.id, 16, "TwisterK");
+ snprintf(info->fix.id, 16, "TwisterK");
break;
case FB_ACCEL_PROSAVAGE_DDR:
par->chip = S3_PROSAVAGE;
- snprintf (info->fix.id, 16, "ProSavageDDR");
+ snprintf(info->fix.id, 16, "ProSavageDDR");
break;
case FB_ACCEL_PROSAVAGE_DDRK:
par->chip = S3_PROSAVAGE;
- snprintf (info->fix.id, 16, "ProSavage8");
+ snprintf(info->fix.id, 16, "ProSavage8");
break;
}
@@ -1960,7 +2134,7 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
info->pixmap.buf_align = 4;
info->pixmap.access_align = 32;
- err = fb_alloc_cmap (&info->cmap, NR_PALETTE, 0);
+ err = fb_alloc_cmap(&info->cmap, NR_PALETTE, 0);
if (!err)
info->flags |= FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT |
@@ -1972,8 +2146,8 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
/* --------------------------------------------------------------------- */
-static int __devinit savagefb_probe (struct pci_dev* dev,
- const struct pci_device_id* id)
+static int __devinit savagefb_probe(struct pci_dev* dev,
+ const struct pci_device_id* id)
{
struct fb_info *info;
struct savagefb_par *par;
@@ -2085,12 +2259,12 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
fb_destroy_modedb(info->monspecs.modedb);
info->monspecs.modedb = NULL;
- err = register_framebuffer (info);
+ err = register_framebuffer(info);
if (err < 0)
goto failed;
- printk (KERN_INFO "fb: S3 %s frame buffer device\n",
- info->fix.id);
+ printk(KERN_INFO "fb: S3 %s frame buffer device\n",
+ info->fix.id);
/*
* Our driver data
@@ -2103,10 +2277,10 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
#ifdef CONFIG_FB_SAVAGE_I2C
savagefb_delete_i2c_busses(info);
#endif
- fb_alloc_cmap (&info->cmap, 0, 0);
+ fb_alloc_cmap(&info->cmap, 0, 0);
savage_unmap_video(info);
failed_video:
- savage_unmap_mmio (info);
+ savage_unmap_mmio(info);
failed_mmio:
kfree(info->pixmap.addr);
failed_init:
@@ -2117,7 +2291,7 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
return err;
}
-static void __devexit savagefb_remove (struct pci_dev *dev)
+static void __devexit savagefb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -2129,16 +2303,16 @@ static void __devexit savagefb_remove (struct pci_dev *dev)
* we will be leaving hooks that could cause
* oopsen laying around.
*/
- if (unregister_framebuffer (info))
- printk (KERN_WARNING "savagefb: danger danger! "
- "Oopsen imminent!\n");
+ if (unregister_framebuffer(info))
+ printk(KERN_WARNING "savagefb: danger danger! "
+ "Oopsen imminent!\n");
#ifdef CONFIG_FB_SAVAGE_I2C
savagefb_delete_i2c_busses(info);
#endif
- fb_alloc_cmap (&info->cmap, 0, 0);
- savage_unmap_video (info);
- savage_unmap_mmio (info);
+ fb_alloc_cmap(&info->cmap, 0, 0);
+ savage_unmap_video(info);
+ savage_unmap_mmio(info);
kfree(info->pixmap.addr);
pci_release_regions(dev);
framebuffer_release(info);
@@ -2151,7 +2325,7 @@ static void __devexit savagefb_remove (struct pci_dev *dev)
}
}
-static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
+static int savagefb_suspend(struct pci_dev* dev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(dev);
struct savagefb_par *par = info->par;
@@ -2177,6 +2351,7 @@ static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
info->fbops->fb_sync(info);
savagefb_blank(FB_BLANK_POWERDOWN, info);
+ savage_set_default_par(par, &par->save);
savage_disable_mmio(par);
pci_save_state(dev);
pci_disable_device(dev);
@@ -2186,7 +2361,7 @@ static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
return 0;
}
-static int savagefb_resume (struct pci_dev* dev)
+static int savagefb_resume(struct pci_dev* dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct savagefb_par *par = info->par;
@@ -2210,15 +2385,15 @@ static int savagefb_resume (struct pci_dev* dev)
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
- if(pci_enable_device(dev))
+ if (pci_enable_device(dev))
DBG("err");
pci_set_master(dev);
savage_enable_mmio(par);
savage_init_hw(par);
- savagefb_set_par (info);
+ savagefb_set_par(info);
+ fb_set_suspend(info, 0);
savagefb_blank(FB_BLANK_UNBLANK, info);
- fb_set_suspend (info, 0);
release_console_sem();
return 0;
@@ -2311,10 +2486,10 @@ static struct pci_driver savagefb_driver = {
/* **************************** exit-time only **************************** */
-static void __exit savage_done (void)
+static void __exit savage_done(void)
{
DBG("savage_done");
- pci_unregister_driver (&savagefb_driver);
+ pci_unregister_driver(&savagefb_driver);
}
@@ -2345,7 +2520,7 @@ static int __init savagefb_init(void)
return -ENODEV;
savagefb_setup(option);
- return pci_register_driver (&savagefb_driver);
+ return pci_register_driver(&savagefb_driver);
}
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 2e6df1fcb2b9..c0cc5e3ba7b5 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -23,6 +23,8 @@
#include <asm/io.h>
#include <asm/mtrr.h>
+#include <setup_arch.h>
+
#define INCLUDE_TIMING_TABLE_DATA
#define DBE_REG_BASE par->regs
#include <video/sgivw.h>
@@ -42,10 +44,6 @@ struct sgivw_par {
* The default can be overridden if the driver is compiled as a module
*/
-/* set by arch/i386/kernel/setup.c */
-extern unsigned long sgivwfb_mem_phys;
-extern unsigned long sgivwfb_mem_size;
-
static int ypan = 0;
static int ywrap = 0;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 8adf5bf91eee..c63c0e721b82 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -275,7 +275,7 @@ sisfb_search_mode(char *name, BOOLEAN quiet)
static void __devinit
sisfb_get_vga_mode_from_kernel(void)
{
-#if (defined(__i386__) || defined(__x86_64__)) && defined(CONFIG_VIDEO_SELECT)
+#ifdef CONFIG_X86
char mymode[32];
int mydepth = screen_info.lfb_depth;
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 9b707771d757..67f429e93189 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -906,11 +906,6 @@ static void __exit xxxfb_exit(void)
}
#endif
-MODULE_LICENSE("GPL");
-module_init(xxxfb_init);
-module_exit(xxxfb_exit);
-
-
/*
* Setup
*/
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 7398bd48ba6c..6c2c78ab9827 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -26,7 +26,6 @@
#include <linux/selection.h>
#include <asm/io.h>
#include <video/tgafb.h>
-#include <linux/selection.h>
/*
* Local functions.
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index b0b9acfdd430..5718924b677f 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -51,7 +51,7 @@ static int inverse = 0;
static int mtrr = 0; /* disable mtrr */
static int vram_remap __initdata = 0; /* Set amount of memory to be used */
static int vram_total __initdata = 0; /* Set total amount of memory */
-static int pmi_setpal = 0; /* pmi for palette changes ??? */
+static int pmi_setpal = 1; /* pmi for palette changes ??? */
static int ypan = 0; /* 0..nothing, 1..ypan, 2..ywrap */
static unsigned short *pmi_base = NULL;
static void (*pmi_start)(void);
@@ -80,15 +80,30 @@ static int vesafb_pan_display(struct fb_var_screeninfo *var,
return 0;
}
-static void vesa_setpalette(int regno, unsigned red, unsigned green,
+static int vesa_setpalette(int regno, unsigned red, unsigned green,
unsigned blue)
{
int shift = 16 - depth;
+ int err = -EINVAL;
+
+/*
+ * Try VGA registers first...
+ */
+ if (vga_compat) {
+ outb_p(regno, dac_reg);
+ outb_p(red >> shift, dac_val);
+ outb_p(green >> shift, dac_val);
+ outb_p(blue >> shift, dac_val);
+ err = 0;
+ }
#ifdef __i386__
- struct { u_char blue, green, red, pad; } entry;
+/*
+ * Fallback to the PMI....
+ */
+ if (err && pmi_setpal) {
+ struct { u_char blue, green, red, pad; } entry;
- if (pmi_setpal) {
entry.red = red >> shift;
entry.green = green >> shift;
entry.blue = blue >> shift;
@@ -102,26 +117,19 @@ static void vesa_setpalette(int regno, unsigned red, unsigned green,
"d" (regno), /* EDX */
"D" (&entry), /* EDI */
"S" (&pmi_pal)); /* ESI */
- return;
+ err = 0;
}
#endif
-/*
- * without protected mode interface and if VGA compatible,
- * try VGA registers...
- */
- if (vga_compat) {
- outb_p(regno, dac_reg);
- outb_p(red >> shift, dac_val);
- outb_p(green >> shift, dac_val);
- outb_p(blue >> shift, dac_val);
- }
+ return err;
}
static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
+ int err = 0;
+
/*
* Set a single color register. The values supplied are
* already rounded down to the hardware's capabilities
@@ -133,7 +141,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
if (info->var.bits_per_pixel == 8)
- vesa_setpalette(regno,red,green,blue);
+ err = vesa_setpalette(regno,red,green,blue);
else if (regno < 16) {
switch (info->var.bits_per_pixel) {
case 16:
@@ -164,7 +172,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
}
}
- return 0;
+ return err;
}
static struct fb_ops vesafb_ops = {
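
Editor's note: the vesa_setpalette() hunks above invert the old order (PMI first, VGA registers as the fallback) and make the result reportable; the pmi_setpal default also flips to 1, so the fallback stays available. Consolidated, the function after this patch reads roughly as follows (simplified sketch; vga_compat, depth, dac_reg, dac_val and pmi_setpal are the file-static names from vesafb.c above, and the protected-mode-interface inline asm is elided):

        static int vesa_setpalette(int regno, unsigned red, unsigned green,
                                   unsigned blue)
        {
                int shift = 16 - depth;
                int err = -EINVAL;

                /* 1. try the plain VGA DAC registers first, if the BIOS
                 *    reported VGA compatibility */
                if (vga_compat) {
                        outb_p(regno, dac_reg);
                        outb_p(red >> shift, dac_val);
                        outb_p(green >> shift, dac_val);
                        outb_p(blue >> shift, dac_val);
                        err = 0;
                }

        #ifdef __i386__
                /* 2. otherwise fall back to the VESA protected-mode
                 *    interface (the call through pmi_pal goes here) */
                if (err && pmi_setpal) {
                        /* inline asm elided */
                        err = 0;
                }
        #endif

                /* 3. still -EINVAL if neither path was available; the
                 *    caller, vesafb_setcolreg(), now returns this
                 *    instead of unconditionally returning 0 */
                return err;
        }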
@@ -460,9 +468,7 @@ static struct platform_driver vesafb_driver = {
},
};
-static struct platform_device vesafb_device = {
- .name = "vesafb",
-};
+static struct platform_device *vesafb_device;
static int __init vesafb_init(void)
{
@@ -475,10 +481,19 @@ static int __init vesafb_init(void)
ret = platform_driver_register(&vesafb_driver);
if (!ret) {
- ret = platform_device_register(&vesafb_device);
- if (ret)
+ vesafb_device = platform_device_alloc("vesafb", 0);
+
+ if (vesafb_device)
+ ret = platform_device_add(vesafb_device);
+ else
+ ret = -ENOMEM;
+
+ if (ret) {
+ platform_device_put(vesafb_device);
platform_driver_unregister(&vesafb_driver);
+ }
}
+
return ret;
}
module_init(vesafb_init);
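
Editor's note: vesafb_init() above (and vfb_init() and vga16fb_init() further down) all receive the same conversion: the static struct platform_device is replaced by a dynamically allocated one, giving the device a release path owned by the driver core instead of a dummy release callback. The generic shape of the pattern, with hypothetical names:

        #include <linux/init.h>
        #include <linux/platform_device.h>

        static struct platform_driver example_driver = {
                .driver = { .name = "example" },
        };
        static struct platform_device *example_device;

        static int __init example_init(void)
        {
                int ret = platform_driver_register(&example_driver);

                if (ret)
                        return ret;

                /* allocate instead of declaring a static platform_device */
                example_device = platform_device_alloc("example", 0);
                if (example_device)
                        ret = platform_device_add(example_device);
                else
                        ret = -ENOMEM;

                if (ret) {
                        /* _put (not kfree) drops the reference taken by
                         * _alloc; it is a no-op for a NULL pointer */
                        platform_device_put(example_device);
                        platform_driver_unregister(&example_driver);
                }
                return ret;
        }

        static void __exit example_exit(void)
        {
                /* unregister through the pointer, matching the vfb and
                 * vga16fb exit hunks below */
                platform_device_unregister(example_device);
                platform_driver_unregister(&example_driver);
        }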
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 77eed1fd9943..d073ffb6e1f9 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -398,12 +398,6 @@ static int __init vfb_setup(char *options)
* Initialisation
*/
-static void vfb_platform_release(struct device *device)
-{
- // This is called when the reference count goes to zero.
- dev_err(device, "This driver is broken, please bug the authors so they will fix it.\n");
-}
-
static int __init vfb_probe(struct platform_device *dev)
{
struct fb_info *info;
@@ -482,13 +476,7 @@ static struct platform_driver vfb_driver = {
},
};
-static struct platform_device vfb_device = {
- .name = "vfb",
- .id = 0,
- .dev = {
- .release = vfb_platform_release,
- }
-};
+static struct platform_device *vfb_device;
static int __init vfb_init(void)
{
@@ -508,10 +496,19 @@ static int __init vfb_init(void)
ret = platform_driver_register(&vfb_driver);
if (!ret) {
- ret = platform_device_register(&vfb_device);
- if (ret)
+ vfb_device = platform_device_alloc("vfb", 0);
+
+ if (vfb_device)
+ ret = platform_device_add(vfb_device);
+ else
+ ret = -ENOMEM;
+
+ if (ret) {
+ platform_device_put(vfb_device);
platform_driver_unregister(&vfb_driver);
+ }
}
+
return ret;
}
@@ -520,7 +517,7 @@ module_init(vfb_init);
#ifdef MODULE
static void __exit vfb_exit(void)
{
- platform_device_unregister(&vfb_device);
+ platform_device_unregister(vfb_device);
platform_driver_unregister(&vfb_driver);
}
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 4fd2a272e03d..3c404c9bd36c 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1334,9 +1334,8 @@ static int vga16fb_setup(char *options)
}
#endif
-static int __init vga16fb_probe(struct device *device)
+static int __init vga16fb_probe(struct platform_device *dev)
{
- struct platform_device *dev = to_platform_device(device);
struct fb_info *info;
struct vga16fb_par *par;
int i;
@@ -1403,7 +1402,7 @@ static int __init vga16fb_probe(struct device *device)
printk(KERN_INFO "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
- dev_set_drvdata(device, info);
+ platform_set_drvdata(dev, info);
return 0;
@@ -1417,9 +1416,9 @@ static int __init vga16fb_probe(struct device *device)
return ret;
}
-static int vga16fb_remove(struct device *device)
+static int vga16fb_remove(struct platform_device *dev)
{
- struct fb_info *info = dev_get_drvdata(device);
+ struct fb_info *info = platform_get_drvdata(dev);
if (info) {
unregister_framebuffer(info);
@@ -1432,16 +1431,15 @@ static int vga16fb_remove(struct device *device)
return 0;
}
-static struct device_driver vga16fb_driver = {
- .name = "vga16fb",
- .bus = &platform_bus_type,
+static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
.remove = vga16fb_remove,
+ .driver = {
+ .name = "vga16fb",
+ },
};
-static struct platform_device vga16fb_device = {
- .name = "vga16fb",
-};
+static struct platform_device *vga16fb_device;
static int __init vga16fb_init(void)
{
@@ -1454,12 +1452,20 @@ static int __init vga16fb_init(void)
vga16fb_setup(option);
#endif
- ret = driver_register(&vga16fb_driver);
+ ret = platform_driver_register(&vga16fb_driver);
if (!ret) {
- ret = platform_device_register(&vga16fb_device);
- if (ret)
- driver_unregister(&vga16fb_driver);
+ vga16fb_device = platform_device_alloc("vga16fb", 0);
+
+ if (vga16fb_device)
+ ret = platform_device_add(vga16fb_device);
+ else
+ ret = -ENOMEM;
+
+ if (ret) {
+ platform_device_put(vga16fb_device);
+ platform_driver_unregister(&vga16fb_driver);
+ }
}
return ret;
@@ -1467,8 +1473,8 @@ static int __init vga16fb_init(void)
static void __exit vga16fb_exit(void)
{
- platform_device_unregister(&vga16fb_device);
- driver_unregister(&vga16fb_driver);
+ platform_device_unregister(vga16fb_device);
+ platform_driver_unregister(&vga16fb_driver);
}
MODULE_LICENSE("GPL");
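
Editor's note: besides the dynamic device allocation, vga16fb also moves from a raw struct device_driver bound to platform_bus_type to a struct platform_driver, so probe/remove receive the platform device directly and the driver-data helpers change accordingly. A minimal sketch of the resulting shape (hypothetical names):

        #include <linux/platform_device.h>
        #include <linux/fb.h>

        static int example_probe(struct platform_device *dev)
        {
                /* was: example_probe(struct device *device) plus a
                 * to_platform_device(device) conversion */
                struct fb_info *info = framebuffer_alloc(0, &dev->dev);

                if (!info)
                        return -ENOMEM;
                platform_set_drvdata(dev, info);   /* was dev_set_drvdata() */
                return 0;
        }

        static int example_remove(struct platform_device *dev)
        {
                struct fb_info *info = platform_get_drvdata(dev);  /* was dev_get_drvdata() */

                if (info)
                        framebuffer_release(info);
                return 0;
        }

        static struct platform_driver example_fb_driver = {
                .probe  = example_probe,
                .remove = example_remove,
                .driver = {
                        .name = "example",  /* .bus = &platform_bus_type is now implied */
                },
        };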
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index f4407eb276c7..8d45ed668837 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -712,7 +712,7 @@ static void v9fs_read_work(void *a)
* v9fs_send_request - send 9P request
* The function can sleep until the request is scheduled for sending.
* The function can be interrupted. Return from the function is not
- * a guarantee that the request is sent succesfully. Can return errors
+ * a guarantee that the request is sent successfully. Can return errors
* that can be retrieved by PTR_ERR macros.
*
* @m: mux data
@@ -932,6 +932,8 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
r.rcall || r.err);
} while (!r.rcall && !r.err && err==-ERESTARTSYS &&
m->trans->status==Connected && !m->err);
+
+ err = -ERESTARTSYS;
}
sigpending = 1;
}
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index f867b8d3e973..450b0c1b385e 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -38,7 +38,7 @@
*/
extern struct file_system_type v9fs_fs_type;
-extern struct address_space_operations v9fs_addr_operations;
+extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
extern const struct file_operations v9fs_dir_operations;
extern struct dentry_operations v9fs_dentry_operations;
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index efda46fb64d9..d4f0aa3c87f2 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -103,6 +103,6 @@ UnmapAndUnlock:
return retval;
}
-struct address_space_operations v9fs_addr_operations = {
+const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5c6bdf82146c..2f580a197b8d 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -300,7 +300,7 @@ clunk_fid:
fid = V9FS_NOFID;
put_fid:
- if (fid >= 0)
+ if (fid != V9FS_NOFID)
v9fs_put_idpool(fid, &v9ses->fidpool);
kfree(fcall);
diff --git a/fs/Kconfig b/fs/Kconfig
index 1cdc043922d5..6dc8cfd6d80c 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1116,7 +1116,7 @@ config JFFS2_SUMMARY
config JFFS2_FS_XATTR
bool "JFFS2 XATTR support (EXPERIMENTAL)"
- depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER
+ depends on JFFS2_FS && EXPERIMENTAL
default n
help
Extended attributes are name:value pairs associated with inodes by
@@ -1490,7 +1490,12 @@ config NFSD
select LOCKD
select SUNRPC
select EXPORTFS
- select NFS_ACL_SUPPORT if NFSD_V3_ACL || NFSD_V2_ACL
+ select NFSD_V2_ACL if NFSD_V3_ACL
+ select NFS_ACL_SUPPORT if NFSD_V2_ACL
+ select NFSD_TCP if NFSD_V4
+ select CRYPTO_MD5 if NFSD_V4
+ select CRYPTO if NFSD_V4
+ select FS_POSIX_ACL if NFSD_V4
help
If you want your Linux box to act as an NFS *server*, so that other
computers on your local network which support NFS can access certain
@@ -1528,7 +1533,6 @@ config NFSD_V3
config NFSD_V3_ACL
bool "Provide server support for the NFSv3 ACL protocol extension"
depends on NFSD_V3
- select NFSD_V2_ACL
help
Implement the NFSv3 ACL protocol extension for manipulating POSIX
Access Control Lists on exported file systems. NFS clients should
@@ -1538,10 +1542,6 @@ config NFSD_V3_ACL
config NFSD_V4
bool "Provide NFSv4 server support (EXPERIMENTAL)"
depends on NFSD_V3 && EXPERIMENTAL
- select NFSD_TCP
- select CRYPTO_MD5
- select CRYPTO
- select FS_POSIX_ACL
help
If you would like to include the NFSv4 server as well as the NFSv2
and NFSv3 servers, say Y here. This feature is experimental, and
@@ -1722,7 +1722,7 @@ config CIFS_STATS
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
config CIFS_STATS2
- bool "CIFS extended statistics"
+ bool "Extended statistics"
depends on CIFS_STATS
help
Enabling this option will allow more detailed statistics on SMB
@@ -1735,6 +1735,32 @@ config CIFS_STATS2
Unless you are a developer or are doing network performance analysis
or tuning, say N.
+config CIFS_WEAK_PW_HASH
+ bool "Support legacy servers which use weaker LANMAN security"
+ depends on CIFS
+ help
+ Modern CIFS servers including Samba and most Windows versions
+ (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
+ security mechanisms. These hash the password more securely
+ than the mechanisms used in the older LANMAN version of the
+ SMB protocol needed to establish sessions with old SMB servers.
+
+ Enabling this option allows the cifs module to mount to older
+ LANMAN based servers such as OS/2 and Windows 95, but such
+ mounts may be less secure than mounts using NTLM or more recent
+ security mechanisms if you are on a public network. Unless you
+ have a need to access old SMB servers (and are on a private
+ network) you probably want to say N. Even if this support
+ is enabled in the kernel build, they will not be used
+ automatically. At runtime LANMAN mounts are disabled but
+ can be set to required (or optional) either in
+ /proc/fs/cifs (see fs/cifs/README for more detail) or via an
+ option on the mount command. This support is disabled by
+ default in order to reduce the possibility of a downgrade
+ attack.
+
+ If unsure, say N.
+
config CIFS_XATTR
bool "CIFS extended attributes"
depends on CIFS
@@ -1763,6 +1789,16 @@ config CIFS_POSIX
(such as Samba 3.10 and later) which can negotiate
CIFS POSIX ACL support. If unsure, say N.
+config CIFS_DEBUG2
+ bool "Enable additional CIFS debugging routines"
+ help
+ Enabling this option adds a few more debugging routines
+ to the cifs code which slightly increases the size of
+ the cifs module and can cause additional logging of debug
+ messages in some error paths, slowing performance. This
+ option can be turned off unless you are debugging
+ cifs problems. If unsure, say N.
+
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
@@ -1778,7 +1814,7 @@ config CIFS_EXPERIMENTAL
If unsure, say N.
config CIFS_UPCALL
- bool "CIFS Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
+ bool "Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
depends on CIFS_EXPERIMENTAL
select CONNECTOR
help
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index a02802a30798..534f3eecc985 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -72,7 +72,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, adfs_get_block);
}
-static struct address_space_operations adfs_aops = {
+static const struct address_space_operations adfs_aops = {
.readpage = adfs_readpage,
.writepage = adfs_writepage,
.sync_page = block_sync_page,
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a43a876742b8..0ddd4cc0d1a0 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -195,9 +195,9 @@ extern struct inode_operations affs_symlink_inode_operations;
extern const struct file_operations affs_file_operations;
extern const struct file_operations affs_file_operations_ofs;
extern const struct file_operations affs_dir_operations;
-extern struct address_space_operations affs_symlink_aops;
-extern struct address_space_operations affs_aops;
-extern struct address_space_operations affs_aops_ofs;
+extern const struct address_space_operations affs_symlink_aops;
+extern const struct address_space_operations affs_aops;
+extern const struct address_space_operations affs_aops_ofs;
extern struct dentry_operations affs_dentry_operations;
extern struct dentry_operations affs_dentry_operations_intl;
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 7076262af39b..3de8590e4f6a 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -406,7 +406,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,affs_get_block);
}
-struct address_space_operations affs_aops = {
+const struct address_space_operations affs_aops = {
.readpage = affs_readpage,
.writepage = affs_writepage,
.sync_page = block_sync_page,
@@ -759,7 +759,7 @@ out:
goto done;
}
-struct address_space_operations affs_aops_ofs = {
+const struct address_space_operations affs_aops_ofs = {
.readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs,
//.sync_page = affs_sync_page_ofs,
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index 426f0f094f23..f802256a5933 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -66,7 +66,7 @@ fail:
return err;
}
-struct address_space_operations affs_symlink_aops = {
+const struct address_space_operations affs_symlink_aops = {
.readpage = affs_symlink_readpage,
};
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 009a9ae88d61..bfc1fd22d5b1 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -413,8 +413,7 @@ int afs_server_find_by_peer(const struct rxrpc_peer *peer,
/* we found it in the graveyard - resurrect it */
found_dead_server:
- list_del(&server->link);
- list_add_tail(&server->link, &cell->sv_list);
+ list_move_tail(&server->link, &cell->sv_list);
afs_get_server(server);
afs_kafstimod_del_timer(&server->timeout);
spin_unlock(&cell->sv_gylock);
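
Editor's note: this hunk, and the matching ones in the other afs files and in autofs4 below, are a pure refactor: a list_del() immediately followed by list_add_tail() on the same entry collapses into list_move_tail(), which (as I recall the list.h helper, so treat this as a paraphrase) unlinks the entry without poisoning its pointers and re-adds it at the tail. In caller terms:

        /* before */
        list_del(&server->link);
        list_add_tail(&server->link, &cell->sv_list);

        /* after: the same movement in one call */
        list_move_tail(&server->link, &cell->sv_list);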
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 7bb716887e29..67d6634101fd 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -35,7 +35,7 @@ struct inode_operations afs_file_inode_operations = {
.getattr = afs_inode_getattr,
};
-struct address_space_operations afs_fs_aops = {
+const struct address_space_operations afs_fs_aops = {
.readpage = afs_file_readpage,
.sync_page = block_sync_page,
.set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 72febdf9a35a..e88b3b65ae49 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -69,7 +69,7 @@ extern const struct file_operations afs_dir_file_operations;
/*
* file.c
*/
-extern struct address_space_operations afs_fs_aops;
+extern const struct address_space_operations afs_fs_aops;
extern struct inode_operations afs_file_inode_operations;
#ifdef AFS_CACHING_SUPPORT
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
index 7ac07d0d47b9..f09a794f248e 100644
--- a/fs/afs/kafsasyncd.c
+++ b/fs/afs/kafsasyncd.c
@@ -136,8 +136,7 @@ static int kafsasyncd(void *arg)
if (!list_empty(&kafsasyncd_async_attnq)) {
op = list_entry(kafsasyncd_async_attnq.next,
struct afs_async_op, link);
- list_del(&op->link);
- list_add_tail(&op->link,
+ list_move_tail(&op->link,
&kafsasyncd_async_busyq);
}
@@ -204,8 +203,7 @@ void afs_kafsasyncd_begin_op(struct afs_async_op *op)
init_waitqueue_entry(&op->waiter, kafsasyncd_task);
add_wait_queue(&op->call->waitq, &op->waiter);
- list_del(&op->link);
- list_add_tail(&op->link, &kafsasyncd_async_busyq);
+ list_move_tail(&op->link, &kafsasyncd_async_busyq);
spin_unlock(&kafsasyncd_async_lock);
@@ -223,8 +221,7 @@ void afs_kafsasyncd_attend_op(struct afs_async_op *op)
spin_lock(&kafsasyncd_async_lock);
- list_del(&op->link);
- list_add_tail(&op->link, &kafsasyncd_async_attnq);
+ list_move_tail(&op->link, &kafsasyncd_async_attnq);
spin_unlock(&kafsasyncd_async_lock);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 62b093aa41c6..22afaae1a4ce 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -123,8 +123,7 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
resurrect_server:
_debug("resurrecting server");
- list_del(&zombie->link);
- list_add_tail(&zombie->link, &cell->sv_list);
+ list_move_tail(&zombie->link, &cell->sv_list);
afs_get_server(zombie);
afs_kafstimod_del_timer(&zombie->timeout);
spin_unlock(&cell->sv_gylock);
@@ -168,8 +167,7 @@ void afs_put_server(struct afs_server *server)
}
spin_lock(&cell->sv_gylock);
- list_del(&server->link);
- list_add_tail(&server->link, &cell->sv_graveyard);
+ list_move_tail(&server->link, &cell->sv_graveyard);
/* time out in 10 secs */
afs_kafstimod_add_timer(&server->timeout, 10 * HZ);
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index eced20618ecc..331f730a1fb3 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -326,8 +326,7 @@ int afs_vlocation_lookup(struct afs_cell *cell,
/* found in the graveyard - resurrect */
_debug("found in graveyard");
atomic_inc(&vlocation->usage);
- list_del(&vlocation->link);
- list_add_tail(&vlocation->link, &cell->vl_list);
+ list_move_tail(&vlocation->link, &cell->vl_list);
spin_unlock(&cell->vl_gylock);
afs_kafstimod_del_timer(&vlocation->timeout);
@@ -478,8 +477,7 @@ static void __afs_put_vlocation(struct afs_vlocation *vlocation)
}
/* move to graveyard queue */
- list_del(&vlocation->link);
- list_add_tail(&vlocation->link,&cell->vl_graveyard);
+ list_move_tail(&vlocation->link,&cell->vl_graveyard);
/* remove from pending timeout queue (refcounted if actually being
* updated) */
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
index 9867fef3261d..cf62da5d7825 100644
--- a/fs/afs/vnode.c
+++ b/fs/afs/vnode.c
@@ -104,8 +104,7 @@ static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
vnode->cb_expiry * HZ);
spin_lock(&afs_cb_hash_lock);
- list_del(&vnode->cb_hash_link);
- list_add_tail(&vnode->cb_hash_link,
+ list_move_tail(&vnode->cb_hash_link,
&afs_cb_hash(server, &vnode->fid));
spin_unlock(&afs_cb_hash_lock);
diff --git a/fs/aio.c b/fs/aio.c
index 8c34a62df7d7..950630187acc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -641,7 +641,7 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb)
* invoked both for initial i/o submission and
* subsequent retries via the aio_kick_handler.
* Expects to be invoked with iocb->ki_ctx->lock
- * already held. The lock is released and reaquired
+ * already held. The lock is released and reacquired
* as needed during processing.
*
* Calls the iocb retry method (already setup for the
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 4456d1daa40f..8dbd44f10e9d 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -376,8 +376,7 @@ next:
DPRINTK("returning %p %.*s",
expired, (int)expired->d_name.len, expired->d_name.name);
spin_lock(&dcache_lock);
- list_del(&expired->d_parent->d_subdirs);
- list_add(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
+ list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
spin_unlock(&dcache_lock);
return expired;
}
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 08201fab26cd..a83e889a97cd 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -73,7 +73,7 @@ static struct inode_operations befs_dir_inode_operations = {
.lookup = befs_lookup,
};
-static struct address_space_operations befs_aops = {
+static const struct address_space_operations befs_aops = {
.readpage = befs_readpage,
.sync_page = block_sync_page,
.bmap = befs_bmap,
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h
index 9d791004b21c..31973bbbf057 100644
--- a/fs/bfs/bfs.h
+++ b/fs/bfs/bfs.h
@@ -50,7 +50,7 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode)
/* file.c */
extern struct inode_operations bfs_file_inops;
extern const struct file_operations bfs_file_operations;
-extern struct address_space_operations bfs_aops;
+extern const struct address_space_operations bfs_aops;
/* dir.c */
extern struct inode_operations bfs_dir_inops;
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index d83cd74a2e4e..3d5aca28a0a0 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -153,7 +153,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, bfs_get_block);
}
-struct address_space_operations bfs_aops = {
+const struct address_space_operations bfs_aops = {
.readpage = bfs_readpage,
.writepage = bfs_writepage,
.sync_page = block_sync_page,
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index b1c902e319c1..c94d52eafd1b 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -510,7 +510,7 @@ static int load_flat_file(struct linux_binprm * bprm,
}
/* OK, This is the point of no return */
- set_personality(PER_LINUX);
+ set_personality(PER_LINUX_32BIT);
}
/*
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 028d9fb9c2d5..7f7600e2381c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1095,7 +1095,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}
-struct address_space_operations def_blk_aops = {
+const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
.sync_page = block_sync_page,
diff --git a/fs/buffer.c b/fs/buffer.c
index 373bb6292bdc..e9994722f4a3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -564,7 +564,7 @@ still_busy:
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
unsigned long flags;
@@ -2598,7 +2598,7 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned to;
struct page *page;
- struct address_space_operations *a_ops = mapping->a_ops;
+ const struct address_space_operations *a_ops = mapping->a_ops;
char *kaddr;
int ret = 0;
@@ -3166,7 +3166,6 @@ EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_prepare_write);
-EXPORT_SYMBOL(end_buffer_async_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7271bb0257f6..a61d17ed1827 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,9 +1,24 @@
+Version 1.44
+------------
+Rewritten sessionsetup support, including support for legacy SMB
+session setup needed for OS/2 and older servers such as Windows 95 and 98.
+Fix oops on ls to OS/2 servers. Add support for level 1 FindFirst
+so we can do search (ls etc.) to OS/2. Do not send NTCreateX
+or recent levels of FindFirst unless server says it supports NT SMBs
+(instead use legacy equivalents from LANMAN dialect). Fix to allow
+NTLMv2 authentication support (now can use stronger password hashing
+on mount if corresponding /proc/fs/cifs/SecurityFlags is set (0x4004)).
+Allow override of global cifs security flags on mount via "sec=" option(s).
+
Version 1.43
------------
POSIX locking to servers which support CIFS POSIX Extensions
(disabled by default controlled by proc/fs/cifs/Experimental).
Handle conversion of long share names (especially Asian languages)
-to Unicode during mount.
+to Unicode during mount. Fix memory leak in sess struct on reconnect.
+Fix rare oops after acpi suspend. Fix O_TRUNC opens to overwrite on
+cifs open which helps rare case when setpathinfo fails or server does
+not support it.
Version 1.42
------------
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 58c77254a23b..a26f26ed5a17 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_CIFS) += cifs.o
-cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o
diff --git a/fs/cifs/README b/fs/cifs/README
index 0355003f4f0a..7986d0d97ace 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -443,7 +443,10 @@ A partial list of the supported mount options follows:
SFU does). In the future the bottom 9 bits of the mode
mode also will be emulated using queries of the security
descriptor (ACL).
-sec Security mode. Allowed values are:
+ sign Must use packet signing (helps avoid unwanted data modification
+ by intermediate systems in the route). Note that signing
+ does not work with lanman or plaintext authentication.
+ sec Security mode. Allowed values are:
none attempt to connection as a null user (no name)
krb5 Use Kerberos version 5 authentication
krb5i Use Kerberos authentication and packet signing
@@ -453,6 +456,8 @@ sec Security mode. Allowed values are:
server requires signing also can be the default)
ntlmv2 Use NTLMv2 password hashing
ntlmv2i Use NTLMv2 password hashing with packet signing
+ lanman (if configured in kernel config) use older
+ lanman hash
The mount.cifs mount helper also accepts a few mount options before -o
including:
@@ -485,14 +490,34 @@ PacketSigningEnabled If set to one, cifs packet signing is enabled
it. If set to two, cifs packet signing is
required even if the server considers packet
signing optional. (default 1)
+SecurityFlags Flags which control security negotiation and
+ also packet signing. Authentication (may/must)
+ flags (e.g. for NTLM and/or NTLMv2) may be combined with
+ the signing flags. Specifying two different password
+ hashing mechanisms (as "must use") on the other hand
+ does not make much sense. Default flags are
+ 0x07007
+ (NTLM, NTLMv2 and packet signing allowed). Maximum
+ allowable flags if you want to allow mounts to servers
+ using weaker password hashes is 0x37037 (lanman,
+ plaintext, ntlm, ntlmv2, signing allowed):
+
+ may use packet signing 0x00001
+ must use packet signing 0x01001
+ may use NTLM (most common password hash) 0x00002
+ must use NTLM 0x02002
+ may use NTLMv2 0x00004
+ must use NTLMv2 0x04004
+ may use Kerberos security (not implemented yet) 0x00008
+ must use Kerberos (not implemented yet) 0x08008
+ may use lanman (weak) password hash 0x00010
+ must use lanman password hash 0x10010
+ may use plaintext passwords 0x00020
+ must use plaintext passwords 0x20020
+ (reserved for future packet encryption) 0x00040
+
cifsFYI If set to one, additional debug information is
logged to the system error log. (default 0)
-ExtendedSecurity If set to one, SPNEGO session establishment
- is allowed which enables more advanced
- secure CIFS session establishment (default 0)
-NTLMV2Enabled If set to one, more secure password hashes
- are used when the server supports them and
- when kerberos is not negotiated (default 0)
traceSMB If set to one, debug information is logged to the
system error log with the start of smb requests
and responses (default 0)
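
Editor's note: a small worked example for the SecurityFlags table above (my arithmetic, not part of the patch): the quoted maximum 0x37037 is the default 0x07007 OR'd with the lanman (0x10010) and plaintext (0x20020) rows from the table,

        0x07007 | 0x10010 | 0x20020 == 0x37037

so permitting the weaker hashes widens the default flag set rather than replacing it.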
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 086ae8f4a207..031cdf293256 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -467,7 +467,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
asn1_open(&ctx, security_blob, length);
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit header "));
+ cFYI(1, ("Error decoding negTokenInit header"));
return 0;
} else if ((cls != ASN1_APL) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
@@ -495,7 +495,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit "));
+ cFYI(1, ("Error decoding negTokenInit"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
@@ -505,7 +505,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit "));
+ cFYI(1, ("Error decoding negTokenInit"));
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
@@ -515,7 +515,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit "));
+ cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
@@ -527,7 +527,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit "));
+ cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index f4124a32bef8..96abeb738978 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -39,7 +39,7 @@ cifs_dump_mem(char *label, void *data, int length)
char *charptr = data;
char buf[10], line[80];
- printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n\n",
+ printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n",
label, length, data);
for (i = 0; i < length; i += 16) {
line[0] = 0;
@@ -57,6 +57,57 @@ cifs_dump_mem(char *label, void *data, int length)
}
}
+#ifdef CONFIG_CIFS_DEBUG2
+void cifs_dump_detail(struct smb_hdr * smb)
+{
+ cERROR(1,("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
+ smb->Command, smb->Status.CifsError,
+ smb->Flags, smb->Flags2, smb->Mid, smb->Pid));
+ cERROR(1,("smb buf %p len %d", smb, smbCalcSize_LE(smb)));
+}
+
+
+void cifs_dump_mids(struct TCP_Server_Info * server)
+{
+ struct list_head *tmp;
+ struct mid_q_entry * mid_entry;
+
+ if(server == NULL)
+ return;
+
+ cERROR(1,("Dump pending requests:"));
+ spin_lock(&GlobalMid_Lock);
+ list_for_each(tmp, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ if(mid_entry) {
+ cERROR(1,("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+ mid_entry->midState,
+ (int)mid_entry->command,
+ mid_entry->pid,
+ mid_entry->tsk,
+ mid_entry->mid));
+#ifdef CONFIG_CIFS_STATS2
+ cERROR(1,("IsLarge: %d buf: %p time rcv: %ld now: %ld",
+ mid_entry->largeBuf,
+ mid_entry->resp_buf,
+ mid_entry->when_received,
+ jiffies));
+#endif /* STATS2 */
+ cERROR(1,("IsMult: %d IsEnd: %d", mid_entry->multiRsp,
+ mid_entry->multiEnd));
+ if(mid_entry->resp_buf) {
+ cifs_dump_detail(mid_entry->resp_buf);
+ cifs_dump_mem("existing buf: ",
+ mid_entry->resp_buf,
+ 62 /* fixme */);
+ }
+
+ }
+ }
+ spin_unlock(&GlobalMid_Lock);
+}
+#endif /* CONFIG_CIFS_DEBUG2 */
+
#ifdef CONFIG_PROC_FS
static int
cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
@@ -73,7 +124,6 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
*beginBuffer = buf + offset;
-
length =
sprintf(buf,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -395,12 +445,12 @@ static read_proc_t traceSMB_read;
static write_proc_t traceSMB_write;
static read_proc_t multiuser_mount_read;
static write_proc_t multiuser_mount_write;
-static read_proc_t extended_security_read;
-static write_proc_t extended_security_write;
-static read_proc_t ntlmv2_enabled_read;
+static read_proc_t security_flags_read;
+static write_proc_t security_flags_write;
+/* static read_proc_t ntlmv2_enabled_read;
static write_proc_t ntlmv2_enabled_write;
static read_proc_t packet_signing_enabled_read;
-static write_proc_t packet_signing_enabled_write;
+static write_proc_t packet_signing_enabled_write;*/
static read_proc_t experimEnabled_read;
static write_proc_t experimEnabled_write;
static read_proc_t linuxExtensionsEnabled_read;
@@ -458,10 +508,10 @@ cifs_proc_init(void)
pde->write_proc = multiuser_mount_write;
pde =
- create_proc_read_entry("ExtendedSecurity", 0, proc_fs_cifs,
- extended_security_read, NULL);
+ create_proc_read_entry("SecurityFlags", 0, proc_fs_cifs,
+ security_flags_read, NULL);
if (pde)
- pde->write_proc = extended_security_write;
+ pde->write_proc = security_flags_write;
pde =
create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -469,7 +519,7 @@ cifs_proc_init(void)
if (pde)
pde->write_proc = lookupFlag_write;
- pde =
+/* pde =
create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs,
ntlmv2_enabled_read, NULL);
if (pde)
@@ -479,7 +529,7 @@ cifs_proc_init(void)
create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs,
packet_signing_enabled_read, NULL);
if (pde)
- pde->write_proc = packet_signing_enabled_write;
+ pde->write_proc = packet_signing_enabled_write;*/
}
void
@@ -496,9 +546,9 @@ cifs_proc_clean(void)
#endif
remove_proc_entry("MultiuserMount", proc_fs_cifs);
remove_proc_entry("OplockEnabled", proc_fs_cifs);
- remove_proc_entry("NTLMV2Enabled",proc_fs_cifs);
- remove_proc_entry("ExtendedSecurity",proc_fs_cifs);
- remove_proc_entry("PacketSigningEnabled",proc_fs_cifs);
+/* remove_proc_entry("NTLMV2Enabled",proc_fs_cifs); */
+ remove_proc_entry("SecurityFlags",proc_fs_cifs);
+/* remove_proc_entry("PacketSigningEnabled",proc_fs_cifs); */
remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs);
remove_proc_entry("Experimental",proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled",proc_fs_cifs);
@@ -782,12 +832,12 @@ multiuser_mount_write(struct file *file, const char __user *buffer,
}
static int
-extended_security_read(char *page, char **start, off_t off,
+security_flags_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len;
- len = sprintf(page, "%d\n", extended_security);
+ len = sprintf(page, "0x%x\n", extended_security);
len -= off;
*start = page + off;
@@ -803,24 +853,52 @@ extended_security_read(char *page, char **start, off_t off,
return len;
}
static int
-extended_security_write(struct file *file, const char __user *buffer,
+security_flags_write(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
+ unsigned int flags;
+ char flags_string[12];
char c;
- int rc;
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- extended_security = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- extended_security = 1;
+ if((count < 1) || (count > 11))
+ return -EINVAL;
+
+ memset(flags_string, 0, 12);
+
+ if(copy_from_user(flags_string, buffer, count))
+ return -EFAULT;
+
+ if(count < 3) {
+ /* single char or single char followed by null */
+ c = flags_string[0];
+ if (c == '0' || c == 'n' || c == 'N')
+ extended_security = CIFSSEC_DEF; /* default */
+ else if (c == '1' || c == 'y' || c == 'Y')
+ extended_security = CIFSSEC_MAX;
+ return count;
+ }
+ /* else we have a number */
+
+ flags = simple_strtoul(flags_string, NULL, 0);
+
+ cFYI(1,("sec flags 0x%x", flags));
+
+ if(flags <= 0) {
+ cERROR(1,("invalid security flags %s",flags_string));
+ return -EINVAL;
+ }
+ if(flags & ~CIFSSEC_MASK) {
+ cERROR(1,("attempt to set unsupported security flags 0x%x",
+ flags & ~CIFSSEC_MASK));
+ return -EINVAL;
+ }
+ /* flags look ok - update the global security flags for cifs module */
+ extended_security = flags;
return count;
}
-static int
+/* static int
ntlmv2_enabled_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
@@ -855,6 +933,8 @@ ntlmv2_enabled_write(struct file *file, const char __user *buffer,
ntlmv2_support = 0;
else if (c == '1' || c == 'y' || c == 'Y')
ntlmv2_support = 1;
+ else if (c == '2')
+ ntlmv2_support = 2;
return count;
}
@@ -898,7 +978,7 @@ packet_signing_enabled_write(struct file *file, const char __user *buffer,
sign_CIFS_PDUs = 2;
return count;
-}
+} */
#endif
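
A rough userspace model of the security_flags_write() path added above (a sketch only: it assumes the build without weak password hashes, so the mask is 0x07007, and substitutes strtoul for the kernel's simple_strtoul):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SEC_MASK 0x07007  /* CIFSSEC_MASK when weak hashes are not configured */
#define SEC_DEF  0x00007  /* may sign, NTLM, NTLMv2 - mirrors CIFSSEC_DEF */
#define SEC_MAX  0x05005  /* must sign and must NTLMv2 - mirrors CIFSSEC_MAX */

/* returns the new flag value, or 0 if the input would be rejected */
static unsigned int parse_sec_flags(const char *s)
{
	unsigned int flags;

	if (strlen(s) < 3) {		/* single char, maybe plus newline */
		if (s[0] == '0' || s[0] == 'n' || s[0] == 'N')
			return SEC_DEF;
		if (s[0] == '1' || s[0] == 'y' || s[0] == 'Y')
			return SEC_MAX;
		return 0;		/* anything else is ignored */
	}
	flags = strtoul(s, NULL, 0);	/* accepts 0x... like simple_strtoul */
	if (flags == 0 || (flags & ~SEC_MASK))
		return 0;		/* zero or unsupported bits rejected */
	return flags;
}

int main(void)
{
	printf("0x%x\n", parse_sec_flags("y"));        /* 0x5005, the maximum */
	printf("0x%x\n", parse_sec_flags("0x5005"));   /* accepted as-is */
	printf("0x%x\n", parse_sec_flags("0x80000"));  /* rejected -> 0 */
	return 0;
}
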
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 4304d9dcfb6c..c26cd0d2c6d5 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,6 +24,10 @@
#define _H_CIFS_DEBUG
void cifs_dump_mem(char *label, void *data, int length);
+#ifdef CONFIG_CIFS_DEBUG2
+void cifs_dump_detail(struct smb_hdr *);
+void cifs_dump_mids(struct TCP_Server_Info *);
+#endif
extern int traceSMB; /* flag which enables the function below */
void dump_smb(struct smb_hdr *, int);
#define CIFS_INFO 0x01
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index d2b128255944..d2a8b2941fc2 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -22,6 +22,7 @@
#include "cifs_unicode.h"
#include "cifs_uniupr.h"
#include "cifspdu.h"
+#include "cifsglob.h"
#include "cifs_debug.h"
/*
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index e7d63737e651..a89efaf78a26 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -26,6 +26,8 @@
#include "md5.h"
#include "cifs_unicode.h"
#include "cifsproto.h"
+#include <linux/ctype.h>
+#include <linux/random.h>
/* Calculate and return the CIFS signature based on the mac key and the smb pdu */
/* the 16 byte signature must be allocated by the caller */
@@ -35,6 +37,8 @@
extern void mdfour(unsigned char *out, unsigned char *in, int n);
extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
+extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
+ unsigned char *p24);
static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
const char * key, char * signature)
@@ -45,7 +49,7 @@ static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
return -EINVAL;
MD5Init(&context);
- MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+ MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length);
MD5Final(signature,&context);
return 0;
@@ -90,7 +94,7 @@ static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
return -EINVAL;
MD5Init(&context);
- MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+ MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
for(i=0;i<n_vec;i++) {
if(iov[i].iov_base == NULL) {
cERROR(1,("null iovec entry"));
@@ -204,11 +208,12 @@ int cifs_calculate_mac_key(char * key, const char * rn, const char * password)
E_md4hash(password, temp_key);
mdfour(key,temp_key,16);
- memcpy(key+16,rn, CIFS_SESSION_KEY_SIZE);
+ memcpy(key+16,rn, CIFS_SESS_KEY_SIZE);
return 0;
}
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_info)
+int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses,
+ const struct nls_table * nls_info)
{
char temp_hash[16];
struct HMACMD5Context ctx;
@@ -225,6 +230,8 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_
user_name_len = strlen(ses->userName);
if(user_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
+ if(ses->domainName == NULL)
+ return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
dom_name_len = strlen(ses->domainName);
if(dom_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
@@ -259,16 +266,131 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_
kfree(unicode_buf);
return 0;
}
-void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_response)
+
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
+{
+ int i;
+ char password_with_pad[CIFS_ENCPWD_SIZE];
+
+ if(ses->server == NULL)
+ return;
+
+ memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
+ strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
+
+ if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
+ if(extended_security & CIFSSEC_MAY_PLNTXT) {
+ memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE);
+ return;
+ }
+
+ /* calculate old style session key */
+ /* calling toupper is less broken than repeatedly
+ calling nls_toupper would be, since that will never
+ work for UTF8, but neither handles multibyte code pages.
+ The only alternative would be converting to UCS-16 (Unicode)
+ (using a routine something like UniStrupr), then
+ uppercasing and then converting back from Unicode - which
+ would only be worth doing if we knew it were utf8. Basically
+ utf8 and other multibyte codepages each need their own strupper
+ function since a byte at a time will not work. */
+
+ for(i = 0; i < CIFS_ENCPWD_SIZE; i++) {
+ password_with_pad[i] = toupper(password_with_pad[i]);
+ }
+
+ SMBencrypt(password_with_pad, ses->server->cryptKey, lnm_session_key);
+ /* clear password before we return/free memory */
+ memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
+}
+#endif /* CIFS_WEAK_PW_HASH */
+
+static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ int rc = 0;
+ int len;
+ char nt_hash[16];
+ struct HMACMD5Context * pctxt;
+ wchar_t * user;
+ wchar_t * domain;
+
+ pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
+
+ if(pctxt == NULL)
+ return -ENOMEM;
+
+ /* calculate md4 hash of password */
+ E_md4hash(ses->password, nt_hash);
+
+ /* initialize the hmac-md5 context with the NT hash as the key */
+ hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
+
+ /* convert ses->userName to unicode and uppercase */
+ len = strlen(ses->userName);
+ user = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if(user == NULL)
+ goto calc_exit_2;
+ len = cifs_strtoUCS(user, ses->userName, len, nls_cp);
+ UniStrupr(user);
+ hmac_md5_update((char *)user, 2*len, pctxt);
+
+ /* convert ses->domainName to unicode and uppercase */
+ if(ses->domainName) {
+ len = strlen(ses->domainName);
+
+ domain = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if(domain == NULL)
+ goto calc_exit_1;
+ len = cifs_strtoUCS(domain, ses->domainName, len, nls_cp);
+ UniStrupr(domain);
+
+ hmac_md5_update((char *)domain, 2*len, pctxt);
+
+ kfree(domain);
+ }
+calc_exit_1:
+ kfree(user);
+calc_exit_2:
+ /* BB FIXME what about bytes 24 through 40 of the signing key?
+ compare with the NTLM example */
+ hmac_md5_final(ses->server->mac_signing_key, pctxt);
+
+ return rc;
+}
+
+void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf,
+ const struct nls_table * nls_cp)
+{
+ int rc;
+ struct ntlmv2_resp * buf = (struct ntlmv2_resp *)resp_buf;
+
+ buf->blob_signature = cpu_to_le32(0x00000101);
+ buf->reserved = 0;
+ buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
+ buf->reserved2 = 0;
+ buf->names[0].type = 0;
+ buf->names[0].length = 0;
+
+ /* calculate buf->ntlmv2_hash */
+ rc = calc_ntlmv2_hash(ses, nls_cp);
+ if(rc)
+ cERROR(1,("could not get v2 hash rc %d",rc));
+ CalcNTLMv2_response(ses, resp_buf);
+}
+
+void CalcNTLMv2_response(const struct cifsSesInfo * ses, char * v2_session_response)
{
struct HMACMD5Context context;
+ /* rest of v2 struct already generated */
memcpy(v2_session_response + 8, ses->server->cryptKey,8);
- /* gen_blob(v2_session_response + 16); */
hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context);
- hmac_md5_update(ses->server->cryptKey,8,&context);
-/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
+ hmac_md5_update(v2_session_response+8,
+ sizeof(struct ntlmv2_resp) - 8, &context);
hmac_md5_final(v2_session_response,&context);
- cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */
+/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b4de6eaabd0..c28ede599946 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,8 +56,8 @@ unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
-unsigned int extended_security = 0;
-unsigned int ntlmv2_support = 0;
+unsigned int extended_security = CIFSSEC_DEF;
+/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;
@@ -908,7 +908,7 @@ static int cifs_dnotify_thread(void * dummyarg)
struct cifsSesInfo *ses;
do {
- if(try_to_freeze())
+ if (try_to_freeze())
continue;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(15*HZ);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d56c0577c710..8f75c6f24701 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -32,7 +32,8 @@
#define TRUE 1
#endif
-extern struct address_space_operations cifs_addr_ops;
+extern const struct address_space_operations cifs_addr_ops;
+extern const struct address_space_operations cifs_addr_ops_smallbuf;
/* Functions related to super block operations */
extern struct super_operations cifs_super_ops;
@@ -99,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
extern int cifs_ioctl (struct inode * inode, struct file * filep,
unsigned int command, unsigned long arg);
-#define CIFS_VERSION "1.43"
+#define CIFS_VERSION "1.44"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 006eb33bff5f..6d7cf5f3bc0b 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -88,7 +88,8 @@ enum statusEnum {
};
enum securityEnum {
- NTLM = 0, /* Legacy NTLM012 auth with NTLM hash */
+ LANMAN = 0, /* Legacy LANMAN auth */
+ NTLM, /* Legacy NTLM012 auth with NTLM hash */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO */
NTLMSSP, /* NTLMSSP via SPNEGO */
@@ -157,7 +158,7 @@ struct TCP_Server_Info {
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* needed for CIFS PDU signature */
- char mac_signing_key[CIFS_SESSION_KEY_SIZE + 16];
+ char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
};
/*
@@ -179,10 +180,13 @@ struct cifsUidInfo {
struct cifsSesInfo {
struct list_head cifsSessionList;
struct semaphore sesSem;
+#if 0
struct cifsUidInfo *uidInfo; /* pointer to user info */
+#endif
struct TCP_Server_Info *server; /* pointer to server info */
atomic_t inUse; /* # of mounts (tree connections) on this ses */
enum statusEnum status;
+ unsigned overrideSecFlg; /* if non-zero override global sec flags */
__u16 ipc_tid; /* special tid for connection to IPC share */
__u16 flags;
char *serverOS; /* name of operating system underlying server */
@@ -194,7 +198,7 @@ struct cifsSesInfo {
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
char userName[MAX_USERNAME_SIZE + 1];
- char domainName[MAX_USERNAME_SIZE + 1];
+ char * domainName;
char * password;
};
/* session flags */
@@ -209,12 +213,12 @@ struct cifsTconInfo {
struct list_head openFileList;
struct semaphore tconSem;
struct cifsSesInfo *ses; /* pointer to session associated with */
- char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource (in ASCII not UTF) */
+ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
__u16 tid; /* The 2 byte tree id */
__u16 Flags; /* optional support bits */
enum statusEnum tidStatus;
- atomic_t useCount; /* how many mounts (explicit or implicit) to this share */
+ atomic_t useCount; /* how many explicit/implicit mounts to share */
#ifdef CONFIG_CIFS_STATS
atomic_t num_smbs_sent;
atomic_t num_writes;
@@ -254,7 +258,7 @@ struct cifsTconInfo {
spinlock_t stat_lock;
#endif /* CONFIG_CIFS_STATS */
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
- FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if file system name truncated */
+ FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
unsigned retry:1;
unsigned nocase:1;
@@ -305,7 +309,6 @@ struct cifsFileInfo {
atomic_t wrtPending; /* handle in use - defer close */
struct semaphore fh_sem; /* prevents reopen race after dead ses*/
char * search_resume_name; /* BB removeme BB */
- unsigned int resume_name_length; /* BB removeme - field renamed and moved BB */
struct cifs_search_info srch_inf;
};
@@ -391,9 +394,9 @@ struct mid_q_entry {
struct smb_hdr *resp_buf; /* response buffer */
int midState; /* wish this were enum but can not pass to wait_event */
__u8 command; /* smb command code */
- unsigned multiPart:1; /* multiple responses to one SMB request */
unsigned largeBuf:1; /* if valid response, is pointer to large buf */
- unsigned multiResp:1; /* multiple trans2 responses for one request */
+ unsigned multiRsp:1; /* multiple trans2 responses for one request */
+ unsigned multiEnd:1; /* both received */
};
struct oplock_q_entry {
@@ -430,15 +433,35 @@ struct dir_notify_req {
#define CIFS_LARGE_BUFFER 2
#define CIFS_IOVEC 4 /* array of response buffers */
-/* Type of session setup needed */
-#define CIFS_PLAINTEXT 0
-#define CIFS_LANMAN 1
-#define CIFS_NTLM 2
-#define CIFS_NTLMSSP_NEG 3
-#define CIFS_NTLMSSP_AUTH 4
-#define CIFS_SPNEGO_INIT 5
-#define CIFS_SPNEGO_TARG 6
-
+/* Security Flags: indicate type of session setup needed */
+#define CIFSSEC_MAY_SIGN 0x00001
+#define CIFSSEC_MAY_NTLM 0x00002
+#define CIFSSEC_MAY_NTLMV2 0x00004
+#define CIFSSEC_MAY_KRB5 0x00008
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFSSEC_MAY_LANMAN 0x00010
+#define CIFSSEC_MAY_PLNTXT 0x00020
+#endif /* weak passwords */
+#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
+
+#define CIFSSEC_MUST_SIGN 0x01001
+/* note that only one of the following MUST flags should be set;
+the net result of setting more than one of them is to
+require use of the stronger protocol */
+#define CIFSSEC_MUST_NTLM 0x02002
+#define CIFSSEC_MUST_NTLMV2 0x04004
+#define CIFSSEC_MUST_KRB5 0x08008
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFSSEC_MUST_LANMAN 0x10010
+#define CIFSSEC_MUST_PLNTXT 0x20020
+#define CIFSSEC_MASK 0x37037 /* current flags supported if weak */
+#else
+#define CIFSSEC_MASK 0x07007 /* flags supported if no weak config */
+#endif /* WEAK_PW_HASH */
+#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
+
+#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2)
+#define CIFSSEC_MAX (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2)
/*
*****************************************************************
* All constants go here
@@ -500,16 +523,16 @@ GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
GLOBAL_EXTERN struct list_head GlobalOplock_Q;
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */
-GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q; /* Dir notify response queue */
+GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */
/*
* Global transaction id (XID) information
*/
GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
-GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
-GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above and list operations */
- /* on midQ entries */
+GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */
+ /* on midQ entries */
GLOBAL_EXTERN char Local_System_Name[15];
/*
@@ -531,7 +554,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */
-GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
+GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
to be established on existing mount if we
have the uid/password or Kerberos credential
or equivalent for current user */
@@ -540,8 +563,8 @@ GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
-GLOBAL_EXTERN unsigned int ntlmv2_support; /* better optional password hash */
GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
+GLOBAL_EXTERN unsigned int secFlags;
GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b2233ac05bd2..86239023545b 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSPDU_H
@@ -24,8 +24,14 @@
#include <net/sock.h>
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define LANMAN_PROT 0
+#define CIFS_PROT 1
+#else
#define CIFS_PROT 0
-#define BAD_PROT CIFS_PROT+1
+#endif
+#define POSIX_PROT CIFS_PROT+1
+#define BAD_PROT 0xFFFF
/* SMB command codes */
/* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
@@ -110,7 +116,7 @@
/*
* Size of the session key (crypto key encrypted with the password
*/
-#define CIFS_SESSION_KEY_SIZE (24)
+#define CIFS_SESS_KEY_SIZE (24)
/*
* Maximum user name length
@@ -400,6 +406,29 @@ typedef struct negotiate_req {
unsigned char DialectsArray[1];
} __attribute__((packed)) NEGOTIATE_REQ;
+/* Dialect index is 13 for LANMAN */
+
+typedef struct lanman_neg_rsp {
+ struct smb_hdr hdr; /* wct = 13 */
+ __le16 DialectIndex;
+ __le16 SecurityMode;
+ __le16 MaxBufSize;
+ __le16 MaxMpxCount;
+ __le16 MaxNumberVcs;
+ __le16 RawMode;
+ __le32 SessionKey;
+ __le32 ServerTime;
+ __le16 ServerTimeZone;
+ __le16 EncryptionKeyLength;
+ __le16 Reserved;
+ __u16 ByteCount;
+ unsigned char EncryptionKey[1];
+} __attribute__((packed)) LANMAN_NEG_RSP;
+
+#define READ_RAW_ENABLE 1
+#define WRITE_RAW_ENABLE 2
+#define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)
+
typedef struct negotiate_rsp {
struct smb_hdr hdr; /* wct = 17 */
__le16 DialectIndex;
@@ -509,7 +538,7 @@ typedef union smb_com_session_setup_andx {
/* unsigned char * NativeOS; */
/* unsigned char * NativeLanMan; */
/* unsigned char * PrimaryDomain; */
- } __attribute__((packed)) resp; /* NTLM response format (with or without extended security */
+ } __attribute__((packed)) resp; /* NTLM response with or without extended sec*/
struct { /* request format */
struct smb_hdr hdr; /* wct = 10 */
@@ -520,8 +549,8 @@ typedef union smb_com_session_setup_andx {
__le16 MaxMpxCount;
__le16 VcNumber;
__u32 SessionKey;
- __le16 PassswordLength;
- __u32 Reserved;
+ __le16 PasswordLength;
+ __u32 Reserved; /* encrypt key len and offset */
__le16 ByteCount;
unsigned char AccountPassword[1]; /* followed by */
/* STRING AccountName */
@@ -543,6 +572,26 @@ typedef union smb_com_session_setup_andx {
} __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
} __attribute__((packed)) SESSION_SETUP_ANDX;
+/* format of NTLMv2 Response ie "case sensitive password" hash when NTLMv2 */
+
+struct ntlmssp2_name {
+ __le16 type;
+ __le16 length;
+/* char name[length]; */
+} __attribute__((packed));
+
+struct ntlmv2_resp {
+ char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+ __le32 blob_signature;
+ __u32 reserved;
+ __le64 time;
+ __u64 client_chal; /* random */
+ __u32 reserved2;
+ struct ntlmssp2_name names[1];
+ /* array of name entries could follow ending in minimum 4 byte struct */
+} __attribute__((packed));
+
+
#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
/* Capabilities bits (for NTLM SessSetup request) */
@@ -573,7 +622,9 @@ typedef struct smb_com_tconx_req {
} __attribute__((packed)) TCONX_REQ;
typedef struct smb_com_tconx_rsp {
- struct smb_hdr hdr; /* wct = 3 *//* note that Win2000 has sent wct=7 in some cases on responses. Four unspecified words followed OptionalSupport */
+ struct smb_hdr hdr; /* wct = 3 note that Win2000 has sent wct = 7
+ in some cases on responses. Four unspecified
+ words followed OptionalSupport */
__u8 AndXCommand;
__u8 AndXReserved;
__le16 AndXOffset;
@@ -1323,6 +1374,9 @@ struct smb_t2_rsp {
#define SMB_FILE_MAXIMUM_INFO 0x40d
/* Find File infolevels */
+#define SMB_FIND_FILE_INFO_STANDARD 0x001
+#define SMB_FIND_FILE_QUERY_EA_SIZE 0x002
+#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
#define SMB_FIND_FILE_DIRECTORY_INFO 0x101
#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
#define SMB_FIND_FILE_NAMES_INFO 0x103
@@ -1844,13 +1898,13 @@ typedef struct {
typedef struct {
__le32 DeviceType;
__le32 DeviceCharacteristics;
-} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info, level 0x104 */
+} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
typedef struct {
__le32 Attributes;
__le32 MaxPathNameComponentLength;
__le32 FileSystemNameLen;
- char FileSystemName[52]; /* do not really need to save this - so potentially get only subset of name */
+ char FileSystemName[52]; /* do not have to save this - get subset? */
} __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO;
/******************************************************************************/
@@ -1947,7 +2001,8 @@ typedef struct {
struct file_allocation_info {
__le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
-} __attribute__((packed)); /* size used on disk, level 0x103 for set, 0x105 for query */
+} __attribute__((packed)); /* size used on disk, for level 0x103 for set,
+ 0x105 for query */
struct file_end_of_file_info {
__le64 FileSize; /* offset to end of file */
@@ -2054,7 +2109,7 @@ typedef struct {
__le32 ExtFileAttributes;
__le32 FileNameLength;
char FileName[1];
-} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF response data area */
+} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF resp data */
typedef struct {
__le32 NextEntryOffset;
@@ -2069,7 +2124,7 @@ typedef struct {
__le32 FileNameLength;
__le32 EaSize; /* length of the xattrs */
char FileName[1];
-} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 FF response data area */
+} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
typedef struct {
__le32 NextEntryOffset;
@@ -2086,7 +2141,7 @@ typedef struct {
__le32 Reserved;
__u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
char FileName[1];
-} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF response data area */
+} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
typedef struct {
__le32 NextEntryOffset;
@@ -2104,7 +2159,22 @@ typedef struct {
__u8 Reserved;
__u8 ShortName[12];
char FileName[1];
-} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FF response data area */
+} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
+
+typedef struct {
+ __u32 ResumeKey;
+ __le16 CreationDate; /* SMB Date */
+ __le16 CreationTime; /* SMB Time */
+ __le16 LastAccessDate;
+ __le16 LastAccessTime;
+ __le16 LastWriteDate;
+ __le16 LastWriteTime;
+ __le32 DataSize; /* File Size (EOF) */
+ __le32 AllocationSize;
+ __le16 Attributes; /* verify not u32 */
+ __u8 FileNameLength;
+ char FileName[1];
+} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
struct win_dev {
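
The on-the-wire layout of the new ntlmv2_resp structure can be sanity-checked with a small userspace replica (a sketch only; it substitutes stdint types for the kernel's __le/__u types and assumes CIFS_ENCPWD_SIZE is 16, as defined elsewhere in cifspdu.h):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct name_entry {			/* mirrors struct ntlmssp2_name */
	uint16_t type;
	uint16_t length;
} __attribute__((packed));

struct v2_resp {			/* mirrors struct ntlmv2_resp */
	char	 ntlmv2_hash[16];	/* CIFS_ENCPWD_SIZE bytes */
	uint32_t blob_signature;	/* 0x00000101, see setup_ntlmv2_rsp() */
	uint32_t reserved;
	uint64_t time;
	uint64_t client_chal;		/* random client challenge */
	uint32_t reserved2;
	struct name_entry names[1];	/* minimum 4 byte terminating entry */
} __attribute__((packed));

int main(void)
{
	printf("blob_signature offset: %zu\n",
	       offsetof(struct v2_resp, blob_signature));	/* 16 */
	printf("time offset:           %zu\n",
	       offsetof(struct v2_resp, time));			/* 24 */
	printf("client_chal offset:    %zu\n",
	       offsetof(struct v2_resp, client_chal));		/* 32 */
	printf("total size:            %zu\n",
	       sizeof(struct v2_resp));				/* 48 */
	return 0;
}
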
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 310ea2f0e0bf..a5ddc62d6fe6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -64,14 +64,12 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
fixed section (word count) in two byte units */);
-#ifdef CONFIG_CIFS_EXPERIMENTAL
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifsSesInfo *ses,
void ** request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
- const int stage, int * pNTLMv2_flg,
+ const int stage,
const struct nls_table *nls_cp);
-#endif
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
@@ -285,8 +283,14 @@ extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
__u32 expected_sequence_number);
extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *);
-extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * );
+extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
+ const struct nls_table *);
+extern void CalcNTLMv2_response(const struct cifsSesInfo *, char * );
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+ const struct nls_table *);
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+extern void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key);
+#endif /* CIFS_WEAK_PW_HASH */
extern int CIFSSMBCopy(int xid,
struct cifsTconInfo *source_tcon,
const char *fromName,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 925881e00ff2..19678c575dfc 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -44,8 +44,11 @@ static struct {
int index;
char *name;
} protocols[] = {
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ {LANMAN_PROT, "\2LM1.2X002"},
+#endif /* weak password hashing for legacy clients */
{CIFS_PROT, "\2NT LM 0.12"},
- {CIFS_PROT, "\2POSIX 2"},
+ {POSIX_PROT, "\2POSIX 2"},
{BAD_PROT, "\2"}
};
#else
@@ -53,11 +56,29 @@ static struct {
int index;
char *name;
} protocols[] = {
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ {LANMAN_PROT, "\2LM1.2X002"},
+#endif /* weak password hashing for legacy clients */
{CIFS_PROT, "\2NT LM 0.12"},
{BAD_PROT, "\2"}
};
#endif
+/* define the number of elements in the cifs dialect array */
+#ifdef CONFIG_CIFS_POSIX
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFS_NUM_PROT 3
+#else
+#define CIFS_NUM_PROT 2
+#endif /* CIFS_WEAK_PW_HASH */
+#else /* not posix */
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFS_NUM_PROT 2
+#else
+#define CIFS_NUM_PROT 1
+#endif /* CONFIG_CIFS_WEAK_PW_HASH */
+#endif /* CIFS_POSIX */
+
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -188,7 +209,6 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
return rc;
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
int
small_smb_init_no_tc(const int smb_command, const int wct,
struct cifsSesInfo *ses, void **request_buf)
@@ -214,7 +234,6 @@ small_smb_init_no_tc(const int smb_command, const int wct,
return rc;
}
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* If the return code is zero, this function must fill in request_buf pointer */
static int
@@ -322,7 +341,8 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* potential retries of smb operations it turns out we can determine */
/* from the mid flags when the request buffer can be resent without */
/* having to use a second distinct buffer for the response */
- *response_buf = *request_buf;
+ if(response_buf)
+ *response_buf = *request_buf;
header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
wct /*wct */ );
@@ -373,8 +393,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
NEGOTIATE_RSP *pSMBr;
int rc = 0;
int bytes_returned;
+ int i;
struct TCP_Server_Info * server;
u16 count;
+ unsigned int secFlags;
if(ses->server)
server = ses->server;
@@ -386,101 +408,200 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
(void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
+
+ /* if any of the auth flags (ie not sign or seal) are overridden use them */
+ if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+ secFlags = ses->overrideSecFlg;
+ else /* if the override flags only set sign/seal, OR them with the global auth flags */
+ secFlags = extended_security | ses->overrideSecFlg;
+
+ cFYI(1,("secFlags 0x%x",secFlags));
+
pSMB->hdr.Mid = GetNextMid(server);
pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
- if (extended_security)
+ if((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
-
- count = strlen(protocols[0].name) + 1;
- strncpy(pSMB->DialectsArray, protocols[0].name, 30);
- /* null guaranteed to be at end of source and target buffers anyway */
-
+
+ count = 0;
+ for(i=0;i<CIFS_NUM_PROT;i++) {
+ strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
+ count += strlen(protocols[i].name) + 1;
+ /* null at end of source and target buffers anyway */
+ }
pSMB->hdr.smb_buf_length += count;
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc == 0) {
- server->secMode = pSMBr->SecurityMode;
- if((server->secMode & SECMODE_USER) == 0)
- cFYI(1,("share mode security"));
- server->secType = NTLM; /* BB override default for
- NTLMv2 or kerberos v5 */
- /* one byte - no need to convert this or EncryptionKeyLen
- from little endian */
- server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
- /* probably no need to store and check maxvcs */
- server->maxBuf =
- min(le32_to_cpu(pSMBr->MaxBufferSize),
+ if (rc != 0)
+ goto neg_err_exit;
+
+ cFYI(1,("Dialect: %d", pSMBr->DialectIndex));
+ /* Check wct = 1 error case */
+ if((pSMBr->hdr.WordCount < 13) || (pSMBr->DialectIndex == BAD_PROT)) {
+ /* core returns wct = 1, but we do not ask for core - otherwise
+ a small wct just means the dialect index is -1, indicating we
+ could not negotiate a common dialect */
+ rc = -EOPNOTSUPP;
+ goto neg_err_exit;
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ } else if((pSMBr->hdr.WordCount == 13)
+ && (pSMBr->DialectIndex == LANMAN_PROT)) {
+ struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr;
+
+ if((secFlags & CIFSSEC_MAY_LANMAN) ||
+ (secFlags & CIFSSEC_MAY_PLNTXT))
+ server->secType = LANMAN;
+ else {
+ cERROR(1, ("mount failed weak security disabled"
+ " in /proc/fs/cifs/SecurityFlags"));
+ rc = -EOPNOTSUPP;
+ goto neg_err_exit;
+ }
+ server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
+ server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
+ server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
+ (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
+ GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
+ /* even though we do not use raw we might as well set this
+ accurately, in case we ever find a need for it */
+ if((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
+ server->maxRw = 0xFF00;
+ server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
+ } else {
+ server->maxRw = 0;/* we do not need to use raw anyway */
+ server->capabilities = CAP_MPX_MODE;
+ }
+ server->timeZone = le16_to_cpu(rsp->ServerTimeZone);
+
+ /* BB get server time for time conversions and add
+ code to use it and timezone since this is not UTC */
+
+ if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
+ memcpy(server->cryptKey, rsp->EncryptionKey,
+ CIFS_CRYPTO_KEY_SIZE);
+ } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ rc = -EIO; /* need cryptkey unless plain text */
+ goto neg_err_exit;
+ }
+
+ cFYI(1,("LANMAN negotiated"));
+ /* we will not end up setting signing flags - as signing
+ did not exist in LANMAN and the server did not return them */
+ goto signing_check;
+#else /* weak security disabled */
+ } else if(pSMBr->hdr.WordCount == 13) {
+ cERROR(1,("mount failed, cifs module not built "
+ "with CIFS_WEAK_PW_HASH support"));
+ rc = -EOPNOTSUPP;
+#endif /* WEAK_PW_HASH */
+ goto neg_err_exit;
+ } else if(pSMBr->hdr.WordCount != 17) {
+ /* unknown wct */
+ rc = -EOPNOTSUPP;
+ goto neg_err_exit;
+ }
+ /* else wct == 17 NTLM */
+ server->secMode = pSMBr->SecurityMode;
+ if((server->secMode & SECMODE_USER) == 0)
+ cFYI(1,("share mode security"));
+
+ if((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
+#endif /* CIFS_WEAK_PW_HASH */
+ cERROR(1,("Server requests plain text password"
+ " but client support disabled"));
+
+ if((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
+ server->secType = NTLMv2;
+ else if(secFlags & CIFSSEC_MAY_NTLM)
+ server->secType = NTLM;
+ else if(secFlags & CIFSSEC_MAY_NTLMV2)
+ server->secType = NTLMv2;
+ /* else krb5 ... any others ... */
+
+ /* one byte, so no need to convert this or EncryptionKeyLen from
+ little endian */
+ server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
+ /* probably no need to store and check maxvcs */
+ server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
- server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
- cFYI(0, ("Max buf = %d", ses->server->maxBuf));
- GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
- server->capabilities = le32_to_cpu(pSMBr->Capabilities);
- server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);
- /* BB with UTC do we ever need to be using srvr timezone? */
- if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
- memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
- CIFS_CRYPTO_KEY_SIZE);
- } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
- && (pSMBr->EncryptionKeyLength == 0)) {
- /* decode security blob */
- } else
- rc = -EIO;
+ server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
+ cFYI(0, ("Max buf = %d", ses->server->maxBuf));
+ GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
+ server->capabilities = le32_to_cpu(pSMBr->Capabilities);
+ server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);
+ if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
+ memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
+ CIFS_CRYPTO_KEY_SIZE);
+ } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
+ && (pSMBr->EncryptionKeyLength == 0)) {
+ /* decode security blob */
+ } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ rc = -EIO; /* no crypt key only if plain text pwd */
+ goto neg_err_exit;
+ }
- /* BB might be helpful to save off the domain of server here */
+ /* BB might be helpful to save off the domain of server here */
- if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
- (server->capabilities & CAP_EXTENDED_SECURITY)) {
- count = pSMBr->ByteCount;
- if (count < 16)
- rc = -EIO;
- else if (count == 16) {
- server->secType = RawNTLMSSP;
- if (server->socketUseCount.counter > 1) {
- if (memcmp
- (server->server_GUID,
- pSMBr->u.extended_response.
- GUID, 16) != 0) {
- cFYI(1, ("server UID changed"));
- memcpy(server->
- server_GUID,
- pSMBr->u.
- extended_response.
- GUID, 16);
- }
- } else
+ if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
+ (server->capabilities & CAP_EXTENDED_SECURITY)) {
+ count = pSMBr->ByteCount;
+ if (count < 16)
+ rc = -EIO;
+ else if (count == 16) {
+ server->secType = RawNTLMSSP;
+ if (server->socketUseCount.counter > 1) {
+ if (memcmp(server->server_GUID,
+ pSMBr->u.extended_response.
+ GUID, 16) != 0) {
+ cFYI(1, ("server UID changed"));
memcpy(server->server_GUID,
- pSMBr->u.extended_response.
- GUID, 16);
- } else {
- rc = decode_negTokenInit(pSMBr->u.
- extended_response.
- SecurityBlob,
- count - 16,
- &server->secType);
- if(rc == 1) {
- /* BB Need to fill struct for sessetup here */
- rc = -EOPNOTSUPP;
- } else {
- rc = -EINVAL;
+ pSMBr->u.extended_response.GUID,
+ 16);
}
+ } else
+ memcpy(server->server_GUID,
+ pSMBr->u.extended_response.GUID, 16);
+ } else {
+ rc = decode_negTokenInit(pSMBr->u.extended_response.
+ SecurityBlob,
+ count - 16,
+ &server->secType);
+ if(rc == 1) {
+ /* BB Need to fill struct for sessetup here */
+ rc = -EOPNOTSUPP;
+ } else {
+ rc = -EINVAL;
}
- } else
- server->capabilities &= ~CAP_EXTENDED_SECURITY;
- if(sign_CIFS_PDUs == FALSE) {
- if(server->secMode & SECMODE_SIGN_REQUIRED)
- cERROR(1,
- ("Server requires /proc/fs/cifs/PacketSigningEnabled"));
- server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
- } else if(sign_CIFS_PDUs == 1) {
- if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
- server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
}
-
+ } else
+ server->capabilities &= ~CAP_EXTENDED_SECURITY;
+
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+signing_check:
+#endif
+ if(sign_CIFS_PDUs == FALSE) {
+ if(server->secMode & SECMODE_SIGN_REQUIRED)
+ cERROR(1,("Server requires "
+ "/proc/fs/cifs/PacketSigningEnabled to be on"));
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+ } else if(sign_CIFS_PDUs == 1) {
+ if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+ } else if(sign_CIFS_PDUs == 2) {
+ if((server->secMode &
+ (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
+ cERROR(1,("signing required but server lacks support"));
+ }
}
-
+neg_err_exit:
cifs_buf_release(pSMB);
+
+ cFYI(1,("negprot rc %d",rc));
return rc;
}
@@ -2239,7 +2360,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
}
symlinkinfo[buflen] = 0; /* just in case so the caller
does not go off the end of the buffer */
- cFYI(1,("readlink result - %s ",symlinkinfo));
+ cFYI(1,("readlink result - %s",symlinkinfo));
}
}
qreparse_out:
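
CIFSSMBNegotiate() now advertises every compiled-in dialect rather than only the first; the count accumulation can be modelled in userspace as below (illustrative only, using the three dialect strings of a CONFIG_CIFS_POSIX build with weak hashes enabled; the leading \2 is the SMB buffer-format byte that introduces each dialect entry):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *protocols[] = { "\2LM1.2X002", "\2NT LM 0.12", "\2POSIX 2" };
	char dialects[128] = { 0 };
	unsigned int count = 0;
	size_t i;

	/* each dialect is copied back to back, keeping its trailing NUL,
	   just as the loop in the patch fills pSMB->DialectsArray */
	for (i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
		strncpy(dialects + count, protocols[i], 16);
		count += strlen(protocols[i]) + 1;
	}
	printf("ByteCount for the negotiate request: %u\n", count);	/* 32 */
	return 0;
}
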
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index bae1479318d1..876eb9ef85fe 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -49,8 +49,6 @@
static DECLARE_COMPLETION(cifsd_complete);
-extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
- unsigned char *p24);
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
unsigned char *p24);
@@ -70,6 +68,7 @@ struct smb_vol {
gid_t linux_gid;
mode_t file_mode;
mode_t dir_mode;
+ unsigned secFlg;
unsigned rw:1;
unsigned retry:1;
unsigned intr:1;
@@ -83,12 +82,7 @@ struct smb_vol {
unsigned remap:1; /* set to remap seven reserved chars in filenames */
unsigned posix_paths:1; /* unset to not ask for posix pathnames. */
unsigned sfu_emul:1;
- unsigned krb5:1;
- unsigned ntlm:1;
- unsigned ntlmv2:1;
unsigned nullauth:1; /* attempt to authenticate with null user */
- unsigned sign:1;
- unsigned seal:1; /* encrypt */
unsigned nocase; /* request case insensitive filenames */
unsigned nobrl; /* disable sending byte range locks to srv */
unsigned int rsize;
@@ -369,21 +363,21 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
continue;
if (bigbuf == NULL) {
bigbuf = cifs_buf_get();
- if(bigbuf == NULL) {
- cERROR(1,("No memory for large SMB response"));
+ if (!bigbuf) {
+ cERROR(1, ("No memory for large SMB response"));
msleep(3000);
/* retry will check if exiting */
continue;
}
- } else if(isLargeBuf) {
- /* we are reusing a dirtry large buf, clear its start */
+ } else if (isLargeBuf) {
+ /* we are reusing a dirty large buf, clear its start */
memset(bigbuf, 0, sizeof (struct smb_hdr));
}
if (smallbuf == NULL) {
smallbuf = cifs_small_buf_get();
- if(smallbuf == NULL) {
- cERROR(1,("No memory for SMB response"));
+ if (!smallbuf) {
+ cERROR(1, ("No memory for SMB response"));
msleep(1000);
/* retry will check if exiting */
continue;
@@ -403,12 +397,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
kernel_recvmsg(csocket, &smb_msg,
&iov, 1, 4, 0 /* BB see socket.h flags */);
- if(server->tcpStatus == CifsExiting) {
+ if (server->tcpStatus == CifsExiting) {
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
- cFYI(1,("Reconnect after server stopped responding"));
+ cFYI(1, ("Reconnect after server stopped responding"));
cifs_reconnect(server);
- cFYI(1,("call to reconnect done"));
+ cFYI(1, ("call to reconnect done"));
csocket = server->ssocket;
continue;
} else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
@@ -417,15 +411,15 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
tcpStatus CifsNeedReconnect if server hung */
continue;
} else if (length <= 0) {
- if(server->tcpStatus == CifsNew) {
- cFYI(1,("tcp session abend after SMBnegprot"));
+ if (server->tcpStatus == CifsNew) {
+ cFYI(1, ("tcp session abend after SMBnegprot"));
/* some servers kill the TCP session rather than
returning an SMB negprot error, in which
case reconnecting here is not going to help,
and so simply return error to mount */
break;
}
- if(length == -EINTR) {
+ if (!try_to_freeze() && (length == -EINTR)) {
cFYI(1,("cifsd thread killed"));
break;
}
@@ -585,9 +579,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
/* merge response - fix up 1st*/
if(coalesce_t2(smb_buffer,
mid_entry->resp_buf)) {
+ mid_entry->multiRsp = 1;
break;
} else {
/* all parts received */
+ mid_entry->multiEnd = 1;
goto multi_t2_fnd;
}
} else {
@@ -632,9 +628,14 @@ multi_t2_fnd:
wake_up_process(task_to_wake);
} else if ((is_valid_oplock_break(smb_buffer, server) == FALSE)
&& (isMultiRsp == FALSE)) {
- cERROR(1, ("No task to wake, unknown frame rcvd!"));
+ cERROR(1, ("No task to wake, unknown frame rcvd! NumMids %d", midCount.counter));
cifs_dump_mem("Received Data is: ",(char *)smb_buffer,
sizeof(struct smb_hdr));
+#ifdef CONFIG_CIFS_DEBUG2
+ cifs_dump_detail(smb_buffer);
+ cifs_dump_mids(server);
+#endif /* CIFS_DEBUG2 */
+
}
} /* end while !EXITING */
@@ -784,7 +785,6 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
vol->rw = TRUE;
- vol->ntlm = TRUE;
/* default is always to request posix paths. */
vol->posix_paths = 1;
@@ -915,30 +915,35 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
cERROR(1,("no security value specified"));
continue;
} else if (strnicmp(value, "krb5i", 5) == 0) {
- vol->sign = 1;
- vol->krb5 = 1;
+ vol->secFlg |= CIFSSEC_MAY_KRB5 |
+ CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "krb5p", 5) == 0) {
- /* vol->seal = 1;
- vol->krb5 = 1; */
+ /* vol->secFlg |= CIFSSEC_MUST_SEAL |
+ CIFSSEC_MAY_KRB5; */
cERROR(1,("Krb5 cifs privacy not supported"));
return 1;
} else if (strnicmp(value, "krb5", 4) == 0) {
- vol->krb5 = 1;
+ vol->secFlg |= CIFSSEC_MAY_KRB5;
} else if (strnicmp(value, "ntlmv2i", 7) == 0) {
- vol->ntlmv2 = 1;
- vol->sign = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
+ CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "ntlmv2", 6) == 0) {
- vol->ntlmv2 = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2;
} else if (strnicmp(value, "ntlmi", 5) == 0) {
- vol->ntlm = 1;
- vol->sign = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLM |
+ CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "ntlm", 4) == 0) {
/* ntlm is default so can be turned off too */
- vol->ntlm = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLM;
} else if (strnicmp(value, "nontlm", 6) == 0) {
- vol->ntlm = 0;
+ /* BB is there a better way to do this? */
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2;
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ } else if (strnicmp(value, "lanman", 6) == 0) {
+ vol->secFlg |= CIFSSEC_MAY_LANMAN;
+#endif
} else if (strnicmp(value, "none", 4) == 0) {
- vol->nullauth = 1;
+ vol->nullauth = 1;
} else {
cERROR(1,("bad security option: %s", value));
return 1;
@@ -976,7 +981,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
}
/* BB are there cases in which a comma can be valid in
a domain name and need special handling? */
- if (strnlen(value, 65) < 65) {
+ if (strnlen(value, 256) < 256) {
vol->domainname = value;
cFYI(1, ("Domain name set"));
} else {
@@ -1168,6 +1173,10 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->no_psx_acl = 0;
} else if (strnicmp(data, "noacl",5) == 0) {
vol->no_psx_acl = 1;
+ } else if (strnicmp(data, "sign",4) == 0) {
+ vol->secFlg |= CIFSSEC_MUST_SIGN;
+/* } else if (strnicmp(data, "seal",4) == 0) {
+ vol->secFlg |= CIFSSEC_MUST_SEAL; */
} else if (strnicmp(data, "direct",6) == 0) {
vol->direct_io = 1;
} else if (strnicmp(data, "forcedirectio",13) == 0) {
@@ -1762,11 +1771,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if (volume_info.username)
strncpy(pSesInfo->userName,
volume_info.username,MAX_USERNAME_SIZE);
- if (volume_info.domainname)
- strncpy(pSesInfo->domainName,
- volume_info.domainname,MAX_USERNAME_SIZE);
+ if (volume_info.domainname) {
+ int len = strlen(volume_info.domainname);
+ pSesInfo->domainName =
+ kmalloc(len + 1, GFP_KERNEL);
+ if(pSesInfo->domainName)
+ strcpy(pSesInfo->domainName,
+ volume_info.domainname);
+ }
pSesInfo->linux_uid = volume_info.linux_uid;
+ pSesInfo->overrideSecFlg = volume_info.secFlg;
down(&pSesInfo->sesSem);
+ /* BB FIXME need to pass vol->secFlgs BB */
rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls);
up(&pSesInfo->sesSem);
if(!rc)
@@ -1980,7 +1996,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
static int
CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
- char session_key[CIFS_SESSION_KEY_SIZE],
+ char session_key[CIFS_SESS_KEY_SIZE],
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
@@ -2038,15 +2054,15 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
pSMB->req_no_secext.CaseInsensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
bcc_ptr = pByteArea(smb_buffer);
- memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE);
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
- memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE);
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
+ memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
if (ses->capabilities & CAP_UNICODE) {
if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */
@@ -2054,7 +2070,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
}
if(user == NULL)
- bytes_returned = 0; /* skill null user */
+ bytes_returned = 0; /* skip null user */
else
bytes_returned =
cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
@@ -2162,8 +2178,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if (remaining_words > 0) {
len = UniStrnlen((wchar_t *)bcc_ptr,
remaining_words-1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL);
if(ses->serverNOS == NULL)
goto sesssetup_nomem;
@@ -2203,12 +2218,10 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
/* if these kcallocs fail not much we
can do, but better to not fail the
sesssetup itself */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2, GFP_KERNEL);
}
@@ -2217,8 +2230,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if (((long) bcc_ptr + len) - (long)
pByteArea(smb_buffer_response)
<= BCC(smb_buffer_response)) {
- if(ses->serverOS)
- kfree(ses->serverOS);
+ kfree(ses->serverOS);
ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
if(ses->serverOS == NULL)
goto sesssetup_nomem;
@@ -2229,8 +2241,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
if(ses->serverNOS == NULL)
goto sesssetup_nomem;
@@ -2274,292 +2285,6 @@ sesssetup_nomem: /* do not return an error on nomem for the info strings,
}
static int
-CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
- char *SecurityBlob,int SecurityBlobLength,
- const struct nls_table *nls_codepage)
-{
- struct smb_hdr *smb_buffer;
- struct smb_hdr *smb_buffer_response;
- SESSION_SETUP_ANDX *pSMB;
- SESSION_SETUP_ANDX *pSMBr;
- char *bcc_ptr;
- char *user;
- char *domain;
- int rc = 0;
- int remaining_words = 0;
- int bytes_returned = 0;
- int len;
- __u32 capabilities;
- __u16 count;
-
- cFYI(1, ("In spnego sesssetup "));
- if(ses == NULL)
- return -EINVAL;
- user = ses->userName;
- domain = ses->domainName;
-
- smb_buffer = cifs_buf_get();
- if (smb_buffer == NULL) {
- return -ENOMEM;
- }
- smb_buffer_response = smb_buffer;
- pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
-
- /* send SMBsessionSetup here */
- header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
- NULL /* no tCon exists yet */ , 12 /* wct */ );
-
- smb_buffer->Mid = GetNextMid(ses->server);
- pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
- pSMB->req.AndXCommand = 0xFF;
- if(ses->server->maxBuf > 64*1024)
- ses->server->maxBuf = (64*1023);
- pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
- pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
-
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
- capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
- CAP_EXTENDED_SECURITY;
- if (ses->capabilities & CAP_UNICODE) {
- smb_buffer->Flags2 |= SMBFLG2_UNICODE;
- capabilities |= CAP_UNICODE;
- }
- if (ses->capabilities & CAP_STATUS32) {
- smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
- capabilities |= CAP_STATUS32;
- }
- if (ses->capabilities & CAP_DFS) {
- smb_buffer->Flags2 |= SMBFLG2_DFS;
- capabilities |= CAP_DFS;
- }
- pSMB->req.Capabilities = cpu_to_le32(capabilities);
-
- pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength);
- bcc_ptr = pByteArea(smb_buffer);
- memcpy(bcc_ptr, SecurityBlob, SecurityBlobLength);
- bcc_ptr += SecurityBlobLength;
-
- if (ses->capabilities & CAP_UNICODE) {
- if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode strings */
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, nls_codepage);
- bcc_ptr += 2 * bytes_returned; /* convert num of 16 bit words to bytes */
- bcc_ptr += 2; /* trailing null */
- if (domain == NULL)
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr,
- "CIFS_LINUX_DOM", 32, nls_codepage);
- else
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, domain, 64,
- nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bcc_ptr += 2;
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, "Linux version ",
- 32, nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release, 32,
- nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bcc_ptr += 2;
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
- 64, nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bcc_ptr += 2;
- } else {
- strncpy(bcc_ptr, user, 200);
- bcc_ptr += strnlen(user, 200);
- *bcc_ptr = 0;
- bcc_ptr++;
- if (domain == NULL) {
- strcpy(bcc_ptr, "CIFS_LINUX_DOM");
- bcc_ptr += strlen("CIFS_LINUX_DOM") + 1;
- } else {
- strncpy(bcc_ptr, domain, 64);
- bcc_ptr += strnlen(domain, 64);
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- strcpy(bcc_ptr, "Linux version ");
- bcc_ptr += strlen("Linux version ");
- strcpy(bcc_ptr, system_utsname.release);
- bcc_ptr += strlen(system_utsname.release) + 1;
- strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
- bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
- }
- count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
- smb_buffer->smb_buf_length += count;
- pSMB->req.ByteCount = cpu_to_le16(count);
-
- rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
- &bytes_returned, 1);
- if (rc) {
-/* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */
- } else if ((smb_buffer_response->WordCount == 3)
- || (smb_buffer_response->WordCount == 4)) {
- __u16 action = le16_to_cpu(pSMBr->resp.Action);
- __u16 blob_len =
- le16_to_cpu(pSMBr->resp.SecurityBlobLength);
- if (action & GUEST_LOGIN)
- cFYI(1, (" Guest login")); /* BB do we want to set anything in SesInfo struct ? */
- if (ses) {
- ses->Suid = smb_buffer_response->Uid; /* UID left in wire format (le) */
- cFYI(1, ("UID = %d ", ses->Suid));
- bcc_ptr = pByteArea(smb_buffer_response); /* response can have either 3 or 4 word count - Samba sends 3 */
-
- /* BB Fix below to make endian neutral !! */
-
- if ((pSMBr->resp.hdr.WordCount == 3)
- || ((pSMBr->resp.hdr.WordCount == 4)
- && (blob_len <
- pSMBr->resp.ByteCount))) {
- if (pSMBr->resp.hdr.WordCount == 4) {
- bcc_ptr +=
- blob_len;
- cFYI(1,
- ("Security Blob Length %d ",
- blob_len));
- }
-
- if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
- if ((long) (bcc_ptr) % 2) {
- remaining_words =
- (BCC(smb_buffer_response)
- - 1) / 2;
- bcc_ptr++; /* Unicode strings must be word aligned */
- } else {
- remaining_words =
- BCC
- (smb_buffer_response) / 2;
- }
- len =
- UniStrnlen((wchar_t *) bcc_ptr,
- remaining_words - 1);
-/* We look for obvious messed up bcc or strings in response so we do not go off
- the end since (at least) WIN2K and Windows XP have a major bug in not null
- terminating last Unicode string in response */
- if(ses->serverOS)
- kfree(ses->serverOS);
- ses->serverOS =
- kzalloc(2 * (len + 1), GFP_KERNEL);
- cifs_strfromUCS_le(ses->serverOS,
- (__le16 *)
- bcc_ptr, len,
- nls_codepage);
- bcc_ptr += 2 * (len + 1);
- remaining_words -= len + 1;
- ses->serverOS[2 * len] = 0;
- ses->serverOS[1 + (2 * len)] = 0;
- if (remaining_words > 0) {
- len = UniStrnlen((wchar_t *)bcc_ptr,
- remaining_words
- - 1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
- ses->serverNOS =
- kzalloc(2 * (len + 1),
- GFP_KERNEL);
- cifs_strfromUCS_le(ses->serverNOS,
- (__le16 *)bcc_ptr,
- len,
- nls_codepage);
- bcc_ptr += 2 * (len + 1);
- ses->serverNOS[2 * len] = 0;
- ses->serverNOS[1 + (2 * len)] = 0;
- remaining_words -= len + 1;
- if (remaining_words > 0) {
- len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
- /* last string not null terminated (e.g.Windows XP/2000) */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL);
- cifs_strfromUCS_le(ses->serverDomain,
- (__le16 *)bcc_ptr,
- len, nls_codepage);
- bcc_ptr += 2*(len+1);
- ses->serverDomain[2*len] = 0;
- ses->serverDomain[1+(2*len)] = 0;
- } /* else no more room so create dummy domain string */
- else {
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain =
- kzalloc(2,GFP_KERNEL);
- }
- } else {/* no room use dummy domain&NOS */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(2, GFP_KERNEL);
- }
- } else { /* ASCII */
-
- len = strnlen(bcc_ptr, 1024);
- if (((long) bcc_ptr + len) - (long)
- pByteArea(smb_buffer_response)
- <= BCC(smb_buffer_response)) {
- if(ses->serverOS)
- kfree(ses->serverOS);
- ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
- strncpy(ses->serverOS, bcc_ptr, len);
-
- bcc_ptr += len;
- bcc_ptr[0] = 0; /* null terminate the string */
- bcc_ptr++;
-
- len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
- strncpy(ses->serverNOS, bcc_ptr, len);
- bcc_ptr += len;
- bcc_ptr[0] = 0;
- bcc_ptr++;
-
- len = strnlen(bcc_ptr, 1024);
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
- strncpy(ses->serverDomain, bcc_ptr, len);
- bcc_ptr += len;
- bcc_ptr[0] = 0;
- bcc_ptr++;
- } else
- cFYI(1,
- ("Variable field of length %d extends beyond end of smb ",
- len));
- }
- } else {
- cERROR(1,
- (" Security Blob Length extends beyond end of SMB"));
- }
- } else {
- cERROR(1, ("No session structure passed in."));
- }
- } else {
- cERROR(1,
- (" Invalid Word count %d: ",
- smb_buffer_response->WordCount));
- rc = -EIO;
- }
-
- if (smb_buffer)
- cifs_buf_release(smb_buffer);
-
- return rc;
-}
-
-static int
CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
struct cifsSesInfo *ses, int * pNTLMv2_flag,
const struct nls_table *nls_codepage)
@@ -2635,8 +2360,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
/* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
if(sign_CIFS_PDUs)
negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
- if(ntlmv2_support)
- negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;
+/* if(ntlmv2_support)
+ negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;*/
/* setup pointers to domain name and workstation name */
bcc_ptr += SecurityBlobLength;
@@ -2783,8 +2508,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr,
remaining_words
- 1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2 * (len + 1),
GFP_KERNEL);
@@ -2802,8 +2526,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
if (remaining_words > 0) {
len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
/* last string is not always null terminated (for e.g. for Windows XP & 2000) */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2 *
(len +
@@ -2822,19 +2545,16 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
= 0;
} /* else no more room so create dummy domain string */
else {
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2,
GFP_KERNEL);
}
} else { /* no room so create dummy domain and NOS string */
- if(ses->serverDomain);
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2, GFP_KERNEL);
}
@@ -2856,8 +2576,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(len + 1,
GFP_KERNEL);
@@ -2867,8 +2586,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(len + 1,
GFP_KERNEL);
@@ -2994,14 +2712,14 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
SecurityBlob->LmChallengeResponse.Buffer = 0;
SecurityBlob->NtChallengeResponse.Length =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
SecurityBlob->NtChallengeResponse.MaximumLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
- memcpy(bcc_ptr, ntlm_session_key, CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ memcpy(bcc_ptr, ntlm_session_key, CIFS_SESS_KEY_SIZE);
SecurityBlob->NtChallengeResponse.Buffer =
cpu_to_le32(SecurityBlobLength);
- SecurityBlobLength += CIFS_SESSION_KEY_SIZE;
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
+ SecurityBlobLength += CIFS_SESS_KEY_SIZE;
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
if (ses->capabilities & CAP_UNICODE) {
if (domain == NULL) {
@@ -3190,8 +2908,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr,
remaining_words
- 1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2 * (len + 1),
GFP_KERNEL);
@@ -3244,8 +2961,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if(ses->serverDomain)
kfree(ses->serverDomain);
ses->serverDomain = kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(2, GFP_KERNEL);
}
} else { /* ASCII */
@@ -3263,8 +2979,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(len+1,GFP_KERNEL);
strncpy(ses->serverNOS, bcc_ptr, len);
bcc_ptr += len;
@@ -3340,22 +3055,33 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr = &pSMB->Password[0];
if((ses->server->secMode) & SECMODE_USER) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
+ *bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
+ /* already aligned so no need to do it below */
} else {
- pSMB->PasswordLength = cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
- weaker LANMAN (which we do not send) is accepted
+ weaker LANMAN (which we do not send by default) is accepted
by Samba (not sure whether other servers allow
NTLMv2 password here) */
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ if((extended_security & CIFSSEC_MAY_LANMAN) &&
+ (ses->server->secType == LANMAN))
+ calc_lanman_hash(ses, bcc_ptr);
+ else
+#endif /* CIFS_WEAK_PW_HASH */
SMBNTencrypt(ses->password,
ses->server->cryptKey,
bcc_ptr);
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
- *bcc_ptr = 0;
- bcc_ptr++; /* align */
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ if(ses->capabilities & CAP_UNICODE) {
+ /* must align unicode strings */
+ *bcc_ptr = 0; /* null byte password */
+ bcc_ptr++;
+ }
}
if(ses->server->secMode &
@@ -3429,7 +3155,10 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
}
/* else do not bother copying these informational fields */
}
- tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
+ if(smb_buffer_response->WordCount == 3)
+ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
+ else
+ tcon->Flags = 0;
cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
@@ -3494,7 +3223,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
struct nls_table * nls_info)
{
int rc = 0;
- char ntlm_session_key[CIFS_SESSION_KEY_SIZE];
+ char ntlm_session_key[CIFS_SESS_KEY_SIZE];
int ntlmv2_flag = FALSE;
int first_time = 0;
@@ -3526,20 +3255,13 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
pSesInfo->server->secMode,
pSesInfo->server->capabilities,
pSesInfo->server->timeZone));
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if(experimEnabled > 1)
- rc = CIFS_SessSetup(xid, pSesInfo, CIFS_NTLM /* type */,
- &ntlmv2_flag, nls_info);
- else
-#endif
- if (extended_security
+ if(experimEnabled < 2)
+ rc = CIFS_SessSetup(xid, pSesInfo,
+ first_time, nls_info);
+ else if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == NTLMSSP)) {
- cFYI(1, ("New style sesssetup"));
- rc = CIFSSpnegoSessSetup(xid, pSesInfo,
- NULL /* security blob */,
- 0 /* blob length */,
- nls_info);
+ rc = -EOPNOTSUPP;
} else if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == RawNTLMSSP)) {
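
Much of the connect.c cleanup above is mechanical: each "if (ptr) kfree(ptr);" pair collapses to a bare kfree() because kfree(NULL) is defined to be a no-op. Below is a minimal userspace sketch of the same replace-a-string pattern; libc free() has the same NULL semantics, and replace_string() is a hypothetical helper for illustration, not a CIFS function.

#include <stdlib.h>
#include <string.h>

/*
 * Swap in a fresh copy of a string.  Freeing the old value without an
 * explicit NULL check is safe: free(NULL), like the kernel's
 * kfree(NULL), does nothing, so the old "if (ptr) kfree(ptr);" guard
 * was pure noise.
 */
static int replace_string(char **slot, const char *src, size_t maxlen)
{
	size_t len = strnlen(src, maxlen);
	char *copy = calloc(len + 1, 1);

	if (copy == NULL)
		return -1;
	memcpy(copy, src, len);

	free(*slot);		/* no NULL check needed */
	*slot = copy;
	return 0;
}

The same reasoning covers the serverOS, serverNOS and serverDomain frees throughout this patch.
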
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 82315edc77d7..ba4cbe9b0684 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -113,7 +113,7 @@ cifs_bp_rename_retry:
full_path[namelen+2] = 0;
BB remove above eight lines BB */
-/* Inode operations in similar order to how they appear in the Linux file fs.h */
+/* Inode operations in similar order to how they appear in Linux file fs.h */
int
cifs_create(struct inode *inode, struct dentry *direntry, int mode,
@@ -178,11 +178,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
FreeXid(xid);
return -ENOMEM;
}
-
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
+ rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ else
+ rc = -EIO; /* no NT SMB support fall into legacy open below */
+
if(rc == -EIO) {
/* old server, retry the open legacy style */
rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
@@ -191,7 +194,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_create returned 0x%x ", rc));
+ cFYI(1, ("cifs_create returned 0x%x", rc));
} else {
/* If Open reported that we actually created a file
then we now have to set the mode if possible */
@@ -369,6 +372,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ /* BB FIXME - add handling for backlevel servers
+ which need legacy open and check for all
+ calls to SMBOpen for fallback to
+                     SMBLegacyOpen */
if(!rc) {
/* BB Do not bother to decode buf since no
local inode yet to put timestamps in,
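
The cifs_create() change above introduces a pattern repeated later in cifs_open(): attempt the NT-style open only when the server advertised CAP_NT_SMBS, and treat a missing capability like an -EIO failure so both cases fall through to the legacy OpenX call. The sketch below is a hedged illustration of that control flow; nt_open() and legacy_open() stand in for CIFSSMBOpen() and SMBLegacyOpen(), and the CAP_NT_SMBS value shown is illustrative only.

#include <errno.h>

#define CAP_NT_SMBS 0x0010	/* illustrative; see cifspdu.h for the real flag */

static int open_with_fallback(unsigned int server_caps,
			      int (*nt_open)(void),
			      int (*legacy_open)(void))
{
	int rc;

	if (server_caps & CAP_NT_SMBS)
		rc = nt_open();
	else
		rc = -EIO;	/* no NT SMB support, force the legacy path */

	if (rc == -EIO)		/* old server, or NT-style open rejected */
		rc = legacy_open();

	return rc;
}
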
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index 633a93811328..d91a3d44e9e3 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -91,14 +91,14 @@ int cifs_dir_notify(struct file * file, unsigned long arg)
if(full_path == NULL) {
rc = -ENOMEM;
} else {
- cERROR(1,("cifs dir notify on file %s with arg 0x%lx",full_path,arg)); /* BB removeme BB */
+ cFYI(1,("dir notify on file %s Arg 0x%lx",full_path,arg));
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
GENERIC_READ | SYNCHRONIZE, 0 /* create options */,
&netfid, &oplock,NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
/* BB fixme - add this handle to a notify handle list */
if(rc) {
- cERROR(1,("Could not open directory for notify")); /* BB remove BB */
+ cFYI(1,("Could not open directory for notify"));
} else {
filter = convert_to_cifs_notify_flags(arg);
if(filter != 0) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index b4a18c1cab0a..5861eb42e626 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -110,7 +110,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
&pCifsInode->openFileList);
}
write_unlock(&GlobalSMBSeslock);
- write_unlock(&file->f_owner.lock);
if (pCifsInode->clientCanCacheRead) {
/* we have the inode open somewhere else
no need to discard cache data */
@@ -201,7 +200,7 @@ int cifs_open(struct inode *inode, struct file *file)
} else {
if (file->f_flags & O_EXCL)
cERROR(1, ("could not find file instance for "
- "new file %p ", file));
+ "new file %p", file));
}
}
@@ -260,10 +259,15 @@ int cifs_open(struct inode *inode, struct file *file)
rc = -ENOMEM;
goto out;
}
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
- CREATE_NOT_DIR, &netfid, &oplock, buf,
+
+ if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
+ rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR);
+ else
+ rc = -EIO; /* no NT SMB support fall into legacy open below */
+
if (rc == -EIO) {
/* Old server, try legacy style OpenX */
rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
@@ -272,7 +276,7 @@ int cifs_open(struct inode *inode, struct file *file)
& CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_open returned 0x%x ", rc));
+ cFYI(1, ("cifs_open returned 0x%x", rc));
goto out;
}
file->private_data =
@@ -282,7 +286,6 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
- write_lock(&file->f_owner.lock);
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist, &pTcon->openFileList);
@@ -293,7 +296,6 @@ int cifs_open(struct inode *inode, struct file *file)
&oplock, buf, full_path, xid);
} else {
write_unlock(&GlobalSMBSeslock);
- write_unlock(&file->f_owner.lock);
}
if (oplock & CIFS_CREATE_ACTION) {
@@ -409,8 +411,8 @@ static int cifs_reopen_file(struct inode *inode, struct file *file,
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
up(&pCifsFile->fh_sem);
- cFYI(1, ("cifs_open returned 0x%x ", rc));
- cFYI(1, ("oplock: %d ", oplock));
+ cFYI(1, ("cifs_open returned 0x%x", rc));
+ cFYI(1, ("oplock: %d", oplock));
} else {
pCifsFile->netfid = netfid;
pCifsFile->invalidHandle = FALSE;
@@ -472,7 +474,6 @@ int cifs_close(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
if (pSMBFile) {
pSMBFile->closePend = TRUE;
- write_lock(&file->f_owner.lock);
if (pTcon) {
/* no sense reconnecting to close a file that is
already closed */
@@ -487,23 +488,18 @@ int cifs_close(struct inode *inode, struct file *file)
the struct would be in each open file,
but this should give enough time to
clear the socket */
- write_unlock(&file->f_owner.lock);
cERROR(1,("close with pending writes"));
msleep(timeout);
- write_lock(&file->f_owner.lock);
timeout *= 4;
}
- write_unlock(&file->f_owner.lock);
rc = CIFSSMBClose(xid, pTcon,
pSMBFile->netfid);
- write_lock(&file->f_owner.lock);
}
}
write_lock(&GlobalSMBSeslock);
list_del(&pSMBFile->flist);
list_del(&pSMBFile->tlist);
write_unlock(&GlobalSMBSeslock);
- write_unlock(&file->f_owner.lock);
kfree(pSMBFile->search_resume_name);
kfree(file->private_data);
file->private_data = NULL;
@@ -531,7 +527,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
(struct cifsFileInfo *)file->private_data;
char *ptmp;
- cFYI(1, ("Closedir inode = 0x%p with ", inode));
+ cFYI(1, ("Closedir inode = 0x%p", inode));
xid = GetXid();
@@ -605,7 +601,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
}
if (pfLock->fl_flags & FL_ACCESS)
cFYI(1, ("Process suspended by mandatory locking - "
- "not implemented yet "));
+ "not implemented yet"));
if (pfLock->fl_flags & FL_LEASE)
cFYI(1, ("Lease on file - not implemented yet"));
if (pfLock->fl_flags &
@@ -1375,7 +1371,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
xid = GetXid();
- cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
+ cFYI(1, ("Sync file - name: %s datasync: 0x%x",
dentry->d_name.name, datasync));
rc = filemap_fdatawrite(inode->i_mapping);
@@ -1404,7 +1400,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
+/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
#if 0
if (rc < 0)
@@ -1836,7 +1832,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
if (rc < 0)
goto io_error;
else
- cFYI(1, ("Bytes read %d ",rc));
+ cFYI(1, ("Bytes read %d",rc));
file->f_dentry->d_inode->i_atime =
current_fs_time(file->f_dentry->d_inode->i_sb);
@@ -1946,7 +1942,7 @@ static int cifs_prepare_write(struct file *file, struct page *page,
return 0;
}
-struct address_space_operations cifs_addr_ops = {
+const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
.readpages = cifs_readpages,
.writepage = cifs_writepage,
@@ -1957,3 +1953,19 @@ struct address_space_operations cifs_addr_ops = {
/* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
+
+/*
+ * cifs_readpages requires the server to support a buffer large enough to
+ * contain the header plus one complete page of data. Otherwise, we need
+ * to leave cifs_readpages out of the address space operations.
+ */
+const struct address_space_operations cifs_addr_ops_smallbuf = {
+ .readpage = cifs_readpage,
+ .writepage = cifs_writepage,
+ .writepages = cifs_writepages,
+ .prepare_write = cifs_prepare_write,
+ .commit_write = cifs_commit_write,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ /* .sync_page = cifs_sync_page, */
+ /* .direct_IO = */
+};
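
The new cifs_addr_ops_smallbuf table exists because the old code patched the one shared table at inode-init time (a_ops->readpages = NULL), which mutated global state for every mount and cannot work once the table becomes const. Providing a second read-only table and choosing per inode avoids both problems. A standalone sketch of the idea follows; struct addr_ops is a stand-in for address_space_operations, not the kernel structure.

#include <stddef.h>

struct addr_ops {
	int (*readpage)(void *page);
	int (*readpages)(void *pages, unsigned int nr);
	int (*writepage)(void *page);
};

static int do_readpage(void *page) { return 0; }
static int do_readpages(void *pages, unsigned int nr) { return 0; }
static int do_writepage(void *page) { return 0; }

/* One full-featured table, one reduced table for servers whose
 * negotiated buffer cannot hold an SMB header plus a whole page. */
static const struct addr_ops full_ops = {
	.readpage  = do_readpage,
	.readpages = do_readpages,
	.writepage = do_writepage,
};

static const struct addr_ops smallbuf_ops = {
	.readpage  = do_readpage,
	.readpages = NULL,	/* readpages needs header + full page */
	.writepage = do_writepage,
};

/* Select a table per inode instead of writing NULL into shared state. */
static const struct addr_ops *pick_ops(unsigned int server_maxbuf,
				       unsigned int page_size,
				       unsigned int hdr_size)
{
	if (server_maxbuf < page_size + hdr_size)
		return &smallbuf_ops;
	return &full_ops;
}
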
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4093764ef461..b88147c1dc27 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -41,7 +41,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
char *tmp_path;
pTcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s ", search_path));
+ cFYI(1, ("Getting info on %s", search_path));
/* could have done a find first instead but this returns more info */
rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
@@ -97,9 +97,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
inode = *pinode;
cifsInfo = CIFS_I(inode);
- cFYI(1, ("Old time %ld ", cifsInfo->time));
+ cFYI(1, ("Old time %ld", cifsInfo->time));
cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld ", cifsInfo->time));
+ cFYI(1, ("New time %ld", cifsInfo->time));
/* this is ok to set on every inode revalidate */
atomic_set(&cifsInfo->inUse,1);
@@ -180,11 +180,12 @@ int cifs_get_inode_info_unix(struct inode **pinode,
else /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
- inode->i_data.a_ops = &cifs_addr_ops;
/* check if server can support readpages */
if(pTcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE)
- inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+ inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ inode->i_data.a_ops = &cifs_addr_ops;
} else if (S_ISDIR(inode->i_mode)) {
cFYI(1, ("Directory inode"));
inode->i_op = &cifs_dir_inode_ops;
@@ -421,23 +422,23 @@ int cifs_get_inode_info(struct inode **pinode,
inode = *pinode;
cifsInfo = CIFS_I(inode);
cifsInfo->cifsAttrs = attr;
- cFYI(1, ("Old time %ld ", cifsInfo->time));
+ cFYI(1, ("Old time %ld", cifsInfo->time));
cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld ", cifsInfo->time));
+ cFYI(1, ("New time %ld", cifsInfo->time));
/* blksize needs to be multiple of two. So safer to default to
blksize and blkbits set in superblock so 2**blkbits and blksize
will match rather than setting to:
(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
- /* Linux can not store file creation time unfortunately so we ignore it */
+ /* Linux can not store file creation time so ignore it */
inode->i_atime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
inode->i_mtime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
inode->i_ctime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
- cFYI(0, ("Attributes came in as 0x%x ", attr));
+ cFYI(0, ("Attributes came in as 0x%x", attr));
/* set default mode. will override for dirs below */
if (atomic_read(&cifsInfo->inUse) == 0)
@@ -519,10 +520,11 @@ int cifs_get_inode_info(struct inode **pinode,
else /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
- inode->i_data.a_ops = &cifs_addr_ops;
if(pTcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE)
- inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+ inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ inode->i_data.a_ops = &cifs_addr_ops;
} else if (S_ISDIR(inode->i_mode)) {
cFYI(1, ("Directory inode"));
inode->i_op = &cifs_dir_inode_ops;
@@ -731,7 +733,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cFYI(1, ("cifs_mkdir returned 0x%x ", rc));
+ cFYI(1, ("cifs_mkdir returned 0x%x", rc));
d_drop(direntry);
} else {
inode->i_nlink++;
@@ -798,7 +800,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
- cFYI(1, ("cifs_rmdir, inode = 0x%p with ", inode));
+ cFYI(1, ("cifs_rmdir, inode = 0x%p", inode));
xid = GetXid();
@@ -1121,7 +1123,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
xid = GetXid();
- cFYI(1, ("In cifs_setattr, name = %s attrs->iavalid 0x%x ",
+ cFYI(1, ("setattr on file %s attrs->iavalid 0x%x",
direntry->d_name.name, attrs->ia_valid));
cifs_sb = CIFS_SB(direntry->d_inode->i_sb);
@@ -1157,6 +1159,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
when the local oplock break takes longer to flush
writebehind data than the SMB timeout for the SetPathInfo
request would allow */
+
open_file = find_writable_file(cifsInode);
if (open_file) {
__u16 nfid = open_file->netfid;
@@ -1289,7 +1292,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
it may be useful to Windows - but we do
not want to set ctime unless some other
timestamp is changing */
- cFYI(1, ("CIFS - CTIME changed "));
+ cFYI(1, ("CIFS - CTIME changed"));
time_buf.ChangeTime =
cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
} else
@@ -1356,7 +1359,7 @@ cifs_setattr_exit:
void cifs_delete_inode(struct inode *inode)
{
- cFYI(1, ("In cifs_delete_inode, inode = 0x%p ", inode));
+ cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
/* may have to add back in if and when safe distributed caching of
directories added e.g. via FindNotify */
}
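
The inode.c hunks above feed LastAccessTime, LastWriteTime and ChangeTime through cifs_NTtimeToUnix(). For reference, here is a generic sketch of that conversion: NT timestamps are 100-nanosecond ticks since 1601-01-01, offset from the Unix epoch by a fixed 11644473600 seconds. This is a plausible standalone version under the assumption that timestamps fall at or after 1970, not the kernel's exact implementation.

#include <stdint.h>
#include <time.h>

#define NT_UNIX_EPOCH_DELTA 11644473600ULL	/* seconds from 1601 to 1970 */

static struct timespec nt_time_to_unix(uint64_t nt_100ns)
{
	struct timespec ts;

	ts.tv_sec  = (time_t)(nt_100ns / 10000000ULL - NT_UNIX_EPOCH_DELTA);
	ts.tv_nsec = (long)((nt_100ns % 10000000ULL) * 100);
	return ts;
}
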
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 2ec99f833142..a57f5d6e6213 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -167,7 +167,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
return -ENOMEM;
}
- cFYI(1, ("Full path: %s ", full_path));
+ cFYI(1, ("Full path: %s", full_path));
cFYI(1, ("symname is %s", symname));
/* BB what if DFS and this volume is on different share? BB */
@@ -186,8 +186,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
inode->i_sb,xid);
if (rc != 0) {
- cFYI(1,
- ("Create symlink worked but get_inode_info failed with rc = %d ",
+ cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
rc));
} else {
if (pTcon->nocase)
@@ -289,7 +288,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
else {
cFYI(1,("num referral: %d",num_referrals));
if(referrals) {
- cFYI(1,("referral string: %s ",referrals));
+ cFYI(1,("referral string: %s",referrals));
strncpy(tmpbuffer, referrals, len-1);
}
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index fafd056426e4..22c937e5884f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -101,6 +101,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
kfree(buf_to_free->password);
+ kfree(buf_to_free->domainName);
kfree(buf_to_free);
}
@@ -499,11 +500,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
if(pSMBr->ByteCount > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
- pnotify = (struct file_notify_information *)((char *)&pSMBr->hdr.Protocol
- + data_offset);
- cFYI(1,("dnotify on %s with action: 0x%x",pnotify->FileName,
+ pnotify = (struct file_notify_information *)
+ ((char *)&pSMBr->hdr.Protocol + data_offset);
+ cFYI(1,("dnotify on %s Action: 0x%x",pnotify->FileName,
pnotify->Action)); /* BB removeme BB */
- /* cifs_dump_mem("Received notify Data is: ",buf,sizeof(struct smb_hdr)+60); */
+ /* cifs_dump_mem("Rcvd notify Data: ",buf,
+ sizeof(struct smb_hdr)+60); */
return TRUE;
}
if(pSMBr->hdr.Status.CifsError) {
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 5de74d216fdd..b66eff5dc624 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -84,11 +84,11 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{ERRerror, -EIO},
- {ERRbadpw, -EPERM},
+ {ERRbadpw, -EACCES}, /* was EPERM */
{ERRbadtype, -EREMOTE},
{ERRaccess, -EACCES},
{ERRinvtid, -ENXIO},
- {ERRinvnetname, -ENODEV},
+ {ERRinvnetname, -ENXIO},
{ERRinvdevice, -ENXIO},
{ERRqfull, -ENOSPC},
{ERRqtoobig, -ENOSPC},
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c
deleted file mode 100644
index 115359cc7a32..000000000000
--- a/fs/cifs/ntlmssp.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * fs/cifs/ntlmssp.h
- *
- * Copyright (c) International Business Machines Corp., 2006
- * Author(s): Steve French (sfrench@us.ibm.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "cifspdu.h"
-#include "cifsglob.h"
-#include "cifsproto.h"
-#include "cifs_unicode.h"
-#include "cifs_debug.h"
-#include "ntlmssp.h"
-#include "nterr.h"
-
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
-{
- __u32 capabilities = 0;
-
- /* init fields common to all four types of SessSetup */
- /* note that header is initialized to zero in header_assemble */
- pSMB->req.AndXCommand = 0xFF;
- pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
- pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
-
- /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
-
- /* BB verify whether signing required on neg or just on auth frame
- (and NTLM case) */
-
- capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
- CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
-
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
- if (ses->capabilities & CAP_UNICODE) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
- capabilities |= CAP_UNICODE;
- }
- if (ses->capabilities & CAP_STATUS32) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
- capabilities |= CAP_STATUS32;
- }
- if (ses->capabilities & CAP_DFS) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
- capabilities |= CAP_DFS;
- }
-
- /* BB check whether to init vcnum BB */
- return capabilities;
-}
-int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
- int * pNTLMv2_flg, const struct nls_table *nls_cp)
-{
- int rc = 0;
- int wct;
- struct smb_hdr *smb_buffer;
- char *bcc_ptr;
- SESSION_SETUP_ANDX *pSMB;
- __u32 capabilities;
-
- if(ses == NULL)
- return -EINVAL;
-
- cFYI(1,("SStp type: %d",type));
- if(type < CIFS_NTLM) {
-#ifndef CONFIG_CIFS_WEAK_PW_HASH
- /* LANMAN and plaintext are less secure and off by default.
- So we make this explicitly be turned on in kconfig (in the
- build) and turned on at runtime (changed from the default)
- in proc/fs/cifs or via mount parm. Unfortunately this is
- needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
- return -EOPNOTSUPP;
-#endif
- wct = 10; /* lanman 2 style sessionsetup */
- } else if(type < CIFS_NTLMSSP_NEG)
- wct = 13; /* old style NTLM sessionsetup */
- else /* same size for negotiate or auth, NTLMSSP or extended security */
- wct = 12;
-
- rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
- (void **)&smb_buffer);
- if(rc)
- return rc;
-
- pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
-
- capabilities = cifs_ssetup_hdr(ses, pSMB);
- bcc_ptr = pByteArea(smb_buffer);
- if(type > CIFS_NTLM) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
- capabilities |= CAP_EXTENDED_SECURITY;
- pSMB->req.Capabilities = cpu_to_le32(capabilities);
- /* BB set password lengths */
- } else if(type < CIFS_NTLM) /* lanman */ {
- /* no capabilities flags in old lanman negotiation */
- /* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
- } else /* type CIFS_NTLM */ {
- pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
- pSMB->req_no_secext.CaseInsensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
- pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
- }
-
-
- /* copy session key */
-
- /* if Unicode, align strings to two byte boundary */
-
- /* copy user name */ /* BB Do we need to special case null user name? */
-
- /* copy domain name */
-
- /* copy Linux version */
-
- /* copy network operating system name */
-
- /* update bcc and smb buffer length */
-
-/* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
- /* SMB request buf freed in SendReceive2 */
-
- return rc;
-}
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index b689c5035124..03bbcb377913 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -21,6 +21,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/smp_lock.h>
#include "cifspdu.h"
@@ -31,8 +32,8 @@
#include "cifs_fs_sb.h"
#include "cifsfs.h"
-/* BB fixme - add debug wrappers around this function to disable it fixme BB */
-/* static void dump_cifs_file_struct(struct file *file, char *label)
+#ifdef CONFIG_CIFS_DEBUG2
+static void dump_cifs_file_struct(struct file *file, char *label)
{
struct cifsFileInfo * cf;
@@ -53,7 +54,8 @@
}
}
-} */
+}
+#endif /* DEBUG2 */
/* Returns one if new inode created (which therefore needs to be hashed) */
/* Might check in the future if inode number changed so we can rehash inode */
@@ -107,32 +109,52 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
return rc;
}
-static void fill_in_inode(struct inode *tmp_inode,
- FILE_DIRECTORY_INFO *pfindData, int *pobject_type, int isNewInode)
+static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
+ char * buf, int *pobject_type, int isNewInode)
{
loff_t local_size;
struct timespec local_mtime;
struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
- __u32 attr = le32_to_cpu(pfindData->ExtFileAttributes);
- __u64 allocation_size = le64_to_cpu(pfindData->AllocationSize);
- __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
-
- cifsInfo->cifsAttrs = attr;
- cifsInfo->time = jiffies;
+ __u32 attr;
+ __u64 allocation_size;
+ __u64 end_of_file;
/* save mtime and size */
local_mtime = tmp_inode->i_mtime;
local_size = tmp_inode->i_size;
+ if(new_buf_type) {
+ FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf;
+
+ attr = le32_to_cpu(pfindData->ExtFileAttributes);
+ allocation_size = le64_to_cpu(pfindData->AllocationSize);
+ end_of_file = le64_to_cpu(pfindData->EndOfFile);
+ tmp_inode->i_atime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
+ tmp_inode->i_mtime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
+ tmp_inode->i_ctime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+ } else { /* legacy, OS2 and DOS style */
+ FIND_FILE_STANDARD_INFO * pfindData =
+ (FIND_FILE_STANDARD_INFO *)buf;
+
+ attr = le16_to_cpu(pfindData->Attributes);
+ allocation_size = le32_to_cpu(pfindData->AllocationSize);
+ end_of_file = le32_to_cpu(pfindData->DataSize);
+ tmp_inode->i_atime = CURRENT_TIME;
+ /* tmp_inode->i_mtime = BB FIXME - add dos time handling
+ tmp_inode->i_ctime = 0; BB FIXME */
+
+ }
+
/* Linux can not store file creation time unfortunately so ignore it */
- tmp_inode->i_atime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
- tmp_inode->i_mtime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
- tmp_inode->i_ctime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+
+ cifsInfo->cifsAttrs = attr;
+ cifsInfo->time = jiffies;
+
/* treat dos attribute of read-only as read-only mode bit e.g. 555? */
/* 2767 perms - indicate mandatory locking */
/* BB fill in uid and gid here? with help from winbind?
@@ -215,11 +237,13 @@ static void fill_in_inode(struct inode *tmp_inode,
else
tmp_inode->i_fop = &cifs_file_ops;
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
(cifs_sb->tcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
+ tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ tmp_inode->i_data.a_ops = &cifs_addr_ops;
+
if(isNewInode)
return; /* No sense invalidating pages for new inode
since have not started caching readahead file
@@ -338,11 +362,12 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
else
tmp_inode->i_fop = &cifs_file_ops;
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
(cifs_sb->tcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
+ tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ tmp_inode->i_data.a_ops = &cifs_addr_ops;
if(isNewInode)
return; /* No sense invalidating pages for new inode since we
@@ -415,7 +440,10 @@ static int initiate_cifs_search(const int xid, struct file *file)
ffirst_retry:
/* test for Unix extensions */
if (pTcon->ses->capabilities & CAP_UNIX) {
- cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
+ } else if ((pTcon->ses->capabilities &
+ (CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
} else /* not srvinos - BB fixme add check for backlevel? */ {
@@ -451,12 +479,19 @@ static int cifs_unicode_bytelen(char *str)
return len << 1;
}
-static char *nxt_dir_entry(char *old_entry, char *end_of_smb)
+static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
{
char * new_entry;
FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
- new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+ if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pfData;
+ pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
+
+ new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
+ pfData->FileNameLength;
+ } else
+ new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
cFYI(1,("new entry %p old entry %p",new_entry,old_entry));
/* validate that new_entry is not past end of SMB */
if(new_entry >= end_of_smb) {
@@ -464,7 +499,10 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb)
("search entry %p began after end of SMB %p old entry %p",
new_entry, end_of_smb, old_entry));
return NULL;
- } else if (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb) {
+ } else if(((level == SMB_FIND_FILE_INFO_STANDARD) &&
+ (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) ||
+ ((level != SMB_FIND_FILE_INFO_STANDARD) &&
+ (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
cERROR(1,("search entry %p extends after end of SMB %p",
new_entry, end_of_smb));
return NULL;
@@ -482,7 +520,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
char * filename = NULL;
int len = 0;
- if(cfile->srch_inf.info_level == 0x202) {
+ if(cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
filename = &pFindData->FileName[0];
if(cfile->srch_inf.unicode) {
@@ -491,26 +529,34 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
/* BB should we make this strnlen of PATH_MAX? */
len = strnlen(filename, 5);
}
- } else if(cfile->srch_inf.info_level == 0x101) {
+ } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
FILE_DIRECTORY_INFO * pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(cfile->srch_inf.info_level == 0x102) {
+ } else if(cfile->srch_inf.info_level ==
+ SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
FILE_FULL_DIRECTORY_INFO * pFindData =
(FILE_FULL_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(cfile->srch_inf.info_level == 0x105) {
+ } else if(cfile->srch_inf.info_level ==
+ SMB_FIND_FILE_ID_FULL_DIR_INFO) {
SEARCH_ID_FULL_DIR_INFO * pFindData =
(SEARCH_ID_FULL_DIR_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(cfile->srch_inf.info_level == 0x104) {
+ } else if(cfile->srch_inf.info_level ==
+ SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
FILE_BOTH_DIRECTORY_INFO * pFindData =
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
+ } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pFindData =
+ (FIND_FILE_STANDARD_INFO *)current_entry;
+ filename = &pFindData->FileName[0];
+ len = le32_to_cpu(pFindData->FileNameLength);
} else {
cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
}
@@ -597,7 +643,9 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
. and .. for the root of a drive and for those we need
to start two entries earlier */
-/* dump_cifs_file_struct(file, "In fce ");*/
+#ifdef CONFIG_CIFS_DEBUG2
+ dump_cifs_file_struct(file, "In fce ");
+#endif
if(((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
@@ -644,10 +692,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
- cifsFile->srch_inf.entries_in_buffer;
pos_in_buf = index_to_find - first_entry_in_buffer;
- cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
+ cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
+
for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) {
/* go entry by entry figuring out which is first */
- current_entry = nxt_dir_entry(current_entry,end_of_smb);
+ current_entry = nxt_dir_entry(current_entry,end_of_smb,
+ cifsFile->srch_inf.info_level);
}
if((current_entry == NULL) && (i < pos_in_buf)) {
/* BB fixme - check if we should flag this error */
@@ -674,7 +724,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
/* inode num, inode type and filename returned */
static int cifs_get_name_from_search_buf(struct qstr *pqst,
char *current_entry, __u16 level, unsigned int unicode,
- struct cifs_sb_info * cifs_sb, ino_t *pinum)
+ struct cifs_sb_info * cifs_sb, int max_len, ino_t *pinum)
{
int rc = 0;
unsigned int len = 0;
@@ -718,10 +768,22 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
+ } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pFindData =
+ (FIND_FILE_STANDARD_INFO *)current_entry;
+ filename = &pFindData->FileName[0];
+ /* one byte length, no name conversion */
+ len = (unsigned int)pFindData->FileNameLength;
} else {
cFYI(1,("Unknown findfirst level %d",level));
return -EINVAL;
}
+
+ if(len > max_len) {
+ cERROR(1,("bad search response length %d past smb end", len));
+ return -EINVAL;
+ }
+
if(unicode) {
/* BB fixme - test with long names */
/* Note converted filename can be longer than in unicode */
@@ -741,7 +803,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
}
static int cifs_filldir(char *pfindEntry, struct file *file,
- filldir_t filldir, void *direntry, char *scratch_buf)
+ filldir_t filldir, void *direntry, char *scratch_buf, int max_len)
{
int rc = 0;
struct qstr qstring;
@@ -777,6 +839,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
rc = cifs_get_name_from_search_buf(&qstring,pfindEntry,
pCifsF->srch_inf.info_level,
pCifsF->srch_inf.unicode,cifs_sb,
+ max_len,
&inum /* returned */);
if(rc)
@@ -798,13 +861,16 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
/* we pass in rc below, indicating whether it is a new inode,
so we can figure out whether to invalidate the inode cached
data if the file has changed */
- if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
+ if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
unix_fill_in_inode(tmp_inode,
- (FILE_UNIX_INFO *)pfindEntry,&obj_type, rc);
- } else {
- fill_in_inode(tmp_inode,
- (FILE_DIRECTORY_INFO *)pfindEntry,&obj_type, rc);
- }
+ (FILE_UNIX_INFO *)pfindEntry,
+ &obj_type, rc);
+ else if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
+ fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
+ pfindEntry, &obj_type, rc);
+ else
+ fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
+
rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,
tmp_inode->i_ino,obj_type);
@@ -864,6 +930,12 @@ static int cifs_save_resume_key(const char *current_entry,
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
cifsFile->srch_inf.resume_key = pFindData->FileIndex;
+ } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pFindData =
+ (FIND_FILE_STANDARD_INFO *)current_entry;
+ filename = &pFindData->FileName[0];
+ /* one byte length, no name conversion */
+ len = (unsigned int)pFindData->FileNameLength;
} else {
cFYI(1,("Unknown findfirst level %d",level));
return -EINVAL;
@@ -884,6 +956,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
int num_to_fill = 0;
char * tmp_buf = NULL;
char * end_of_smb;
+ int max_len;
xid = GetXid();
@@ -909,7 +982,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
case 1:
if (filldir(direntry, "..", 2, file->f_pos,
file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
- cERROR(1, ("Filldir for parent dir failed "));
+ cERROR(1, ("Filldir for parent dir failed"));
rc = -ENOMEM;
break;
}
@@ -959,10 +1032,11 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
goto rddir2_exit;
}
cFYI(1,("loop through %d times filling dir for net buf %p",
- num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
- end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
- smbCalcSize((struct smb_hdr *)
- cifsFile->srch_inf.ntwrk_buf_start);
+ num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
+ max_len = smbCalcSize((struct smb_hdr *)
+ cifsFile->srch_inf.ntwrk_buf_start);
+ end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
+
/* To be safe - for UCS to UTF-8 with strings loaded
with the rare long characters alloc more to account for
such multibyte target UTF-8 characters. cifs_unicode.c,
@@ -977,17 +1051,19 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
}
/* if buggy server returns . and .. late do
we want to check for that here? */
- rc = cifs_filldir(current_entry, file,
- filldir, direntry,tmp_buf);
+ rc = cifs_filldir(current_entry, file,
+ filldir, direntry, tmp_buf, max_len);
file->f_pos++;
- if(file->f_pos == cifsFile->srch_inf.index_of_last_entry) {
+ if(file->f_pos ==
+ cifsFile->srch_inf.index_of_last_entry) {
cFYI(1,("last entry in buf at pos %lld %s",
- file->f_pos,tmp_buf)); /* BB removeme BB */
+ file->f_pos,tmp_buf));
cifs_save_resume_key(current_entry,cifsFile);
break;
} else
- current_entry = nxt_dir_entry(current_entry,
- end_of_smb);
+ current_entry =
+ nxt_dir_entry(current_entry, end_of_smb,
+ cifsFile->srch_inf.info_level);
}
kfree(tmp_buf);
break;
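
The readdir.c changes teach nxt_dir_entry() that legacy SMB_FIND_FILE_INFO_STANDARD entries carry no NextEntryOffset field: records are packed back to back, so the next one starts after the fixed header plus a one-byte-counted name, while the NT-style levels chain through an explicit offset. The sketch below illustrates that traversal under simplified, illustrative struct layouts; the real definitions live in cifspdu.h.

#include <stdint.h>
#include <stddef.h>

struct nt_dir_info {			/* NT-style find levels */
	uint32_t next_entry_offset;
	/* fixed fields and the file name follow */
};

struct std_dir_info {			/* legacy "info standard" level */
	/* fixed fields would precede this */
	uint8_t file_name_length;
	char    file_name[1];
};

static char *next_dir_entry(char *entry, char *end_of_buf, int legacy_level)
{
	char *next;

	if (legacy_level) {
		struct std_dir_info *std = (struct std_dir_info *)entry;

		/* packed records: skip the fixed part plus the name */
		next = entry + offsetof(struct std_dir_info, file_name) +
		       std->file_name_length;
	} else {
		struct nt_dir_info *nt = (struct nt_dir_info *)entry;

		/* NT levels chain through an explicit offset */
		next = entry + nt->next_entry_offset;
	}

	if (next >= end_of_buf)
		return NULL;	/* truncated or malformed response */
	return next;
}
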
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
new file mode 100644
index 000000000000..7202d534ef0b
--- /dev/null
+++ b/fs/cifs/sess.c
@@ -0,0 +1,538 @@
+/*
+ * fs/cifs/sess.c
+ *
+ * SMB/CIFS session setup handling routines
+ *
+ * Copyright (c) International Business Machines Corp., 2006
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "ntlmssp.h"
+#include "nterr.h"
+#include <linux/utsname.h>
+
+extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
+ unsigned char *p24);
+
+static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+{
+ __u32 capabilities = 0;
+
+ /* init fields common to all four types of SessSetup */
+ /* note that header is initialized to zero in header_assemble */
+ pSMB->req.AndXCommand = 0xFF;
+ pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+ pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+ /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+
+ /* BB verify whether signing required on neg or just on auth frame
+ (and NTLM case) */
+
+ capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+ CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+
+ if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+ if (ses->capabilities & CAP_UNICODE) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
+ capabilities |= CAP_UNICODE;
+ }
+ if (ses->capabilities & CAP_STATUS32) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+ capabilities |= CAP_STATUS32;
+ }
+ if (ses->capabilities & CAP_DFS) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
+ capabilities |= CAP_DFS;
+ }
+ if (ses->capabilities & CAP_UNIX) {
+ capabilities |= CAP_UNIX;
+ }
+
+ /* BB check whether to init vcnum BB */
+ return capabilities;
+}
+
+static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ char * bcc_ptr = *pbcc_area;
+ int bytes_ret = 0;
+
+ /* BB FIXME add check that strings total less
+ than 335 or will need to send them as arrays */
+
+ /* unicode strings, must be word aligned before the call */
+/* if ((long) bcc_ptr % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ } */
+ /* copy user */
+ if(ses->userName == NULL) {
+ /* BB what about null user mounts - check that we do this BB */
+ } else { /* 300 should be long enough for any conceivable user name */
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+ 300, nls_cp);
+ }
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* account for null termination */
+ /* copy domain */
+ if(ses->domainName == NULL)
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr,
+ "CIFS_LINUX_DOM", 32, nls_cp);
+ else
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
+ 256, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* account for null terminator */
+
+ /* Copy OS version */
+ bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32,
+ nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release,
+ 32, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* trailing null */
+
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
+ 32, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* trailing null */
+
+ *pbcc_area = bcc_ptr;
+}
+
+static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ char * bcc_ptr = *pbcc_area;
+
+ /* copy user */
+ /* BB what about null user mounts - check that we do this BB */
+ /* copy user */
+ if(ses->userName == NULL) {
+ /* BB what about null user mounts - check that we do this BB */
+ } else { /* 300 should be long enough for any conceivable user name */
+ strncpy(bcc_ptr, ses->userName, 300);
+ }
+ /* BB improve check for overflow */
+ bcc_ptr += strnlen(ses->userName, 300);
+ *bcc_ptr = 0;
+ bcc_ptr++; /* account for null termination */
+
+ /* copy domain */
+
+ if(ses->domainName == NULL) {
+ strcpy(bcc_ptr, "CIFS_LINUX_DOM");
+ bcc_ptr += 14; /* strlen(CIFS_LINUX_DOM) */
+ } else {
+ strncpy(bcc_ptr, ses->domainName, 256);
+ bcc_ptr += strnlen(ses->domainName, 256);
+ }
+ *bcc_ptr = 0;
+ bcc_ptr++;
+
+ /* BB check for overflow here */
+
+ strcpy(bcc_ptr, "Linux version ");
+ bcc_ptr += strlen("Linux version ");
+ strcpy(bcc_ptr, system_utsname.release);
+ bcc_ptr += strlen(system_utsname.release) + 1;
+
+ strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+ bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+
+ *pbcc_area = bcc_ptr;
+}
+
+static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ int rc = 0;
+ int words_left, len;
+ char * data = *pbcc_area;
+
+
+
+ cFYI(1,("bleft %d",bleft));
+
+
+ /* word align, if bytes remaining is not even */
+ if(bleft % 2) {
+ bleft--;
+ data++;
+ }
+ words_left = bleft / 2;
+
+ /* save off server operating system */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+/* We look for obvious messed up bcc or strings in response so we do not go off
+ the end since (at least) WIN2K and Windows XP have a major bug in not null
+ terminating last Unicode string in response */
+ if(len >= words_left)
+ return rc;
+
+ if(ses->serverOS)
+ kfree(ses->serverOS);
+ /* UTF-8 string will not grow more than four times as big as UCS-16 */
+ ses->serverOS = kzalloc(4 * len, GFP_KERNEL);
+ if(ses->serverOS != NULL) {
+ cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len,
+ nls_cp);
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ /* save off server network operating system */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+ if(len >= words_left)
+ return rc;
+
+ if(ses->serverNOS)
+ kfree(ses->serverNOS);
+ ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */
+ if(ses->serverNOS != NULL) {
+ cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len,
+ nls_cp);
+ if(strncmp(ses->serverNOS, "NT LAN Manager 4",16) == 0) {
+ cFYI(1,("NT4 server"));
+ ses->flags |= CIFS_SES_NT4;
+ }
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ /* save off server domain */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+ if(len > words_left)
+ return rc;
+
+ if(ses->serverDomain)
+ kfree(ses->serverDomain);
+ ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
+ if(ses->serverDomain != NULL) {
+ cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
+ nls_cp);
+ ses->serverDomain[2*len] = 0;
+ ses->serverDomain[(2*len) + 1] = 0;
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ cFYI(1,("words left: %d",words_left));
+
+ return rc;
+}
+
+static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ int rc = 0;
+ int len;
+ char * bcc_ptr = *pbcc_area;
+
+ cFYI(1,("decode sessetup ascii. bleft %d", bleft));
+
+ len = strnlen(bcc_ptr, bleft);
+ if(len >= bleft)
+ return rc;
+
+ if(ses->serverOS)
+ kfree(ses->serverOS);
+
+ ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
+ if(ses->serverOS)
+ strncpy(ses->serverOS, bcc_ptr, len);
+
+ bcc_ptr += len + 1;
+ bleft -= len + 1;
+
+ len = strnlen(bcc_ptr, bleft);
+ if(len >= bleft)
+ return rc;
+
+ if(ses->serverNOS)
+ kfree(ses->serverNOS);
+
+ ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
+ if(ses->serverNOS)
+ strncpy(ses->serverNOS, bcc_ptr, len);
+
+ bcc_ptr += len + 1;
+ bleft -= len + 1;
+
+ len = strnlen(bcc_ptr, bleft);
+ if(len > bleft)
+ return rc;
+
+ if(ses->serverDomain)
+ kfree(ses->serverDomain);
+
+ ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
+ if(ses->serverOS)
+ strncpy(ses->serverOS, bcc_ptr, len);
+
+ bcc_ptr += len + 1;
+ bleft -= len + 1;
+
+ cFYI(1,("ascii: bytes left %d",bleft));
+
+ return rc;
+}
+
+int
+CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
+ const struct nls_table *nls_cp)
+{
+ int rc = 0;
+ int wct;
+ struct smb_hdr *smb_buf;
+ char *bcc_ptr;
+ char *str_area;
+ SESSION_SETUP_ANDX *pSMB;
+ __u32 capabilities;
+ int count;
+ int resp_buf_type = 0;
+ struct kvec iov[2];
+ enum securityEnum type;
+ __u16 action;
+ int bytes_remaining;
+
+ if(ses == NULL)
+ return -EINVAL;
+
+ type = ses->server->secType;
+
+ cFYI(1,("sess setup type %d",type));
+ if(type == LANMAN) {
+#ifndef CONFIG_CIFS_WEAK_PW_HASH
+ /* LANMAN and plaintext are less secure and off by default.
+ So this must be explicitly turned on in kconfig (in the
+ build) and turned on at runtime (changed from the default)
+ in /proc/fs/cifs or via a mount parm. Unfortunately this is
+ needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
+ return -EOPNOTSUPP;
+#endif
+ wct = 10; /* lanman 2 style sessionsetup */
+ } else if((type == NTLM) || (type == NTLMv2)) {
+ /* For NTLMv2 failures eventually may need to retry NTLM */
+ wct = 13; /* old style NTLM sessionsetup */
+ } else /* same size for negotiate or auth, NTLMSSP or extended security */
+ wct = 12;
+
+ rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
+ (void **)&smb_buf);
+ if(rc)
+ return rc;
+
+ pSMB = (SESSION_SETUP_ANDX *)smb_buf;
+
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+
+ /* we will send the SMB in two pieces:
+ a fixed-length beginning part, and a
+ second part which will include the strings
+ and the rest of the bcc area, in order to
+ avoid having to do a large 17K buffer allocation */
+ iov[0].iov_base = (char *)pSMB;
+ iov[0].iov_len = smb_buf->smb_buf_length + 4;
+
+ /* 2000 big enough to fit max user, domain, NOS name etc. */
+ str_area = kmalloc(2000, GFP_KERNEL);
+ if(str_area == NULL) {
+ cifs_small_buf_release(smb_buf);
+ return -ENOMEM;
+ }
+ bcc_ptr = str_area;
+
+ if(type == LANMAN) {
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ char lnm_session_key[CIFS_SESS_KEY_SIZE];
+
+ /* no capabilities flags in old lanman negotiation */
+
+ pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE;
+ /* BB calculate hash with password */
+ /* and copy into bcc */
+
+ calc_lanman_hash(ses, lnm_session_key);
+
+/* #ifdef CONFIG_CIFS_DEBUG2
+ cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
+ CIFS_SESS_KEY_SIZE);
+#endif */
+ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+
+ /* cannot sign if LANMAN was negotiated, so no need
+ to calculate a signing key? But if the server later
+ switched to a higher-than-lanman dialect and we
+ reconnected, would we ever calc the signing_key? */
+
+ cFYI(1,("Negotiating LANMAN setting up strings"));
+ /* Unicode not allowed for LANMAN dialects */
+ ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+#endif
+ } else if (type == NTLM) {
+ char ntlm_session_key[CIFS_SESS_KEY_SIZE];
+
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+ pSMB->req_no_secext.CaseInsensitivePasswordLength =
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+
+ /* calculate session key */
+ SMBNTencrypt(ses->password, ses->server->cryptKey,
+ ntlm_session_key);
+
+ if(first_time) /* should this be moved into common code
+ with similar ntlmv2 path? */
+ cifs_calculate_mac_key(ses->server->mac_signing_key,
+ ntlm_session_key, ses->password);
+ /* copy session key */
+
+ memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ if(ses->capabilities & CAP_UNICODE) {
+ /* unicode strings must be word aligned */
+ if (iov[0].iov_len % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ }
+ unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else
+ ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else if (type == NTLMv2) {
+ char * v2_sess_key =
+ kmalloc(sizeof(struct ntlmv2_resp), GFP_KERNEL);
+
+ /* BB FIXME change all users of v2_sess_key to
+ struct ntlmv2_resp */
+
+ if(v2_sess_key == NULL) {
+ cifs_small_buf_release(smb_buf);
+ return -ENOMEM;
+ }
+
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+
+ /* LM2 password would be here if we supported it */
+ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+ /* cpu_to_le16(LM2_SESS_KEY_SIZE); */
+
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(sizeof(struct ntlmv2_resp));
+
+ /* calculate session key */
+ setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+ if(first_time) {
+ /* should this be moved into common code
+ with similar ntlm path? */
+ /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
+ response BB FIXME, v2_sess_key); */
+ }
+
+ /* copy session key */
+
+ /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
+ bcc_ptr += LM2_SESS_KEY_SIZE; */
+ memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp));
+ bcc_ptr += sizeof(struct ntlmv2_resp);
+ kfree(v2_sess_key);
+ if(ses->capabilities & CAP_UNICODE) {
+ if(iov[0].iov_len % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ }
+ unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else
+ ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else /* NTLMSSP or SPNEGO */ {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ capabilities |= CAP_EXTENDED_SECURITY;
+ pSMB->req.Capabilities = cpu_to_le32(capabilities);
+ /* BB set password lengths */
+ }
+
+ count = (long) bcc_ptr - (long) str_area;
+ smb_buf->smb_buf_length += count;
+
+ BCC_LE(smb_buf) = cpu_to_le16(count);
+
+ iov[1].iov_base = str_area;
+ iov[1].iov_len = count;
+ rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
+ /* SMB request buf freed in SendReceive2 */
+
+ cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
+ if(rc)
+ goto ssetup_exit;
+
+ pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)iov[0].iov_base;
+
+ if((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
+ rc = -EIO;
+ cERROR(1,("bad word count %d", smb_buf->WordCount));
+ goto ssetup_exit;
+ }
+ action = le16_to_cpu(pSMB->resp.Action);
+ if (action & GUEST_LOGIN)
+ cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */
+ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
+ cFYI(1, ("UID = %d ", ses->Suid));
+ /* response can have either 3 or 4 word count - Samba sends 3 */
+ /* and lanman response is 3 */
+ bytes_remaining = BCC(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
+
+ if(smb_buf->WordCount == 4) {
+ __u16 blob_len;
+ blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+ bcc_ptr += blob_len;
+ if(blob_len > bytes_remaining) {
+ cERROR(1,("bad security blob length %d", blob_len));
+ rc = -EINVAL;
+ goto ssetup_exit;
+ }
+ bytes_remaining -= blob_len;
+ }
+
+ /* BB check if Unicode and decode strings */
+ if(smb_buf->Flags2 & SMBFLG2_UNICODE)
+ rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining,
+ ses, nls_cp);
+ else
+ rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,nls_cp);
+
+ssetup_exit:
+ kfree(str_area);
+ if(resp_buf_type == CIFS_SMALL_BUFFER) {
+ cFYI(1,("ssetup freeing small buf %p", iov[0].iov_base));
+ cifs_small_buf_release(iov[0].iov_base);
+ } else if(resp_buf_type == CIFS_LARGE_BUFFER)
+ cifs_buf_release(iov[0].iov_base);
+
+ return rc;
+}
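
Both the NTLM and NTLMv2 branches of CIFS_SessSetup() above pad the byte-count area to a 2-byte boundary before appending UCS-2 strings. A minimal sketch of that rule, factored into a helper whose name is purely illustrative and not part of this patch:

	/* SMB Unicode strings in the bcc area must start on a 2-byte boundary;
	 * emit one null pad byte when the fixed-length part has odd length. */
	static char *align_bcc_for_unicode(char *bcc_ptr, unsigned int fixed_len)
	{
		if (fixed_len % 2) {
			*bcc_ptr = 0;	/* pad byte */
			bcc_ptr++;
		}
		return bcc_ptr;
	}

Both branches inline exactly this check on iov[0].iov_len just before calling unicode_ssetup_strings().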
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6103bcdfb16d..f518c5e45035 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -30,6 +30,7 @@
#include <linux/random.h>
#include "cifs_unicode.h"
#include "cifspdu.h"
+#include "cifsglob.h"
#include "md5.h"
#include "cifs_debug.h"
#include "cifsencrypt.h"
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3da80409466c..17ba329e2b3d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -654,8 +654,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
up(&ses->server->tcpSem);
- cERROR(1,
- ("Illegal length, greater than maximum frame, %d ",
+ cERROR(1, ("Illegal length, greater than maximum frame, %d",
in_buf->smb_buf_length));
DeleteMidQEntry(midQ);
/* If not lock req, update # of requests on wire to server */
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 6c6771db36da..7caee8d8ea3b 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -259,7 +259,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
/* If request was not a signal, enqueue and don't free */
if (!(req->uc_flags & REQ_ASYNC)) {
req->uc_flags |= REQ_READ;
- list_add(&(req->uc_chain), vcp->vc_processing.prev);
+ list_add_tail(&(req->uc_chain), &vcp->vc_processing);
goto out;
}
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index b35e5bbd9c99..76e00a65a75b 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -50,6 +50,6 @@ fail:
return error;
}
-struct address_space_operations coda_symlink_aops = {
+const struct address_space_operations coda_symlink_aops = {
.readpage = coda_symlink_filler,
};
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index b040eba13a7d..a5b5e631ba61 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -725,7 +725,7 @@ static int coda_upcall(struct coda_sb_info *sbi,
((union inputArgs *)buffer)->ih.unique = req->uc_unique;
/* Append msg to pending queue and poke Venus. */
- list_add(&(req->uc_chain), vcommp->vc_pending.prev);
+ list_add_tail(&(req->uc_chain), &vcommp->vc_pending);
wake_up_interruptible(&vcommp->vc_waitq);
/* We can be interrupted while we wait for Venus to process
diff --git a/fs/compat.c b/fs/compat.c
index 7e7e5bc4f3cf..e31e9cf96647 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -55,6 +55,20 @@
extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
+int compat_log = 1;
+
+int compat_printk(const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ if (!compat_log)
+ return 0;
+ va_start(ap, fmt);
+ ret = vprintk(fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
/*
* Not all architectures have sys_utime, so implement this in terms
* of sys_utimes.
@@ -359,7 +373,7 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd,
sprintf(buf,"'%c'", (cmd>>24) & 0x3f);
if (!isprint(buf[1]))
sprintf(buf, "%02x", buf[1]);
- printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
+ compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
"cmd(%08x){%s} arg(%08x) on %s\n",
current->comm, current->pid,
(int)fd, (unsigned int)cmd, buf,
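
The new compat_printk() helper is a drop-in for printk() whose output can be suppressed by clearing compat_log (how compat_log is exposed to user space is outside this hunk). An illustrative call site:

	compat_printk(KERN_INFO "ioctl32(%s:%d): unknown cmd %08x\n",
		      current->comm, current->pid, cmd);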
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 9eb9824dd332..d8ecfedef189 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -80,6 +80,7 @@
#include <net/bluetooth/rfcomm.h>
#include <linux/capi.h>
+#include <linux/gigaset_dev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5f952187fc53..207f8006fd6c 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1009,8 +1009,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
/* fallthrough */
default:
if (filp->f_pos == 2) {
- list_del(q);
- list_add(q, &parent_sd->s_children);
+ list_move(q, &parent_sd->s_children);
}
for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
struct configfs_dirent *next;
@@ -1033,8 +1032,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
dt_type(next)) < 0)
return 0;
- list_del(q);
- list_add(q, p);
+ list_move(q, p);
p = q;
filp->f_pos++;
}
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index c153bd9534cb..e14488ca6411 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -38,7 +38,7 @@
extern struct super_block * configfs_sb;
-static struct address_space_operations configfs_aops = {
+static const struct address_space_operations configfs_aops = {
.readpage = simple_readpage,
.prepare_write = simple_prepare_write,
.commit_write = simple_commit_write
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index c45d73860803..223c0431042d 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -30,7 +30,7 @@
static struct super_operations cramfs_ops;
static struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
-static struct address_space_operations cramfs_aops;
+static const struct address_space_operations cramfs_aops;
static DEFINE_MUTEX(read_mutex);
@@ -501,7 +501,7 @@ static int cramfs_readpage(struct file *file, struct page * page)
return 0;
}
-static struct address_space_operations cramfs_aops = {
+static const struct address_space_operations cramfs_aops = {
.readpage = cramfs_readpage
};
diff --git a/fs/dcache.c b/fs/dcache.c
index b85fda360533..48b44a714b35 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -522,8 +522,7 @@ void shrink_dcache_sb(struct super_block * sb)
dentry = list_entry(tmp, struct dentry, d_lru);
if (dentry->d_sb != sb)
continue;
- list_del(tmp);
- list_add(tmp, &dentry_unused);
+ list_move(tmp, &dentry_unused);
}
/*
@@ -638,7 +637,7 @@ resume:
* of the unused list for prune_dcache
*/
if (!atomic_read(&dentry->d_count)) {
- list_add(&dentry->d_lru, dentry_unused.prev);
+ list_add_tail(&dentry->d_lru, &dentry_unused);
dentry_stat.nr_unused++;
found++;
}
diff --git a/fs/dquot.c b/fs/dquot.c
index 81d87a413c68..0122a279106a 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -250,7 +250,7 @@ static inline struct dquot *find_dquot(unsigned int hashent, struct super_block
/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
- list_add(&dquot->dq_free, free_dquots.prev);
+ list_add_tail(&dquot->dq_free, &free_dquots);
dqstats.free_dquots++;
}
@@ -266,7 +266,7 @@ static inline void put_inuse(struct dquot *dquot)
{
/* We add to the back of inuse list so we don't have to restart
* when traversing this list and we block */
- list_add(&dquot->dq_inuse, inuse_list.prev);
+ list_add_tail(&dquot->dq_inuse, &inuse_list);
dqstats.allocated_dquots++;
}
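
Several hunks above (coda, configfs, dcache, dquot) replace open-coded list splicing with the <linux/list.h> helpers. A minimal sketch of the equivalences being relied on; the function names are illustrative only:

	#include <linux/list.h>

	static void move_to_front(struct list_head *entry, struct list_head *head)
	{
		list_del(entry);
		list_add(entry, head);		/* same effect as list_move(entry, head) */
	}

	static void add_at_tail(struct list_head *entry, struct list_head *head)
	{
		list_add(entry, head->prev);	/* same effect as list_add_tail(entry, head) */
	}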
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 180607f9314d..174696f9bf14 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -21,7 +21,7 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,efs_get_block);
}
-static struct address_space_operations efs_aops = {
+static const struct address_space_operations efs_aops = {
.readpage = efs_readpage,
.sync_page = block_sync_page,
.bmap = _efs_bmap
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index 3d9a350e3e7f..e249cf733a6b 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -53,6 +53,6 @@ fail:
return err;
}
-struct address_space_operations efs_symlink_aops = {
+const struct address_space_operations efs_symlink_aops = {
.readpage = efs_symlink_readpage
};
diff --git a/fs/exec.c b/fs/exec.c
index 0b88bf646143..c8494f513eaf 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -666,8 +666,6 @@ static int de_thread(struct task_struct *tsk)
* and to assume its PID:
*/
if (!thread_group_leader(current)) {
- struct dentry *proc_dentry1, *proc_dentry2;
-
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
@@ -689,10 +687,6 @@ static int de_thread(struct task_struct *tsk)
*/
current->start_time = leader->start_time;
- spin_lock(&leader->proc_lock);
- spin_lock(&current->proc_lock);
- proc_dentry1 = proc_pid_unhash(current);
- proc_dentry2 = proc_pid_unhash(leader);
write_lock_irq(&tasklist_lock);
BUG_ON(leader->tgid != current->tgid);
@@ -713,7 +707,7 @@ static int de_thread(struct task_struct *tsk)
attach_pid(current, PIDTYPE_PID, current->pid);
attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
attach_pid(current, PIDTYPE_SID, current->signal->session);
- list_add_tail_rcu(&current->tasks, &init_task.tasks);
+ list_replace_rcu(&leader->tasks, &current->tasks);
current->group_leader = current;
leader->group_leader = current;
@@ -721,7 +715,6 @@ static int de_thread(struct task_struct *tsk)
/* Reduce leader to a thread */
detach_pid(leader, PIDTYPE_PGID);
detach_pid(leader, PIDTYPE_SID);
- list_del_init(&leader->tasks);
current->exit_signal = SIGCHLD;
@@ -729,10 +722,6 @@ static int de_thread(struct task_struct *tsk)
leader->exit_state = EXIT_DEAD;
write_unlock_irq(&tasklist_lock);
- spin_unlock(&leader->proc_lock);
- spin_unlock(&current->proc_lock);
- proc_pid_flush(proc_dentry1);
- proc_pid_flush(proc_dentry2);
}
/*
@@ -1379,67 +1368,102 @@ static void format_corename(char *corename, const char *pattern, long signr)
*out_ptr = 0;
}
-static void zap_threads (struct mm_struct *mm)
+static void zap_process(struct task_struct *start)
{
- struct task_struct *g, *p;
- struct task_struct *tsk = current;
- struct completion *vfork_done = tsk->vfork_done;
- int traced = 0;
+ struct task_struct *t;
- /*
- * Make sure nobody is waiting for us to release the VM,
- * otherwise we can deadlock when we wait on each other
- */
- if (vfork_done) {
- tsk->vfork_done = NULL;
- complete(vfork_done);
- }
+ start->signal->flags = SIGNAL_GROUP_EXIT;
+ start->signal->group_stop_count = 0;
- read_lock(&tasklist_lock);
- do_each_thread(g,p)
- if (mm == p->mm && p != tsk) {
- force_sig_specific(SIGKILL, p);
- mm->core_waiters++;
- if (unlikely(p->ptrace) &&
- unlikely(p->parent->mm == mm))
- traced = 1;
+ t = start;
+ do {
+ if (t != current && t->mm) {
+ t->mm->core_waiters++;
+ sigaddset(&t->pending.signal, SIGKILL);
+ signal_wake_up(t, 1);
}
- while_each_thread(g,p);
+ } while ((t = next_thread(t)) != start);
+}
- read_unlock(&tasklist_lock);
+static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+ int exit_code)
+{
+ struct task_struct *g, *p;
+ unsigned long flags;
+ int err = -EAGAIN;
+
+ spin_lock_irq(&tsk->sighand->siglock);
+ if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+ tsk->signal->group_exit_code = exit_code;
+ zap_process(tsk);
+ err = 0;
+ }
+ spin_unlock_irq(&tsk->sighand->siglock);
+ if (err)
+ return err;
- if (unlikely(traced)) {
- /*
- * We are zapping a thread and the thread it ptraces.
- * If the tracee went into a ptrace stop for exit tracing,
- * we could deadlock since the tracer is waiting for this
- * coredump to finish. Detach them so they can both die.
- */
- write_lock_irq(&tasklist_lock);
- do_each_thread(g,p) {
- if (mm == p->mm && p != tsk &&
- p->ptrace && p->parent->mm == mm) {
- __ptrace_detach(p, 0);
+ if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+ goto done;
+
+ rcu_read_lock();
+ for_each_process(g) {
+ if (g == tsk->group_leader)
+ continue;
+
+ p = g;
+ do {
+ if (p->mm) {
+ if (p->mm == mm) {
+ /*
+ * p->sighand can't disappear, but
+ * may be changed by de_thread()
+ */
+ lock_task_sighand(p, &flags);
+ zap_process(p);
+ unlock_task_sighand(p, &flags);
+ }
+ break;
}
- } while_each_thread(g,p);
- write_unlock_irq(&tasklist_lock);
+ } while ((p = next_thread(p)) != g);
}
+ rcu_read_unlock();
+done:
+ return mm->core_waiters;
}
-static void coredump_wait(struct mm_struct *mm)
+static int coredump_wait(int exit_code)
{
- DECLARE_COMPLETION(startup_done);
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ struct completion startup_done;
+ struct completion *vfork_done;
int core_waiters;
+ init_completion(&mm->core_done);
+ init_completion(&startup_done);
mm->core_startup_done = &startup_done;
- zap_threads(mm);
- core_waiters = mm->core_waiters;
+ core_waiters = zap_threads(tsk, mm, exit_code);
up_write(&mm->mmap_sem);
+ if (unlikely(core_waiters < 0))
+ goto fail;
+
+ /*
+ * Make sure nobody is waiting for us to release the VM,
+ * otherwise we can deadlock when we wait on each other
+ */
+ vfork_done = tsk->vfork_done;
+ if (vfork_done) {
+ tsk->vfork_done = NULL;
+ complete(vfork_done);
+ }
+
if (core_waiters)
wait_for_completion(&startup_done);
+fail:
BUG_ON(mm->core_waiters);
+ return core_waiters;
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
@@ -1473,22 +1497,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
}
mm->dumpable = 0;
- retval = -EAGAIN;
- spin_lock_irq(&current->sighand->siglock);
- if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
- current->signal->flags = SIGNAL_GROUP_EXIT;
- current->signal->group_exit_code = exit_code;
- current->signal->group_stop_count = 0;
- retval = 0;
- }
- spin_unlock_irq(&current->sighand->siglock);
- if (retval) {
- up_write(&mm->mmap_sem);
+ retval = coredump_wait(exit_code);
+ if (retval < 0)
goto fail;
- }
-
- init_completion(&mm->core_done);
- coredump_wait(mm);
/*
* Clear any false indication of pending signals that might
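
The zap_threads()/coredump_wait() rewrite above is built on the completion API: the dumping thread sleeps on mm->core_startup_done until the other users of the mm have entered the coredump path. A minimal sketch of that handshake with illustrative names, not the patch's exact plumbing:

	#include <linux/completion.h>

	static struct completion startup_done;

	static void dumping_thread(int core_waiters)
	{
		init_completion(&startup_done);
		if (core_waiters)
			wait_for_completion(&startup_done); /* sleep until the last waiter checks in */
	}

	static void exiting_thread(int last_waiter)
	{
		if (last_waiter)
			complete(&startup_done);	/* wake the dumping thread */
	}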
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 9f74a62be555..e65a019fc7a5 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -162,9 +162,9 @@ extern const struct file_operations ext2_file_operations;
extern const struct file_operations ext2_xip_file_operations;
/* inode.c */
-extern struct address_space_operations ext2_aops;
-extern struct address_space_operations ext2_aops_xip;
-extern struct address_space_operations ext2_nobh_aops;
+extern const struct address_space_operations ext2_aops;
+extern const struct address_space_operations ext2_aops_xip;
+extern const struct address_space_operations ext2_nobh_aops;
/* namei.c */
extern struct inode_operations ext2_dir_inode_operations;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 04af9c45dce2..fb4d3220eb8d 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -684,7 +684,7 @@ ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
return mpage_writepages(mapping, wbc, ext2_get_block);
}
-struct address_space_operations ext2_aops = {
+const struct address_space_operations ext2_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_writepage,
@@ -697,12 +697,12 @@ struct address_space_operations ext2_aops = {
.migratepage = buffer_migrate_page,
};
-struct address_space_operations ext2_aops_xip = {
+const struct address_space_operations ext2_aops_xip = {
.bmap = ext2_bmap,
.get_xip_page = ext2_get_xip_page,
};
-struct address_space_operations ext2_nobh_aops = {
+const struct address_space_operations ext2_nobh_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_nobh_writepage,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 0321e1b9034a..f804d5e9d60c 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1698,7 +1698,7 @@ static int ext3_journalled_set_page_dirty(struct page *page)
return __set_page_dirty_nobuffers(page);
}
-static struct address_space_operations ext3_ordered_aops = {
+static const struct address_space_operations ext3_ordered_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_ordered_writepage,
@@ -1712,7 +1712,7 @@ static struct address_space_operations ext3_ordered_aops = {
.migratepage = buffer_migrate_page,
};
-static struct address_space_operations ext3_writeback_aops = {
+static const struct address_space_operations ext3_writeback_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_writeback_writepage,
@@ -1726,7 +1726,7 @@ static struct address_space_operations ext3_writeback_aops = {
.migratepage = buffer_migrate_page,
};
-static struct address_space_operations ext3_journalled_aops = {
+static const struct address_space_operations ext3_journalled_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_journalled_writepage,
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b2891cc29db1..b7483360a2db 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -630,7 +630,7 @@ enum {
Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
- Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh,
+ Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -666,6 +666,7 @@ static match_table_t tokens = {
{Opt_noreservation, "noreservation"},
{Opt_noload, "noload"},
{Opt_nobh, "nobh"},
+ {Opt_bh, "bh"},
{Opt_commit, "commit=%u"},
{Opt_journal_update, "journal=update"},
{Opt_journal_inum, "journal=%u"},
@@ -1014,6 +1015,9 @@ clear_qf_name:
case Opt_nobh:
set_opt(sbi->s_mount_opt, NOBH);
break;
+ case Opt_bh:
+ clear_opt(sbi->s_mount_opt, NOBH);
+ break;
default:
printk (KERN_ERR
"EXT3-fs: Unrecognized mount option \"%s\" "
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 7c35d582ec10..31b7174176ba 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -196,7 +196,7 @@ static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, fat_get_block);
}
-static struct address_space_operations fat_aops = {
+static const struct address_space_operations fat_aops = {
.readpage = fat_readpage,
.readpages = fat_readpages,
.writepage = fat_writepage,
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index 6f5df1700e95..4e25f3fbed86 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -56,7 +56,7 @@ struct inode_operations vxfs_immed_symlink_iops = {
/*
 * Address space operations for immed files and directories.
*/
-struct address_space_operations vxfs_immed_aops = {
+const struct address_space_operations vxfs_immed_aops = {
.readpage = vxfs_immed_readpage,
};
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index f544aae9169f..ca6a39714771 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -41,8 +41,8 @@
#include "vxfs_extern.h"
-extern struct address_space_operations vxfs_aops;
-extern struct address_space_operations vxfs_immed_aops;
+extern const struct address_space_operations vxfs_aops;
+extern const struct address_space_operations vxfs_immed_aops;
extern struct inode_operations vxfs_immed_symlink_iops;
@@ -295,7 +295,7 @@ vxfs_read_inode(struct inode *ip)
{
struct super_block *sbp = ip->i_sb;
struct vxfs_inode_info *vip;
- struct address_space_operations *aops;
+ const struct address_space_operations *aops;
ino_t ino = ip->i_ino;
if (!(vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist)))
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index c1be118fc067..decac62efe57 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -42,7 +42,7 @@
static int vxfs_readpage(struct file *, struct page *);
static sector_t vxfs_bmap(struct address_space *, sector_t);
-struct address_space_operations vxfs_aops = {
+const struct address_space_operations vxfs_aops = {
.readpage = vxfs_readpage,
.bmap = vxfs_bmap,
.sync_page = block_sync_page,
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 28aa81eae2cc..63614ed16336 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -770,7 +770,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
/* no mmap and sendfile */
};
-static struct address_space_operations fuse_file_aops = {
+static const struct address_space_operations fuse_file_aops = {
.readpage = fuse_readpage,
.prepare_write = fuse_prepare_write,
.commit_write = fuse_commit_write,
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 3ed8663a8db1..735332dfd1b8 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -182,8 +182,8 @@ extern void hfs_file_truncate(struct inode *);
extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
/* inode.c */
-extern struct address_space_operations hfs_aops;
-extern struct address_space_operations hfs_btree_aops;
+extern const struct address_space_operations hfs_aops;
+extern const struct address_space_operations hfs_btree_aops;
extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 2d4ced22201b..315cf44a90b2 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -114,7 +114,7 @@ static int hfs_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, hfs_get_block);
}
-struct address_space_operations hfs_btree_aops = {
+const struct address_space_operations hfs_btree_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
.sync_page = block_sync_page,
@@ -124,7 +124,7 @@ struct address_space_operations hfs_btree_aops = {
.releasepage = hfs_releasepage,
};
-struct address_space_operations hfs_aops = {
+const struct address_space_operations hfs_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
.sync_page = block_sync_page,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 7ae393637a0c..8a1ca5ef7ada 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -323,8 +323,8 @@ int hfsplus_file_extend(struct inode *);
void hfsplus_file_truncate(struct inode *);
/* inode.c */
-extern struct address_space_operations hfsplus_aops;
-extern struct address_space_operations hfsplus_btree_aops;
+extern const struct address_space_operations hfsplus_aops;
+extern const struct address_space_operations hfsplus_btree_aops;
void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index acf66dba3e01..924ecdef8091 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -109,7 +109,7 @@ static int hfsplus_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, hfsplus_get_block);
}
-struct address_space_operations hfsplus_btree_aops = {
+const struct address_space_operations hfsplus_btree_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
.sync_page = block_sync_page,
@@ -119,7 +119,7 @@ struct address_space_operations hfsplus_btree_aops = {
.releasepage = hfsplus_releasepage,
};
-struct address_space_operations hfsplus_aops = {
+const struct address_space_operations hfsplus_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
.sync_page = block_sync_page,
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 8e0d37743e7c..b82e3d9c8790 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -54,7 +54,7 @@ static int append = 0;
static struct inode_operations hostfs_iops;
static struct inode_operations hostfs_dir_iops;
-static struct address_space_operations hostfs_link_aops;
+static const struct address_space_operations hostfs_link_aops;
#ifndef MODULE
static int __init hostfs_args(char *options, int *add)
@@ -518,7 +518,7 @@ int hostfs_commit_write(struct file *file, struct page *page, unsigned from,
return(err);
}
-static struct address_space_operations hostfs_aops = {
+static const struct address_space_operations hostfs_aops = {
.writepage = hostfs_writepage,
.readpage = hostfs_readpage,
.set_page_dirty = __set_page_dirty_nobuffers,
@@ -935,7 +935,7 @@ int hostfs_link_readpage(struct file *file, struct page *page)
return(err);
}
-static struct address_space_operations hostfs_link_aops = {
+static const struct address_space_operations hostfs_link_aops = {
.readpage = hostfs_link_readpage,
};
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index d3b9fffe45a1..d9eb19b7b8ae 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -99,7 +99,7 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,hpfs_get_block);
}
-struct address_space_operations hpfs_aops = {
+const struct address_space_operations hpfs_aops = {
.readpage = hpfs_readpage,
.writepage = hpfs_writepage,
.sync_page = block_sync_page,
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 29b7a3e55173..f687d54ed442 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -268,7 +268,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int);
int hpfs_file_fsync(struct file *, struct dentry *, int);
extern const struct file_operations hpfs_file_ops;
extern struct inode_operations hpfs_file_iops;
-extern struct address_space_operations hpfs_aops;
+extern const struct address_space_operations hpfs_aops;
/* inode.c */
@@ -304,7 +304,7 @@ void hpfs_decide_conv(struct inode *, unsigned char *, unsigned);
/* namei.c */
extern struct inode_operations hpfs_dir_iops;
-extern struct address_space_operations hpfs_symlink_aops;
+extern const struct address_space_operations hpfs_symlink_aops;
static inline struct hpfs_inode_info *hpfs_i(struct inode *inode)
{
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index a03abb12c610..59e7dc182a0c 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -538,7 +538,7 @@ fail:
return err;
}
-struct address_space_operations hpfs_symlink_aops = {
+const struct address_space_operations hpfs_symlink_aops = {
.readpage = hpfs_symlink_readpage
};
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e6410d8edd0e..6449cb697967 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -34,7 +34,7 @@
#define HUGETLBFS_MAGIC 0x958458f6
static struct super_operations hugetlbfs_ops;
-static struct address_space_operations hugetlbfs_aops;
+static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static struct inode_operations hugetlbfs_dir_inode_operations;
static struct inode_operations hugetlbfs_inode_operations;
@@ -547,7 +547,7 @@ static void hugetlbfs_destroy_inode(struct inode *inode)
kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}
-static struct address_space_operations hugetlbfs_aops = {
+static const struct address_space_operations hugetlbfs_aops = {
.readpage = hugetlbfs_readpage,
.prepare_write = hugetlbfs_prepare_write,
.commit_write = hugetlbfs_commit_write,
diff --git a/fs/inode.c b/fs/inode.c
index 3a2446a27d2c..f42961eb983b 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -102,7 +102,7 @@ static kmem_cache_t * inode_cachep __read_mostly;
static struct inode *alloc_inode(struct super_block *sb)
{
- static struct address_space_operations empty_aops;
+ static const struct address_space_operations empty_aops;
static struct inode_operations empty_iops;
static const struct file_operations empty_fops;
struct inode *inode;
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 4917315db732..3a39158cca96 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -312,7 +312,7 @@ eio:
return err;
}
-struct address_space_operations zisofs_aops = {
+const struct address_space_operations zisofs_aops = {
.readpage = zisofs_readpage,
/* No sync_page operation supported? */
/* No bmap operation supported */
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 3f9c8ba1fa1f..bb11c7fb4019 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1054,7 +1054,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping,block,isofs_get_block);
}
-static struct address_space_operations isofs_aops = {
+static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage,
.sync_page = block_sync_page,
.bmap = _isofs_bmap
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index b87ba066f5e7..e6308c8b5735 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -176,5 +176,5 @@ isofs_normalize_block_and_offset(struct iso_directory_record* de,
extern struct inode_operations isofs_dir_inode_operations;
extern const struct file_operations isofs_dir_operations;
-extern struct address_space_operations isofs_symlink_aops;
+extern const struct address_space_operations isofs_symlink_aops;
extern struct export_operations isofs_export_ops;
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 4326cb47f8fa..f3a1db3098de 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -754,6 +754,6 @@ error:
return -EIO;
}
-struct address_space_operations isofs_symlink_aops = {
+const struct address_space_operations isofs_symlink_aops = {
.readpage = rock_ridge_symlink_readpage
};
diff --git a/fs/isofs/zisofs.h b/fs/isofs/zisofs.h
index d78485d101c2..273795709155 100644
--- a/fs/isofs/zisofs.h
+++ b/fs/isofs/zisofs.h
@@ -15,7 +15,7 @@
*/
#ifdef CONFIG_ZISOFS
-extern struct address_space_operations zisofs_aops;
+extern const struct address_space_operations zisofs_aops;
extern int __init zisofs_init(void);
extern void zisofs_cleanup(void);
#endif
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 7f96b5cb6781..8c9b28dff119 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -34,6 +34,7 @@
#include <linux/suspend.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
+#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
@@ -1675,7 +1676,7 @@ static void journal_free_journal_head(struct journal_head *jh)
{
#ifdef CONFIG_JBD_DEBUG
atomic_dec(&nr_journal_heads);
- memset(jh, 0x5b, sizeof(*jh));
+ memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
kmem_cache_free(journal_head_cache, jh);
}
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 9e46ea6da752..93068697a9bf 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -59,7 +59,7 @@ static const struct file_operations jffs_file_operations;
static struct inode_operations jffs_file_inode_operations;
static const struct file_operations jffs_dir_operations;
static struct inode_operations jffs_dir_inode_operations;
-static struct address_space_operations jffs_address_operations;
+static const struct address_space_operations jffs_address_operations;
kmem_cache_t *node_cache = NULL;
kmem_cache_t *fm_cache = NULL;
@@ -1614,7 +1614,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
} /* jffs_ioctl() */
-static struct address_space_operations jffs_address_operations = {
+static const struct address_space_operations jffs_address_operations = {
.readpage = jffs_readpage,
.prepare_write = jffs_prepare_write,
.commit_write = jffs_commit_write,
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 320dd48b834e..9c2077e7e081 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -267,6 +267,8 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
}
rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
+ if (!value && rc == -ENODATA)
+ rc = 0;
if (value)
kfree(value);
if (!rc) {
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 1862e8bc101d..ad0121088dde 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -53,8 +53,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
if (!instr) {
printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
spin_lock(&c->erase_completion_lock);
- list_del(&jeb->list);
- list_add(&jeb->list, &c->erase_pending_list);
+ list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
@@ -86,8 +85,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
/* Erase failed immediately. Refile it on the list */
D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
spin_lock(&c->erase_completion_lock);
- list_del(&jeb->list);
- list_add(&jeb->list, &c->erase_pending_list);
+ list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
@@ -161,8 +159,7 @@ static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblo
{
D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
spin_lock(&c->erase_completion_lock);
- list_del(&jeb->list);
- list_add_tail(&jeb->list, &c->erase_complete_list);
+ list_move_tail(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
/* Ensure that kupdated calls us again to mark them clean */
jffs2_erase_pending_trigger(c);
@@ -178,8 +175,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */
spin_lock(&c->erase_completion_lock);
- list_del(&jeb->list);
- list_add(&jeb->list, &c->erase_pending_list);
+ list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
@@ -191,8 +187,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size;
- list_del(&jeb->list);
- list_add(&jeb->list, &c->bad_list);
+ list_move(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
wake_up(&c->erase_wait);
@@ -230,7 +225,6 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
- BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
prev = &ic->nodes;
continue;
}
@@ -254,7 +248,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
/* PARANOIA */
if (!ic) {
- printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
+ JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
+ " not found in remove_node_refs()!!\n");
return;
}
@@ -279,8 +274,19 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
printk("\n");
});
- if (ic->nodes == (void *)ic && ic->nlink == 0)
- jffs2_del_ino_cache(c, ic);
+ switch (ic->class) {
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case RAWNODE_CLASS_XATTR_DATUM:
+ jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ break;
+ case RAWNODE_CLASS_XATTR_REF:
+ jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ break;
+#endif
+ default:
+ if (ic->nodes == (void *)ic && ic->nlink == 0)
+ jffs2_del_ino_cache(c, ic);
+ }
}
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index bb8844f40e48..3ed6e3e120b6 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -62,7 +62,7 @@ struct inode_operations jffs2_file_inode_operations =
.removexattr = jffs2_removexattr
};
-struct address_space_operations jffs2_file_address_operations =
+const struct address_space_operations jffs2_file_address_operations =
{
.readpage = jffs2_readpage,
.prepare_write =jffs2_prepare_write,
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 2900ec3ec3af..97caa77d60cf 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -227,8 +227,6 @@ void jffs2_clear_inode (struct inode *inode)
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
-
- jffs2_xattr_delete_inode(c, f->inocache);
jffs2_do_clear_inode(c, f);
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 477c526d638b..daff3341ff92 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -165,6 +165,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
ic->ino));
spin_unlock(&c->inocache_lock);
+ jffs2_xattr_delete_inode(c, ic);
continue;
}
switch(ic->state) {
@@ -275,13 +276,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
* We can decide whether this node is inode or xattr by ic->class. */
if (ic->class == RAWNODE_CLASS_XATTR_DATUM
|| ic->class == RAWNODE_CLASS_XATTR_REF) {
- BUG_ON(raw->next_in_ino != (void *)ic);
spin_unlock(&c->erase_completion_lock);
if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
- ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
} else {
- ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
}
goto release_sem;
}
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 935fec1b1201..b98594992eed 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -119,8 +119,11 @@ struct jffs2_sb_info {
#ifdef CONFIG_JFFS2_FS_XATTR
#define XATTRINDEX_HASHSIZE (57)
uint32_t highest_xid;
+ uint32_t highest_xseqno;
struct list_head xattrindex[XATTRINDEX_HASHSIZE];
struct list_head xattr_unchecked;
+ struct list_head xattr_dead_list;
+ struct jffs2_xattr_ref *xref_dead_list;
struct jffs2_xattr_ref *xref_temp;
struct rw_semaphore xattr_sem;
uint32_t xdatum_mem_usage;
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 4889d0700c0e..8310c95478e9 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -291,6 +291,7 @@ struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
memset(xd, 0, sizeof(struct jffs2_xattr_datum));
xd->class = RAWNODE_CLASS_XATTR_DATUM;
+ xd->node = (void *)xd;
INIT_LIST_HEAD(&xd->xindex);
return xd;
}
@@ -309,6 +310,7 @@ struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
memset(ref, 0, sizeof(struct jffs2_xattr_ref));
ref->class = RAWNODE_CLASS_XATTR_REF;
+ ref->node = (void *)ref;
return ref;
}
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 927dfe42ba76..7675b33396c7 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -906,6 +906,9 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
struct jffs2_inode_cache **prev;
+#ifdef CONFIG_JFFS2_FS_XATTR
+ BUG_ON(old->xref);
+#endif
dbg_inocache("del %p (ino #%u)\n", old, old->ino);
spin_lock(&c->inocache_lock);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 8bedfd2ff689..d88376992ed9 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -211,8 +211,7 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
struct jffs2_eraseblock *ejeb;
ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
- list_del(&ejeb->list);
- list_add_tail(&ejeb->list, &c->erase_pending_list);
+ list_move_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
@@ -684,19 +683,26 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
spin_lock(&c->erase_completion_lock);
ic = jffs2_raw_ref_to_ic(ref);
- /* It seems we should never call jffs2_mark_node_obsolete() for
- XATTR nodes.... yet. Make sure we notice if/when we change
- that :) */
- BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
;
*p = ref->next_in_ino;
ref->next_in_ino = NULL;
- if (ic->nodes == (void *)ic && ic->nlink == 0)
- jffs2_del_ino_cache(c, ic);
-
+ switch (ic->class) {
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case RAWNODE_CLASS_XATTR_DATUM:
+ jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ break;
+ case RAWNODE_CLASS_XATTR_REF:
+ jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ break;
+#endif
+ default:
+ if (ic->nodes == (void *)ic && ic->nlink == 0)
+ jffs2_del_ino_cache(c, ic);
+ break;
+ }
spin_unlock(&c->erase_completion_lock);
}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 6b5223565405..9f41fc01a371 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -158,7 +158,7 @@ extern struct inode_operations jffs2_dir_inode_operations;
/* file.c */
extern const struct file_operations jffs2_file_operations;
extern struct inode_operations jffs2_file_inode_operations;
-extern struct address_space_operations jffs2_file_address_operations;
+extern const struct address_space_operations jffs2_file_address_operations;
int jffs2_fsync(struct file *, struct dentry *, int);
int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 5fec012b02ed..cc1899268c43 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -968,6 +968,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
struct jffs2_full_dirent *fd, *fds;
int deleted;
+ jffs2_xattr_delete_inode(c, f->inocache);
down(&f->sem);
deleted = f->inocache && !f->inocache->nlink;
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 61618080b86f..2bfdc33752d3 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -317,20 +317,23 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
struct jffs2_summary *s)
{
struct jffs2_xattr_datum *xd;
- uint32_t totlen, crc;
+ uint32_t xid, version, totlen, crc;
int err;
crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
if (crc != je32_to_cpu(rx->node_crc)) {
- if (je32_to_cpu(rx->node_crc) != 0xffffffff)
- JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ofs, je32_to_cpu(rx->node_crc), crc);
+ JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ofs, je32_to_cpu(rx->node_crc), crc);
if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
return err;
return 0;
}
- totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len));
+ xid = je32_to_cpu(rx->xid);
+ version = je32_to_cpu(rx->version);
+
+ totlen = PAD(sizeof(struct jffs2_raw_xattr)
+ + rx->name_len + 1 + je16_to_cpu(rx->value_len));
if (totlen != je32_to_cpu(rx->totlen)) {
JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
ofs, je32_to_cpu(rx->totlen), totlen);
@@ -339,22 +342,24 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
return 0;
}
- xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version));
- if (IS_ERR(xd)) {
- if (PTR_ERR(xd) == -EEXIST) {
- if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen)))))
- return err;
- return 0;
- }
+ xd = jffs2_setup_xattr_datum(c, xid, version);
+ if (IS_ERR(xd))
return PTR_ERR(xd);
- }
- xd->xprefix = rx->xprefix;
- xd->name_len = rx->name_len;
- xd->value_len = je16_to_cpu(rx->value_len);
- xd->data_crc = je32_to_cpu(rx->data_crc);
- xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
- /* FIXME */ xd->node->next_in_ino = (void *)xd;
+ if (xd->version > version) {
+ struct jffs2_raw_node_ref *raw
+ = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
+ raw->next_in_ino = xd->node->next_in_ino;
+ xd->node->next_in_ino = raw;
+ } else {
+ xd->version = version;
+ xd->xprefix = rx->xprefix;
+ xd->name_len = rx->name_len;
+ xd->value_len = je16_to_cpu(rx->value_len);
+ xd->data_crc = je32_to_cpu(rx->data_crc);
+
+ jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
+ }
if (jffs2_sum_active())
jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
@@ -373,9 +378,8 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
crc = crc32(0, rr, sizeof(*rr) - 4);
if (crc != je32_to_cpu(rr->node_crc)) {
- if (je32_to_cpu(rr->node_crc) != 0xffffffff)
- JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ofs, je32_to_cpu(rr->node_crc), crc);
+ JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ofs, je32_to_cpu(rr->node_crc), crc);
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
return err;
return 0;
@@ -395,6 +399,7 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
return -ENOMEM;
/* BEFORE jffs2_build_xattr_subsystem() called,
+ * and AFTER xattr_ref is marked as a dead xref,
* ref->xid is used to store 32bit xid, xd is not used
* ref->ino is used to store 32bit inode-number, ic is not used
 * Those variables are declared as a union, thus using those
@@ -404,11 +409,13 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
*/
ref->ino = je32_to_cpu(rr->ino);
ref->xid = je32_to_cpu(rr->xid);
+ ref->xseqno = je32_to_cpu(rr->xseqno);
+ if (ref->xseqno > c->highest_xseqno)
+ c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
ref->next = c->xref_temp;
c->xref_temp = ref;
- ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL);
- /* FIXME */ ref->node->next_in_ino = (void *)ref;
+ jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);
if (jffs2_sum_active())
jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 0b02fc79e4d1..c19bd476e8ec 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -5,7 +5,7 @@
* Zoltan Sogor <weth@inf.u-szeged.hu>,
* Patrik Kluba <pajko@halom.u-szeged.hu>,
* University of Szeged, Hungary
- * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com>
+ * 2006 KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
@@ -43,7 +43,7 @@ int jffs2_sum_init(struct jffs2_sb_info *c)
return -ENOMEM;
}
- dbg_summary("returned succesfully\n");
+ dbg_summary("returned successfully\n");
return 0;
}
@@ -310,8 +310,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
#ifdef CONFIG_JFFS2_FS_XATTR
case JFFS2_NODETYPE_XATTR: {
struct jffs2_sum_xattr_mem *temp;
- if (je32_to_cpu(node->x.version) == 0xffffffff)
- return 0;
temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
if (!temp)
goto no_mem;
@@ -327,10 +325,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
}
case JFFS2_NODETYPE_XREF: {
struct jffs2_sum_xref_mem *temp;
-
- if (je32_to_cpu(node->r.ino) == 0xffffffff
- && je32_to_cpu(node->r.xid) == 0xffffffff)
- return 0;
temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
if (!temp)
goto no_mem;
@@ -483,22 +477,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
je32_to_cpu(spx->version));
- if (IS_ERR(xd)) {
- if (PTR_ERR(xd) == -EEXIST) {
- /* a newer version of xd exists */
- if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen))))
- return err;
- sp += JFFS2_SUMMARY_XATTR_SIZE;
- break;
- }
- JFFS2_NOTICE("allocation of xattr_datum failed\n");
+ if (IS_ERR(xd))
return PTR_ERR(xd);
+ if (xd->version > je32_to_cpu(spx->version)) {
+ /* node is not the newest one */
+ struct jffs2_raw_node_ref *raw
+ = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spx->totlen)), NULL);
+ raw->next_in_ino = xd->node->next_in_ino;
+ xd->node->next_in_ino = raw;
+ } else {
+ xd->version = je32_to_cpu(spx->version);
+ sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spx->totlen)), (void *)xd);
}
-
- xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
- PAD(je32_to_cpu(spx->totlen)), NULL);
- /* FIXME */ xd->node->next_in_ino = (void *)xd;
-
*pseudo_random += je32_to_cpu(spx->xid);
sp += JFFS2_SUMMARY_XATTR_SIZE;
@@ -519,14 +511,11 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
JFFS2_NOTICE("allocation of xattr_datum failed\n");
return -ENOMEM;
}
- ref->ino = 0xfffffffe;
- ref->xid = 0xfffffffd;
ref->next = c->xref_temp;
c->xref_temp = ref;
- ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
- PAD(sizeof(struct jffs2_raw_xref)), NULL);
- /* FIXME */ ref->node->next_in_ino = (void *)ref;
+ sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
+ PAD(sizeof(struct jffs2_raw_xref)), (void *)ref);
*pseudo_random += ref->node->flash_offset;
sp += JFFS2_SUMMARY_XREF_SIZE;
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index a7f153f79ecb..b9b700730dfe 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -495,8 +495,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* Fix up the original jeb now it's on the bad_list */
if (first_raw == jeb->first_node) {
D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
- list_del(&jeb->list);
- list_add(&jeb->list, &c->erase_pending_list);
+ list_move(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
}
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 2d82e250be34..18e66dbf23b4 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -23,18 +23,15 @@
* xattr_datum_hashkey(xprefix, xname, xvalue, xsize)
 * is used to calculate the xdatum hashkey. The remainder of the hashkey divided by XATTRINDEX_HASHSIZE is
* the index of the xattr name/value pair cache (c->xattrindex).
+ * is_xattr_datum_unchecked(c, xd)
+ * returns 1 if the xdatum contains any unchecked raw nodes, and 0 if all of its
+ * raw nodes have been checked.
* unload_xattr_datum(c, xd)
* is used to release xattr name/value pair and detach from c->xattrindex.
* reclaim_xattr_datum(c)
* is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
 * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
* is hard coded as 32KiB.
- * delete_xattr_datum_node(c, xd)
- * is used to delete a jffs2 node is dominated by xdatum. When EBS(Erase Block Summary) is
- * enabled, it overwrites the obsolete node by myself.
- * delete_xattr_datum(c, xd)
- * is used to delete jffs2_xattr_datum object. It must be called with 0-value of reference
- * counter. (It means how many jffs2_xattr_ref object refers this xdatum.)
* do_verify_xattr_datum(c, xd)
 * is used to load the xdatum information without the name/value pair from the medium.
 * It's necessary once, because that information is not collected during mounting
@@ -53,8 +50,10 @@
* is used to write xdatum to medium. xd->version will be incremented.
* create_xattr_datum(c, xprefix, xname, xvalue, xsize)
* is used to create new xdatum and write to medium.
+ * delete_xattr_datum(c, xd)
+ * is used to delete an xdatum. It marks xd with JFFS2_XFLAGS_DEAD, allowing
+ * GC to reclaim its physical nodes.
* -------------------------------------------------- */
-
static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
{
int name_len = strlen(xname);
@@ -62,6 +61,22 @@ static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *
return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize);
}
+static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ struct jffs2_raw_node_ref *raw;
+ int rc = 0;
+
+ spin_lock(&c->erase_completion_lock);
+ for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
+ if (ref_flags(raw) == REF_UNCHECKED) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock(&c->erase_completion_lock);
+ return rc;
+}
+
static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
/* must be called under down_write(xattr_sem) */
@@ -107,77 +122,33 @@ static void reclaim_xattr_datum(struct jffs2_sb_info *c)
before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
}
-static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
-{
- /* must be called under down_write(xattr_sem) */
- struct jffs2_raw_xattr rx;
- size_t length;
- int rc;
-
- if (!xd->node) {
- JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid);
- return;
- }
- if (jffs2_sum_active()) {
- memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr));
- rc = jffs2_flash_read(c, ref_offset(xd->node),
- sizeof(struct jffs2_unknown_node),
- &length, (char *)&rx);
- if (rc || length != sizeof(struct jffs2_unknown_node)) {
- JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
- rc, sizeof(struct jffs2_unknown_node),
- length, ref_offset(xd->node));
- }
- rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx),
- &length, (char *)&rx);
- if (rc || length != sizeof(struct jffs2_raw_xattr)) {
- JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu ar %#08x\n",
- rc, sizeof(rx), length, ref_offset(xd->node));
- }
- }
- spin_lock(&c->erase_completion_lock);
- xd->node->next_in_ino = NULL;
- spin_unlock(&c->erase_completion_lock);
- jffs2_mark_node_obsolete(c, xd->node);
- xd->node = NULL;
-}
-
-static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
-{
- /* must be called under down_write(xattr_sem) */
- BUG_ON(xd->refcnt);
-
- unload_xattr_datum(c, xd);
- if (xd->node) {
- delete_xattr_datum_node(c, xd);
- xd->node = NULL;
- }
- jffs2_free_xattr_datum(xd);
-}
-
static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
/* must be called under down_write(xattr_sem) */
struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xattr rx;
size_t readlen;
- uint32_t crc, totlen;
+ uint32_t crc, offset, totlen;
int rc;
- BUG_ON(!xd->node);
- BUG_ON(ref_flags(xd->node) != REF_UNCHECKED);
+ spin_lock(&c->erase_completion_lock);
+ offset = ref_offset(xd->node);
+ if (ref_flags(xd->node) == REF_PRISTINE)
+ goto complete;
+ spin_unlock(&c->erase_completion_lock);
- rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx);
+ rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx);
if (rc || readlen != sizeof(rx)) {
JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
- rc, sizeof(rx), readlen, ref_offset(xd->node));
+ rc, sizeof(rx), readlen, offset);
return rc ? rc : -EIO;
}
crc = crc32(0, &rx, sizeof(rx) - 4);
if (crc != je32_to_cpu(rx.node_crc)) {
- if (je32_to_cpu(rx.node_crc) != 0xffffffff)
- JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ref_offset(xd->node), je32_to_cpu(rx.hdr_crc), crc);
+ JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ offset, je32_to_cpu(rx.hdr_crc), crc);
+ xd->flags |= JFFS2_XFLAGS_INVALID;
return EIO;
}
totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
@@ -188,11 +159,12 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
|| je32_to_cpu(rx.version) != xd->version) {
JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, "
"nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n",
- ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
+ offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR,
je32_to_cpu(rx.totlen), totlen,
je32_to_cpu(rx.xid), xd->xid,
je32_to_cpu(rx.version), xd->version);
+ xd->flags |= JFFS2_XFLAGS_INVALID;
return EIO;
}
xd->xprefix = rx.xprefix;
@@ -200,14 +172,17 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
xd->value_len = je16_to_cpu(rx.value_len);
xd->data_crc = je32_to_cpu(rx.data_crc);
- /* This JFFS2_NODETYPE_XATTR node is checked */
- jeb = &c->blocks[ref_offset(xd->node) / c->sector_size];
- totlen = PAD(je32_to_cpu(rx.totlen));
-
spin_lock(&c->erase_completion_lock);
- c->unchecked_size -= totlen; c->used_size += totlen;
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
- xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE;
+ complete:
+ for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
+ jeb = &c->blocks[ref_offset(raw) / c->sector_size];
+ totlen = PAD(ref_totlen(c, jeb, raw));
+ if (ref_flags(raw) == REF_UNCHECKED) {
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ }
+ raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL);
+ }
spin_unlock(&c->erase_completion_lock);
/* unchecked xdatum is chained with c->xattr_unchecked */
@@ -227,7 +202,6 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
uint32_t crc, length;
int i, ret, retry = 0;
- BUG_ON(!xd->node);
BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
BUG_ON(!list_empty(&xd->xindex));
retry:
@@ -253,6 +227,7 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
" at %#08x, read: 0x%08x calculated: 0x%08x\n",
ref_offset(xd->node), xd->data_crc, crc);
kfree(data);
+ xd->flags |= JFFS2_XFLAGS_INVALID;
return EIO;
}
@@ -286,16 +261,14 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
* rc > 0 : Unrecoverable error, this node should be deleted.
*/
int rc = 0;
- BUG_ON(xd->xname);
- if (!xd->node)
+
+ BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD);
+ if (xd->xname)
+ return 0;
+ if (xd->flags & JFFS2_XFLAGS_INVALID)
return EIO;
- if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) {
+ if (unlikely(is_xattr_datum_unchecked(c, xd)))
rc = do_verify_xattr_datum(c, xd);
- if (rc > 0) {
- list_del_init(&xd->xindex);
- delete_xattr_datum_node(c, xd);
- }
- }
if (!rc)
rc = do_load_xattr_datum(c, xd);
return rc;
@@ -304,7 +277,6 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
/* must be called under down_write(xattr_sem) */
- struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xattr rx;
struct kvec vecs[2];
size_t length;
@@ -312,14 +284,16 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
uint32_t phys_ofs = write_ofs(c);
BUG_ON(!xd->xname);
+ BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID));
vecs[0].iov_base = &rx;
- vecs[0].iov_len = PAD(sizeof(rx));
+ vecs[0].iov_len = sizeof(rx);
vecs[1].iov_base = xd->xname;
vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
totlen = vecs[0].iov_len + vecs[1].iov_len;
/* Setup raw-xattr */
+ memset(&rx, 0, sizeof(rx));
rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
rx.totlen = cpu_to_je32(PAD(totlen));
@@ -343,14 +317,8 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
return rc;
}
-
/* success */
- raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL);
- /* FIXME */ raw->next_in_ino = (void *)xd;
-
- if (xd->node)
- delete_xattr_datum_node(c, xd);
- xd->node = raw;
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd);
dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
xd->xid, xd->version, xd->xprefix, xd->xname);
@@ -377,7 +345,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
&& xd->value_len==xsize
&& !strcmp(xd->xname, xname)
&& !memcmp(xd->xvalue, xvalue, xsize)) {
- xd->refcnt++;
+ atomic_inc(&xd->refcnt);
return xd;
}
}
@@ -397,7 +365,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
strcpy(data, xname);
memcpy(data + name_len + 1, xvalue, xsize);
- xd->refcnt = 1;
+ atomic_set(&xd->refcnt, 1);
xd->xid = ++c->highest_xid;
xd->flags |= JFFS2_XFLAGS_HOT;
xd->xprefix = xprefix;
@@ -426,20 +394,36 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
return xd;
}
+static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ BUG_ON(atomic_read(&xd->refcnt));
+
+ unload_xattr_datum(c, xd);
+ xd->flags |= JFFS2_XFLAGS_DEAD;
+ spin_lock(&c->erase_completion_lock);
+ if (xd->node == (void *)xd) {
+ BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID));
+ jffs2_free_xattr_datum(xd);
+ } else {
+ list_add(&xd->xindex, &c->xattr_dead_list);
+ }
+ spin_unlock(&c->erase_completion_lock);
+ dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version);
+}
+
/* -------- xref related functions ------------------
* verify_xattr_ref(c, ref)
* is used to load xref information from medium. Because summary data does not
* contain xid/ino, it's necessary to verify once while mounting process.
- * delete_xattr_ref_node(c, ref)
- * is used to delete a jffs2 node that is dominated by an xref. When EBS is enabled,
- * it overwrites the obsolete node in place.
- * delete_xattr_ref(c, ref)
- * is used to delete a jffs2_xattr_ref object. If the reference count of the xdatum
- * referred to by this xref drops to 0, delete_xattr_datum() is called later.
* save_xattr_ref(c, ref)
- * is used to write xref to medium.
+ * is used to write an xref to the medium. If the delete marker is set, it writes
+ * a deletion record for the xref to the medium.
* create_xattr_ref(c, ic, xd)
* is used to create a new xref and write to medium.
+ * delete_xattr_ref(c, ref)
+ * is used to delete a jffs2_xattr_ref. It marks the xref with XREF_DELETE_MARKER
+ * and allows GC to reclaim its physical nodes.
* jffs2_xattr_delete_inode(c, ic)
* is called to remove xrefs related to obsolete inode when inode is unlinked.
* jffs2_xattr_free_inode(c, ic)
@@ -450,25 +434,29 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xref rr;
size_t readlen;
- uint32_t crc, totlen;
+ uint32_t crc, offset, totlen;
int rc;
- BUG_ON(ref_flags(ref->node) != REF_UNCHECKED);
+ spin_lock(&c->erase_completion_lock);
+ if (ref_flags(ref->node) != REF_UNCHECKED)
+ goto complete;
+ offset = ref_offset(ref->node);
+ spin_unlock(&c->erase_completion_lock);
- rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr);
+ rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr);
if (rc || sizeof(rr) != readlen) {
JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
- rc, sizeof(rr), readlen, ref_offset(ref->node));
+ rc, sizeof(rr), readlen, offset);
return rc ? rc : -EIO;
}
/* obsolete node */
crc = crc32(0, &rr, sizeof(rr) - 4);
if (crc != je32_to_cpu(rr.node_crc)) {
- if (je32_to_cpu(rr.node_crc) != 0xffffffff)
- JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc);
+ JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ offset, je32_to_cpu(rr.node_crc), crc);
return EIO;
}
if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
@@ -476,22 +464,28 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
|| je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
"nodetype=%#04x/%#04x, totlen=%u/%zu\n",
- ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
+ offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
return EIO;
}
ref->ino = je32_to_cpu(rr.ino);
ref->xid = je32_to_cpu(rr.xid);
-
- /* fixup superblock/eraseblock info */
- jeb = &c->blocks[ref_offset(ref->node) / c->sector_size];
- totlen = PAD(sizeof(rr));
+ ref->xseqno = je32_to_cpu(rr.xseqno);
+ if (ref->xseqno > c->highest_xseqno)
+ c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
spin_lock(&c->erase_completion_lock);
- c->unchecked_size -= totlen; c->used_size += totlen;
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
- ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE;
+ complete:
+ for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) {
+ jeb = &c->blocks[ref_offset(raw) / c->sector_size];
+ totlen = PAD(ref_totlen(c, jeb, raw));
+ if (ref_flags(raw) == REF_UNCHECKED) {
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ }
+ raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL);
+ }
spin_unlock(&c->erase_completion_lock);
dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
@@ -499,58 +493,12 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
return 0;
}
-static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
-{
- struct jffs2_raw_xref rr;
- size_t length;
- int rc;
-
- if (jffs2_sum_active()) {
- memset(&rr, 0xff, sizeof(rr));
- rc = jffs2_flash_read(c, ref_offset(ref->node),
- sizeof(struct jffs2_unknown_node),
- &length, (char *)&rr);
- if (rc || length != sizeof(struct jffs2_unknown_node)) {
- JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
- rc, sizeof(struct jffs2_unknown_node),
- length, ref_offset(ref->node));
- }
- rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr),
- &length, (char *)&rr);
- if (rc || length != sizeof(struct jffs2_raw_xref)) {
- JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
- rc, sizeof(rr), length, ref_offset(ref->node));
- }
- }
- spin_lock(&c->erase_completion_lock);
- ref->node->next_in_ino = NULL;
- spin_unlock(&c->erase_completion_lock);
- jffs2_mark_node_obsolete(c, ref->node);
- ref->node = NULL;
-}
-
-static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
-{
- /* must be called under down_write(xattr_sem) */
- struct jffs2_xattr_datum *xd;
-
- BUG_ON(!ref->node);
- delete_xattr_ref_node(c, ref);
-
- xd = ref->xd;
- xd->refcnt--;
- if (!xd->refcnt)
- delete_xattr_datum(c, xd);
- jffs2_free_xattr_ref(ref);
-}
-
static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
/* must be called under down_write(xattr_sem) */
- struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xref rr;
size_t length;
- uint32_t phys_ofs = write_ofs(c);
+ uint32_t xseqno, phys_ofs = write_ofs(c);
int ret;
rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -558,8 +506,16 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
- rr.ino = cpu_to_je32(ref->ic->ino);
- rr.xid = cpu_to_je32(ref->xd->xid);
+ xseqno = (c->highest_xseqno += 2);
+ if (is_xattr_ref_dead(ref)) {
+ xseqno |= XREF_DELETE_MARKER;
+ rr.ino = cpu_to_je32(ref->ino);
+ rr.xid = cpu_to_je32(ref->xid);
+ } else {
+ rr.ino = cpu_to_je32(ref->ic->ino);
+ rr.xid = cpu_to_je32(ref->xd->xid);
+ }
+ rr.xseqno = cpu_to_je32(xseqno);
rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
@@ -572,12 +528,9 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
return ret;
}
-
- raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL);
- /* FIXME */ raw->next_in_ino = (void *)ref;
- if (ref->node)
- delete_xattr_ref_node(c, ref);
- ref->node = raw;
+ /* success */
+ ref->xseqno = xseqno;
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref);
dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);
@@ -610,6 +563,27 @@ static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct
return ref; /* success */
}
+static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_xattr_datum *xd;
+
+ xd = ref->xd;
+ ref->xseqno |= XREF_DELETE_MARKER;
+ ref->ino = ref->ic->ino;
+ ref->xid = ref->xd->xid;
+ spin_lock(&c->erase_completion_lock);
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ spin_unlock(&c->erase_completion_lock);
+
+ dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n",
+ ref->ino, ref->xid, ref->xseqno);
+
+ if (atomic_dec_and_test(&xd->refcnt))
+ delete_xattr_datum(c, xd);
+}
+
void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
/* It's called from jffs2_clear_inode() on inode removing.
@@ -638,8 +612,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
for (ref = ic->xref; ref; ref = _ref) {
_ref = ref->next;
xd = ref->xd;
- xd->refcnt--;
- if (!xd->refcnt) {
+ if (atomic_dec_and_test(&xd->refcnt)) {
unload_xattr_datum(c, xd);
jffs2_free_xattr_datum(xd);
}
@@ -655,7 +628,7 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
 * duplicate name/value pairs. If a duplicate name/value pair is found,
 * one of them will be removed.
*/
- struct jffs2_xattr_ref *ref, *cmp, **pref;
+ struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp;
int rc = 0;
if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
@@ -673,13 +646,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
} else if (unlikely(rc < 0))
goto out;
}
- for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) {
+ for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) {
if (!cmp->xd->xname) {
ref->xd->flags |= JFFS2_XFLAGS_BIND;
rc = load_xattr_datum(c, cmp->xd);
ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
if (unlikely(rc > 0)) {
- *pref = cmp->next;
+ *pcmp = cmp->next;
delete_xattr_ref(c, cmp);
goto retry;
} else if (unlikely(rc < 0))
@@ -687,8 +660,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
}
if (ref->xd->xprefix == cmp->xd->xprefix
&& !strcmp(ref->xd->xname, cmp->xd->xname)) {
- *pref = cmp->next;
- delete_xattr_ref(c, cmp);
+ if (ref->xseqno > cmp->xseqno) {
+ *pcmp = cmp->next;
+ delete_xattr_ref(c, cmp);
+ } else {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ }
goto retry;
}
}
@@ -719,9 +697,13 @@ void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c)
for (i=0; i < XATTRINDEX_HASHSIZE; i++)
INIT_LIST_HEAD(&c->xattrindex[i]);
INIT_LIST_HEAD(&c->xattr_unchecked);
+ INIT_LIST_HEAD(&c->xattr_dead_list);
+ c->xref_dead_list = NULL;
c->xref_temp = NULL;
init_rwsem(&c->xattr_sem);
+ c->highest_xid = 0;
+ c->highest_xseqno = 0;
c->xdatum_mem_usage = 0;
c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */
}
@@ -751,7 +733,11 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
_ref = ref->next;
jffs2_free_xattr_ref(ref);
}
- c->xref_temp = NULL;
+
+ for (ref=c->xref_dead_list; ref; ref = _ref) {
+ _ref = ref->next;
+ jffs2_free_xattr_ref(ref);
+ }
for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
@@ -761,100 +747,143 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
jffs2_free_xattr_datum(xd);
}
}
+
+ list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) {
+ list_del(&xd->xindex);
+ jffs2_free_xattr_datum(xd);
+ }
}
+#define XREF_TMPHASH_SIZE (128)
void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
{
struct jffs2_xattr_ref *ref, *_ref;
+ struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
struct jffs2_xattr_datum *xd, *_xd;
struct jffs2_inode_cache *ic;
- int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0;
+ struct jffs2_raw_node_ref *raw;
+ int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0;
+ int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0;
BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
- /* Phase.1 */
+ /* Phase.1 : Merge same xref */
+ for (i=0; i < XREF_TMPHASH_SIZE; i++)
+ xref_tmphash[i] = NULL;
for (ref=c->xref_temp; ref; ref=_ref) {
+ struct jffs2_xattr_ref *tmp;
+
_ref = ref->next;
- /* checking REF_UNCHECKED nodes */
if (ref_flags(ref->node) != REF_PRISTINE) {
if (verify_xattr_ref(c, ref)) {
- delete_xattr_ref_node(c, ref);
+ BUG_ON(ref->node->next_in_ino != (void *)ref);
+ ref->node->next_in_ino = NULL;
+ jffs2_mark_node_obsolete(c, ref->node);
jffs2_free_xattr_ref(ref);
continue;
}
}
- /* At this point, ref->xid and ref->ino contain XID and inode number.
- ref->xd and ref->ic are not valid yet. */
- xd = jffs2_find_xattr_datum(c, ref->xid);
- ic = jffs2_get_ino_cache(c, ref->ino);
- if (!xd || !ic) {
- if (ref_flags(ref->node) != REF_UNCHECKED)
- JFFS2_WARNING("xref(ino=%u, xid=%u) is orphan. \n",
- ref->ino, ref->xid);
- delete_xattr_ref_node(c, ref);
+
+ i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE;
+ for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) {
+ if (tmp->ino == ref->ino && tmp->xid == ref->xid)
+ break;
+ }
+ if (tmp) {
+ raw = ref->node;
+ if (ref->xseqno > tmp->xseqno) {
+ tmp->xseqno = ref->xseqno;
+ raw->next_in_ino = tmp->node;
+ tmp->node = raw;
+ } else {
+ raw->next_in_ino = tmp->node->next_in_ino;
+ tmp->node->next_in_ino = raw;
+ }
jffs2_free_xattr_ref(ref);
continue;
+ } else {
+ ref->next = xref_tmphash[i];
+ xref_tmphash[i] = ref;
}
- ref->xd = xd;
- ref->ic = ic;
- xd->refcnt++;
- ref->next = ic->xref;
- ic->xref = ref;
- xref_count++;
}
c->xref_temp = NULL;
- /* After this, ref->xid/ino are NEVER used. */
- /* Phase.2 */
+ /* Phase.2 : Bind xref with inode_cache and xattr_datum */
+ for (i=0; i < XREF_TMPHASH_SIZE; i++) {
+ for (ref=xref_tmphash[i]; ref; ref=_ref) {
+ xref_count++;
+ _ref = ref->next;
+ if (is_xattr_ref_dead(ref)) {
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ xref_dead_count++;
+ continue;
+ }
+ /* At this point, ref->xid and ref->ino contain XID and inode number.
+ ref->xd and ref->ic are not valid yet. */
+ xd = jffs2_find_xattr_datum(c, ref->xid);
+ ic = jffs2_get_ino_cache(c, ref->ino);
+ if (!xd || !ic) {
+ dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n",
+ ref->ino, ref->xid, ref->xseqno);
+ ref->xseqno |= XREF_DELETE_MARKER;
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ xref_orphan_count++;
+ continue;
+ }
+ ref->xd = xd;
+ ref->ic = ic;
+ atomic_inc(&xd->refcnt);
+ ref->next = ic->xref;
+ ic->xref = ref;
+ }
+ }
+
+ /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */
for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
+ xdatum_count++;
list_del_init(&xd->xindex);
- if (!xd->refcnt) {
- if (ref_flags(xd->node) != REF_UNCHECKED)
- JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n",
- xd->xid, xd->version, ref_offset(xd->node));
- delete_xattr_datum(c, xd);
+ if (!atomic_read(&xd->refcnt)) {
+ dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n",
+ xd->xid, xd->version);
+ xd->flags |= JFFS2_XFLAGS_DEAD;
+ list_add(&xd->xindex, &c->xattr_unchecked);
+ xdatum_orphan_count++;
continue;
}
- if (ref_flags(xd->node) != REF_PRISTINE) {
- dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n",
- xd->xid, ref_offset(xd->node));
+ if (is_xattr_datum_unchecked(c, xd)) {
+ dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n",
+ xd->xid, xd->version);
list_add(&xd->xindex, &c->xattr_unchecked);
xdatum_unchecked_count++;
}
- xdatum_count++;
}
}
/* build complete */
- JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and "
- "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count);
+ JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum"
+ " (%u unchecked, %u orphan) and "
+ "%u of xref (%u dead, %u orphan) found.\n",
+ xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
+ xref_count, xref_dead_count, xref_orphan_count);
}
struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
uint32_t xid, uint32_t version)
{
- struct jffs2_xattr_datum *xd, *_xd;
+ struct jffs2_xattr_datum *xd;
- _xd = jffs2_find_xattr_datum(c, xid);
- if (_xd) {
- dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n",
- xid, version, _xd->version, ref_offset(_xd->node));
- if (version < _xd->version)
- return ERR_PTR(-EEXIST);
- }
- xd = jffs2_alloc_xattr_datum();
- if (!xd)
- return ERR_PTR(-ENOMEM);
- xd->xid = xid;
- xd->version = version;
- if (xd->xid > c->highest_xid)
- c->highest_xid = xd->xid;
- list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
-
- if (_xd) {
- list_del_init(&_xd->xindex);
- delete_xattr_datum_node(c, _xd);
- jffs2_free_xattr_datum(_xd);
+ xd = jffs2_find_xattr_datum(c, xid);
+ if (!xd) {
+ xd = jffs2_alloc_xattr_datum();
+ if (!xd)
+ return ERR_PTR(-ENOMEM);
+ xd->xid = xid;
+ xd->version = version;
+ if (xd->xid > c->highest_xid)
+ c->highest_xid = xd->xid;
+ list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
}
return xd;
}
@@ -1080,9 +1109,23 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
goto out;
}
if (!buffer) {
- *pref = ref->next;
- delete_xattr_ref(c, ref);
- rc = 0;
+ ref->ino = ic->ino;
+ ref->xid = xd->xid;
+ ref->xseqno |= XREF_DELETE_MARKER;
+ rc = save_xattr_ref(c, ref);
+ if (!rc) {
+ *pref = ref->next;
+ spin_lock(&c->erase_completion_lock);
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ spin_unlock(&c->erase_completion_lock);
+ if (atomic_dec_and_test(&xd->refcnt))
+ delete_xattr_datum(c, xd);
+ } else {
+ ref->ic = ic;
+ ref->xd = xd;
+ ref->xseqno &= ~XREF_DELETE_MARKER;
+ }
goto out;
}
goto found;
@@ -1094,7 +1137,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
goto out;
}
if (!buffer) {
- rc = -EINVAL;
+ rc = -ENODATA;
goto out;
}
found:
@@ -1110,16 +1153,14 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
request = PAD(sizeof(struct jffs2_raw_xref));
rc = jffs2_reserve_space(c, request, &length,
ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
+ down_write(&c->xattr_sem);
if (rc) {
JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
- down_write(&c->xattr_sem);
- xd->refcnt--;
- if (!xd->refcnt)
+ if (atomic_dec_and_test(&xd->refcnt))
delete_xattr_datum(c, xd);
up_write(&c->xattr_sem);
return rc;
}
- down_write(&c->xattr_sem);
if (ref)
*pref = ref->next;
newref = create_xattr_ref(c, ic, xd);
@@ -1129,8 +1170,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
ic->xref = ref;
}
rc = PTR_ERR(newref);
- xd->refcnt--;
- if (!xd->refcnt)
+ if (atomic_dec_and_test(&xd->refcnt))
delete_xattr_datum(c, xd);
} else if (ref) {
delete_xattr_ref(c, ref);
@@ -1142,38 +1182,40 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
}
/* -------- garbage collector functions -------------
- * jffs2_garbage_collect_xattr_datum(c, xd)
+ * jffs2_garbage_collect_xattr_datum(c, xd, raw)
* is used to move xdatum into new node.
- * jffs2_garbage_collect_xattr_ref(c, ref)
+ * jffs2_garbage_collect_xattr_ref(c, ref, raw)
* is used to move xref into new node.
* jffs2_verify_xattr(c)
* is used to call do_verify_xattr_datum() before garbage collecting.
+ * jffs2_release_xattr_datum(c, xd)
+ * is used to release the in-memory xdatum object.
+ * jffs2_release_xattr_ref(c, ref)
+ * is used to release the in-memory xref object.
* -------------------------------------------------- */
-int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
+ struct jffs2_raw_node_ref *raw)
{
uint32_t totlen, length, old_ofs;
- int rc = -EINVAL;
+ int rc = 0;
down_write(&c->xattr_sem);
- BUG_ON(!xd->node);
-
- old_ofs = ref_offset(xd->node);
- totlen = ref_totlen(c, c->gcblock, xd->node);
- if (totlen < sizeof(struct jffs2_raw_xattr))
+ if (xd->node != raw)
+ goto out;
+ if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID))
goto out;
- if (!xd->xname) {
- rc = load_xattr_datum(c, xd);
- if (unlikely(rc > 0)) {
- delete_xattr_datum_node(c, xd);
- rc = 0;
- goto out;
- } else if (unlikely(rc < 0))
- goto out;
+ rc = load_xattr_datum(c, xd);
+ if (unlikely(rc)) {
+ rc = (rc > 0) ? 0 : rc;
+ goto out;
}
+ old_ofs = ref_offset(xd->node);
+ totlen = PAD(sizeof(struct jffs2_raw_xattr)
+ + xd->name_len + 1 + xd->value_len);
rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
- if (rc || length < totlen) {
- JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen);
+ if (rc) {
+ JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen);
rc = rc ? rc : -EBADFD;
goto out;
}
@@ -1182,27 +1224,32 @@ int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xatt
dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n",
xd->xid, xd->version, old_ofs, ref_offset(xd->node));
out:
+ if (!rc)
+ jffs2_mark_node_obsolete(c, raw);
up_write(&c->xattr_sem);
return rc;
}
-
-int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
+ struct jffs2_raw_node_ref *raw)
{
uint32_t totlen, length, old_ofs;
- int rc = -EINVAL;
+ int rc = 0;
down_write(&c->xattr_sem);
BUG_ON(!ref->node);
+ if (ref->node != raw)
+ goto out;
+ if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref))
+ goto out;
+
old_ofs = ref_offset(ref->node);
totlen = ref_totlen(c, c->gcblock, ref->node);
- if (totlen != sizeof(struct jffs2_raw_xref))
- goto out;
rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
- if (rc || length < totlen) {
- JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n",
+ if (rc) {
+ JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n",
__FUNCTION__, rc, totlen);
rc = rc ? rc : -EBADFD;
goto out;
@@ -1212,6 +1259,8 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
out:
+ if (!rc)
+ jffs2_mark_node_obsolete(c, raw);
up_write(&c->xattr_sem);
return rc;
}
@@ -1219,20 +1268,59 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
int jffs2_verify_xattr(struct jffs2_sb_info *c)
{
struct jffs2_xattr_datum *xd, *_xd;
+ struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_node_ref *raw;
+ uint32_t totlen;
int rc;
down_write(&c->xattr_sem);
list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
rc = do_verify_xattr_datum(c, xd);
- if (rc == 0) {
- list_del_init(&xd->xindex);
- break;
- } else if (rc > 0) {
- list_del_init(&xd->xindex);
- delete_xattr_datum_node(c, xd);
+ if (rc < 0)
+ continue;
+ list_del_init(&xd->xindex);
+ spin_lock(&c->erase_completion_lock);
+ for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
+ if (ref_flags(raw) != REF_UNCHECKED)
+ continue;
+ jeb = &c->blocks[ref_offset(raw) / c->sector_size];
+ totlen = PAD(ref_totlen(c, jeb, raw));
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ raw->flash_offset = ref_offset(raw)
+ | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL);
}
+ if (xd->flags & JFFS2_XFLAGS_DEAD)
+ list_add(&xd->xindex, &c->xattr_dead_list);
+ spin_unlock(&c->erase_completion_lock);
}
up_write(&c->xattr_sem);
-
return list_empty(&c->xattr_unchecked) ? 1 : 0;
}
+
+void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under spin_lock(&c->erase_completion_lock) */
+ if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)
+ return;
+
+ list_del(&xd->xindex);
+ jffs2_free_xattr_datum(xd);
+}
+
+void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ /* must be called under spin_lock(&c->erase_completion_lock) */
+ struct jffs2_xattr_ref *tmp, **ptmp;
+
+ if (ref->node != (void *)ref)
+ return;
+
+ for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) {
+ if (ref == tmp) {
+ *ptmp = tmp->next;
+ break;
+ }
+ }
+ jffs2_free_xattr_ref(ref);
+}
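
Note on Phase.1 of jffs2_build_xattr_subsystem() above: every scanned xref naming the same (ino, xid) pair is folded into a single in-memory object, and the highest xseqno is kept so the latest write (or delete marker) wins. A compact userspace sketch of that dedup step, where the struct layout and hash size are illustrative assumptions rather than the kernel's, could be:

    #include <stdio.h>
    #include <stdlib.h>

    #define TMPHASH_SIZE 128

    /* Simplified stand-in for struct jffs2_xattr_ref. */
    struct xref {
        unsigned ino, xid, xseqno;
        struct xref *next;
    };

    /* Insert 'ref' into the temporary hash; if an entry for the same
     * (ino, xid) already exists, keep only the highest sequence number. */
    static void tmphash_merge(struct xref *hash[], struct xref *ref)
    {
        unsigned i = (ref->ino ^ ref->xid) % TMPHASH_SIZE;
        struct xref *tmp;

        for (tmp = hash[i]; tmp; tmp = tmp->next) {
            if (tmp->ino == ref->ino && tmp->xid == ref->xid) {
                if (ref->xseqno > tmp->xseqno)
                    tmp->xseqno = ref->xseqno;
                free(ref);              /* duplicate: drop it */
                return;
            }
        }
        ref->next = hash[i];
        hash[i] = ref;
    }

    int main(void)
    {
        struct xref *hash[TMPHASH_SIZE] = { NULL };
        struct xref *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

        a->ino = 1; a->xid = 7; a->xseqno = 2;
        b->ino = 1; b->xid = 7; b->xseqno = 4;   /* newer record, same pair */
        tmphash_merge(hash, a);
        tmphash_merge(hash, b);
        printf("kept xseqno %u\n", hash[(1 ^ 7) % TMPHASH_SIZE]->xseqno);
        return 0;
    }
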
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 2c199856c582..06a5c69dcf8b 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -16,6 +16,8 @@
#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */
+#define JFFS2_XFLAGS_DEAD (0x40) /* This datum is already dead */
+#define JFFS2_XFLAGS_INVALID (0x80) /* This datum contains crc error */
struct jffs2_xattr_datum
{
@@ -23,10 +25,10 @@ struct jffs2_xattr_datum
struct jffs2_raw_node_ref *node;
uint8_t class;
uint8_t flags;
- uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
+ uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
struct list_head xindex; /* chained from c->xattrindex[n] */
- uint32_t refcnt; /* # of xattr_ref refers this */
+ atomic_t refcnt; /* # of xattr_ref refers this */
uint32_t xid;
uint32_t version;
@@ -47,6 +49,7 @@ struct jffs2_xattr_ref
uint8_t flags; /* Currently unused */
u16 unused;
+ uint32_t xseqno;
union {
struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */
uint32_t ino; /* only used in scanning/building */
@@ -58,6 +61,12 @@ struct jffs2_xattr_ref
struct jffs2_xattr_ref *next; /* chained from ic->xref_list */
};
+#define XREF_DELETE_MARKER (0x00000001)
+static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
+{
+ return ((ref->xseqno & XREF_DELETE_MARKER) != 0);
+}
+
#ifdef CONFIG_JFFS2_FS_XATTR
extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
@@ -70,9 +79,13 @@ extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c
extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
-extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
-extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
+extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
+ struct jffs2_raw_node_ref *raw);
+extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
+ struct jffs2_raw_node_ref *raw);
extern int jffs2_verify_xattr(struct jffs2_sb_info *c);
+extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
+extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
char *buffer, size_t size);
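
Note on the new xseqno field above: its lowest bit is reserved as the delete marker, which is why save_xattr_ref() bumps c->highest_xseqno by 2 for every record. Live and dead records for the same xref can then be ordered by plain numeric comparison while the marker bit never disturbs that ordering. A small standalone sketch of the encoding, using made-up values purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define XREF_DELETE_MARKER 0x00000001u

    static int is_dead(uint32_t xseqno)
    {
        return (xseqno & XREF_DELETE_MARKER) != 0;
    }

    int main(void)
    {
        uint32_t highest = 0, write_seq, delete_seq;

        write_seq  = (highest += 2);                        /* plain write  -> even */
        delete_seq = (highest += 2) | XREF_DELETE_MARKER;   /* delete mark  -> odd  */

        printf("write %u dead=%d, delete %u dead=%d, newer=%s\n",
               write_seq, is_dead(write_seq),
               delete_seq, is_dead(delete_seq),
               delete_seq > write_seq ? "delete" : "write");
        return 0;
    }
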
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 04eb78f1252e..43e3f566aad6 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -305,7 +305,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
offset, nr_segs, jfs_get_block, NULL);
}
-struct address_space_operations jfs_aops = {
+const struct address_space_operations jfs_aops = {
.readpage = jfs_readpage,
.readpages = jfs_readpages,
.writepage = jfs_writepage,
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 5549378358bf..4d52593a5fc6 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
/* allocate the disk blocks for the extent. initially, extBalloc()
* will try to allocate disk blocks for the requested size (xlen).
- * if this fails (xlen contigious free blocks not avaliable), it'll
+ * if this fails (xlen contiguous free blocks not avaliable), it'll
* try to allocate a smaller number of blocks (producing a smaller
* extent), with this smaller number of blocks consisting of the
* requested number of blocks rounded down to the next smaller
@@ -493,7 +493,7 @@ int extFill(struct inode *ip, xad_t * xp)
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
- * contigious free blocks not avaliable), we'll try to allocate
+ * contiguous free blocks not avaliable), we'll try to allocate
* a smaller number of blocks (producing a smaller extent), with
* this smaller number of blocks consisting of the requested
* number of blocks rounded down to the next smaller power of 2
@@ -529,7 +529,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
/* get the number of blocks to initially attempt to allocate.
* we'll first try the number of blocks requested unless this
- * number is greater than the maximum number of contigious free
+ * number is greater than the maximum number of contiguous free
* blocks in the map. in that case, we'll start off with the
* maximum free.
*/
@@ -586,7 +586,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* in place. if this fails, we'll try to move the extent
* to a new set of blocks. if moving the extent, we initially
* will try to allocate disk blocks for the requested size
- * (nnew). if this fails (nnew contigious free blocks not
+ * (nnew). if this fails (new contiguous free blocks not
* avaliable), we'll try to allocate a smaller number of
* blocks (producing a smaller extent), with this smaller
* number of blocks consisting of the requested number of
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index c30072674464..b5c7da6190dc 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -33,7 +33,7 @@ extern void jfs_free_zero_link(struct inode *);
extern struct dentry *jfs_get_parent(struct dentry *dentry);
extern void jfs_set_inode_flags(struct inode *);
-extern struct address_space_operations jfs_aops;
+extern const struct address_space_operations jfs_aops;
extern struct inode_operations jfs_dir_inode_operations;
extern const struct file_operations jfs_dir_operations;
extern struct inode_operations jfs_file_inode_operations;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 7f6e88039700..e1e0a6e6ebdf 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -577,7 +577,7 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
metapage_releasepage(page, 0);
}
-struct address_space_operations jfs_metapage_aops = {
+const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage,
.writepage = metapage_writepage,
.sync_page = block_sync_page,
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index f0b7d3282b07..d17a3290f5aa 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -139,7 +139,7 @@ static inline void metapage_homeok(struct metapage *mp)
put_metapage(mp);
}
-extern struct address_space_operations jfs_metapage_aops;
+extern const struct address_space_operations jfs_metapage_aops;
/*
 * These routines invalidate all pages for an extent.
diff --git a/fs/libfs.c b/fs/libfs.c
index fc785d8befb9..ac02ea602c3d 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -149,10 +149,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
/* fallthrough */
default:
spin_lock(&dcache_lock);
- if (filp->f_pos == 2) {
- list_del(q);
- list_add(q, &dentry->d_subdirs);
- }
+ if (filp->f_pos == 2)
+ list_move(q, &dentry->d_subdirs);
+
for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
struct dentry *next;
next = list_entry(p, struct dentry, d_u.d_child);
@@ -164,8 +163,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
return 0;
spin_lock(&dcache_lock);
/* next is still alive */
- list_del(q);
- list_add(q, p);
+ list_move(q, p);
p = q;
filp->f_pos++;
}
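
Note on the list_move() conversions in this series (wbuf.c, libfs.c, namespace.c, nfsd, ocfs2/dlm): the helper is simply the fused form of list_del() followed by list_add() on a circular doubly linked list. A minimal userspace model of the idea, deliberately not the kernel's <linux/list.h> but an illustrative reimplementation, is:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    static void list_add(struct list_head *e, struct list_head *head)
    {
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
    }

    /* list_move(): delete from the current list, then re-add after 'head'. */
    static void list_move(struct list_head *e, struct list_head *head)
    {
        list_del(e);
        list_add(e, head);
    }

    int main(void)
    {
        struct list_head a = { &a, &a }, b = { &b, &b }, node;

        list_add(&node, &a);    /* node lives on list 'a' */
        list_move(&node, &b);   /* one call replaces del + add */
        printf("moved ok: %d\n", b.next == &node && a.next == &a);
        return 0;
    }
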
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index a6fb509b7341..9ea91c5eeb7b 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -335,7 +335,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,minix_get_block);
}
-static struct address_space_operations minix_aops = {
+static const struct address_space_operations minix_aops = {
.readpage = minix_readpage,
.writepage = minix_writepage,
.sync_page = block_sync_page,
diff --git a/fs/namespace.c b/fs/namespace.c
index 866430bb024d..b3ed212ea416 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -526,10 +526,8 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
struct vfsmount *p;
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- list_del(&p->mnt_hash);
- list_add(&p->mnt_hash, kill);
- }
+ for (p = mnt; p; p = next_mnt(p, mnt))
+ list_move(&p->mnt_hash, kill);
if (propagate)
propagate_umount(kill);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 90d2ea28f333..6c51c1198464 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -105,7 +105,7 @@ static struct super_operations ncp_sops =
extern struct dentry_operations ncp_root_dentry_operations;
#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern struct address_space_operations ncp_symlink_aops;
+extern const struct address_space_operations ncp_symlink_aops;
extern int ncp_symlink(struct inode*, struct dentry*, const char*);
#endif
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index e935f1b34bc2..f76b1392a012 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -99,7 +99,7 @@ fail:
/*
* symlinks can't do much...
*/
-struct address_space_operations ncp_symlink_aops = {
+const struct address_space_operations ncp_symlink_aops = {
.readpage = ncp_symlink_readpage,
};
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 402005c35ab3..8ca9707be6c9 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -909,7 +909,7 @@ int __init nfs_init_directcache(void)
* nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
*
*/
-void __exit nfs_destroy_directcache(void)
+void nfs_destroy_directcache(void)
{
if (kmem_cache_destroy(nfs_direct_cachep))
printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index add289138836..cc2b874ad5a4 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -315,7 +315,7 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
return !nfs_wb_page(page->mapping->host, page);
}
-struct address_space_operations nfs_file_aops = {
+const struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 51bc88b662fe..c5b916605fb0 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1132,7 +1132,7 @@ static int __init nfs_init_inodecache(void)
return 0;
}
-static void __exit nfs_destroy_inodecache(void)
+static void nfs_destroy_inodecache(void)
{
if (kmem_cache_destroy(nfs_inode_cachep))
printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n");
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bd2815e2dec1..4fe51c1292bb 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,15 +31,15 @@ extern struct svc_version nfs4_callback_version1;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
-extern void __exit nfs_destroy_nfspagecache(void);
+extern void nfs_destroy_nfspagecache(void);
extern int __init nfs_init_readpagecache(void);
-extern void __exit nfs_destroy_readpagecache(void);
+extern void nfs_destroy_readpagecache(void);
extern int __init nfs_init_writepagecache(void);
-extern void __exit nfs_destroy_writepagecache(void);
+extern void nfs_destroy_writepagecache(void);
#ifdef CONFIG_NFS_DIRECTIO
extern int __init nfs_init_directcache(void);
-extern void __exit nfs_destroy_directcache(void);
+extern void nfs_destroy_directcache(void);
#else
#define nfs_init_directcache() (0)
#define nfs_destroy_directcache() do {} while(0)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ef9429643ebc..d89f6fb3b3a3 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -390,7 +390,7 @@ int __init nfs_init_nfspagecache(void)
return 0;
}
-void __exit nfs_destroy_nfspagecache(void)
+void nfs_destroy_nfspagecache(void)
{
if (kmem_cache_destroy(nfs_page_cachep))
printk(KERN_INFO "nfs_page: not all structures were freed\n");
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 41c2ffee24f5..32cf3773af0c 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -711,7 +711,7 @@ int __init nfs_init_readpagecache(void)
return 0;
}
-void __exit nfs_destroy_readpagecache(void)
+void nfs_destroy_readpagecache(void)
{
mempool_destroy(nfs_rdata_mempool);
if (kmem_cache_destroy(nfs_rdata_cachep))
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b383fdd3a15c..8fccb9cb173b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1551,7 +1551,7 @@ int __init nfs_init_writepagecache(void)
return 0;
}
-void __exit nfs_destroy_writepagecache(void)
+void nfs_destroy_writepagecache(void)
{
mempool_destroy(nfs_commit_mempool);
mempool_destroy(nfs_wdata_mempool);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 96c7578cbe1e..7c7d01672d35 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -123,7 +123,7 @@ static void release_stateid(struct nfs4_stateid *stp, int flags);
*/
/* recall_lock protects the del_recall_lru */
-static spinlock_t recall_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(recall_lock);
static struct list_head del_recall_lru;
static void
@@ -529,8 +529,7 @@ move_to_confirmed(struct nfs4_client *clp)
dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
list_del_init(&clp->cl_strhash);
- list_del_init(&clp->cl_idhash);
- list_add(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
+ list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
strhashval = clientstr_hashval(clp->cl_recdir);
list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
renew_client(clp);
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index d852ebb538e3..fdf7cf3dfadc 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -103,8 +103,7 @@ nfsd_cache_shutdown(void)
static void
lru_put_end(struct svc_cacherep *rp)
{
- list_del(&rp->c_lru);
- list_add_tail(&rp->c_lru, &lru_head);
+ list_move_tail(&rp->c_lru, &lru_head);
}
/*
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 580412d330cb..bc579bfdfbd8 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1544,7 +1544,7 @@ err_out:
/**
* ntfs_aops - general address space operations for inodes and attributes
*/
-struct address_space_operations ntfs_aops = {
+const struct address_space_operations ntfs_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
.sync_page = block_sync_page, /* Currently, just unplugs the
disk request queue. */
@@ -1560,7 +1560,7 @@ struct address_space_operations ntfs_aops = {
 * ntfs_mst_aops - general address space operations for mst protected inodes
* and attributes
*/
-struct address_space_operations ntfs_mst_aops = {
+const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
.sync_page = block_sync_page, /* Currently, just unplugs the
disk request queue. */
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index bf7b3d7c0930..ddd3d503097c 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -57,8 +57,8 @@ extern struct kmem_cache *ntfs_attr_ctx_cache;
extern struct kmem_cache *ntfs_index_ctx_cache;
/* The various operations structs defined throughout the driver files. */
-extern struct address_space_operations ntfs_aops;
-extern struct address_space_operations ntfs_mst_aops;
+extern const struct address_space_operations ntfs_aops;
+extern const struct address_space_operations ntfs_mst_aops;
extern const struct file_operations ntfs_file_ops;
extern struct inode_operations ntfs_file_inode_ops;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 47152bf9a7f2..cca71317b6d6 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -666,7 +666,7 @@ out:
return ret;
}
-struct address_space_operations ocfs2_aops = {
+const struct address_space_operations ocfs2_aops = {
.readpage = ocfs2_readpage,
.writepage = ocfs2_writepage,
.prepare_write = ocfs2_prepare_write,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 21f38accd039..1d26cfcd9f84 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -54,7 +54,7 @@ static DECLARE_RWSEM(o2hb_callback_sem);
* multiple hb threads are watching multiple regions. A node is live
* whenever any of the threads sees activity from the node in its region.
*/
-static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 0f60cc0d3985..1591eb37a723 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -108,7 +108,7 @@
##args); \
} while (0)
-static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(o2net_handler_lock);
static struct rb_root o2net_handler_tree = RB_ROOT;
static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 355593dd8ef8..42775e2bbe2c 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -197,12 +197,14 @@ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
lock->ml.node == dlm->node_num ? "master" :
"remote");
memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
- } else if (lksb->flags & DLM_LKSB_PUT_LVB) {
- mlog(0, "setting lvb from lockres for %s node\n",
- lock->ml.node == dlm->node_num ? "master" :
- "remote");
- memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
}
+ /* Do nothing for lvb put requests - they should be done in
+ * place when the lock is downconverted - otherwise we risk
+ * racing gets and puts which could result in old lvb data
+ * being propagated. We leave the put flag set and clear it
+ * here. In the future we might want to clear it at the time
+ * the put is actually done.
+ */
spin_unlock(&res->spinlock);
}
@@ -381,8 +383,7 @@ do_ast:
ret = DLM_NORMAL;
if (past->type == DLM_AST) {
/* do not alter lock refcount. switching lists. */
- list_del_init(&lock->list);
- list_add_tail(&lock->list, &res->granted);
+ list_move_tail(&lock->list, &res->granted);
mlog(0, "ast: adding to granted list... type=%d, "
"convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
if (lock->ml.convert_type != LKM_IVMODE) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 88cc43df18f1..9bdc9cf65991 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -37,7 +37,17 @@
#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
#define DLM_THREAD_MS 200 // flush at least every 200 ms
-#define DLM_HASH_BUCKETS (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_SIZE_DEFAULT (1 << 14)
+#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
+# define DLM_HASH_PAGES 1
+#else
+# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
+#endif
+#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
+
+/* Intended to make it easier for us to switch out hash functions */
+#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
enum dlm_ast_type {
DLM_AST = 0,
@@ -61,7 +71,8 @@ static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
return 0;
}
-#define DLM_RECO_STATE_ACTIVE 0x0001
+#define DLM_RECO_STATE_ACTIVE 0x0001
+#define DLM_RECO_STATE_FINALIZE 0x0002
struct dlm_recovery_ctxt
{
@@ -85,7 +96,7 @@ enum dlm_ctxt_state {
struct dlm_ctxt
{
struct list_head list;
- struct hlist_head *lockres_hash;
+ struct hlist_head **lockres_hash;
struct list_head dirty_list;
struct list_head purge_list;
struct list_head pending_asts;
@@ -120,6 +131,7 @@ struct dlm_ctxt
struct o2hb_callback_func dlm_hb_down;
struct task_struct *dlm_thread_task;
struct task_struct *dlm_reco_thread_task;
+ struct workqueue_struct *dlm_worker;
wait_queue_head_t dlm_thread_wq;
wait_queue_head_t dlm_reco_thread_wq;
wait_queue_head_t ast_wq;
@@ -132,6 +144,11 @@ struct dlm_ctxt
struct list_head dlm_eviction_callbacks;
};
+static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
+{
+ return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
+}
+
/* these keventd work queue items are for less-frequently
* called functions that cannot be directly called from the
* net message handlers for some reason, usually because
@@ -216,20 +233,29 @@ struct dlm_lock_resource
/* WARNING: Please see the comment in dlm_init_lockres before
* adding fields here. */
struct hlist_node hash_node;
+ struct qstr lockname;
struct kref refs;
- /* please keep these next 3 in this order
- * some funcs want to iterate over all lists */
+ /*
+ * Please keep granted, converting, and blocked in this order,
+ * as some funcs want to iterate over all lists.
+ *
+ * All four lists are protected by the hash's reference.
+ */
struct list_head granted;
struct list_head converting;
struct list_head blocked;
+ struct list_head purge;
+ /*
+ * These two lists require you to hold an additional reference
+ * while they are on the list.
+ */
struct list_head dirty;
struct list_head recovering; // dlm_recovery_ctxt.resources list
/* unused lock resources have their last_used stamped and are
* put on a list for the dlm thread to run. */
- struct list_head purge;
unsigned long last_used;
unsigned migration_pending:1;
@@ -238,7 +264,6 @@ struct dlm_lock_resource
wait_queue_head_t wq;
u8 owner; //node which owns the lock resource, or unknown
u16 state;
- struct qstr lockname;
char lvb[DLM_LVB_LEN];
};
@@ -300,6 +325,15 @@ enum dlm_lockres_list {
DLM_BLOCKED_LIST
};
+static inline int dlm_lvb_is_empty(char *lvb)
+{
+ int i;
+ for (i=0; i<DLM_LVB_LEN; i++)
+ if (lvb[i])
+ return 0;
+ return 1;
+}
+
static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
@@ -609,7 +643,8 @@ struct dlm_finalize_reco
{
u8 node_idx;
u8 dead_node;
- __be16 pad1;
+ u8 flags;
+ u8 pad1;
__be32 pad2;
};
@@ -676,6 +711,7 @@ void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -687,14 +723,20 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *lockres);
-void dlm_lockres_get(struct dlm_lock_resource *res);
+static inline void dlm_lockres_get(struct dlm_lock_resource *res)
+{
+ /* This is called on every lookup, so it might be worth
+ * inlining. */
+ kref_get(&res->refs);
+}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
const char *name,
- unsigned int len);
+ unsigned int len,
+ unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int len);
@@ -819,6 +861,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm,
u8 dead_node);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+int __dlm_lockres_unused(struct dlm_lock_resource *res);
static inline const char * dlm_lock_mode_name(int mode)
{
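
Note on the dlmcommon.h changes above: the lockres hash now spans DLM_HASH_PAGES separately allocated pages of hlist_heads instead of one contiguous array, and dlm_lockres_hash() maps a flat bucket index onto the right page and slot. A freestanding sketch of that two-level indexing, with array sizes chosen arbitrarily here for illustration, could be:

    #include <stdio.h>
    #include <stdlib.h>

    #define BUCKETS_PER_PAGE 512          /* stand-in for PAGE_SIZE / sizeof(struct hlist_head) */
    #define HASH_PAGES       32
    #define HASH_BUCKETS     (HASH_PAGES * BUCKETS_PER_PAGE)

    struct bucket { void *first; };       /* stand-in for struct hlist_head */

    /* Flat bucket index -> (page, offset within page). */
    static struct bucket *lockres_hash(struct bucket **pages, unsigned i)
    {
        return pages[(i / BUCKETS_PER_PAGE) % HASH_PAGES] + (i % BUCKETS_PER_PAGE);
    }

    int main(void)
    {
        struct bucket *pages[HASH_PAGES];
        unsigned i;

        for (i = 0; i < HASH_PAGES; i++)
            pages[i] = calloc(BUCKETS_PER_PAGE, sizeof(struct bucket));

        /* Bucket 1000 lives on page 1000/512 == 1, slot 1000%512 == 488. */
        printf("bucket 1000 -> page %u slot %u, ptr %p\n",
               1000 / BUCKETS_PER_PAGE, 1000 % BUCKETS_PER_PAGE,
               (void *)lockres_hash(pages, 1000));

        for (i = 0; i < HASH_PAGES; i++)
            free(pages[i]);
        return 0;
    }
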
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 8285228d9e37..c764dc8e40a2 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -214,6 +214,9 @@ grant:
if (lock->ml.node == dlm->node_num)
mlog(0, "doing in-place convert for nonlocal lock\n");
lock->ml.type = type;
+ if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
+ memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+
status = DLM_NORMAL;
*call_ast = 1;
goto unlock_exit;
@@ -231,8 +234,7 @@ switch_queues:
lock->ml.convert_type = type;
/* do not alter lock refcount. switching lists. */
- list_del_init(&lock->list);
- list_add_tail(&lock->list, &res->converting);
+ list_move_tail(&lock->list, &res->converting);
unlock_exit:
spin_unlock(&lock->spinlock);
@@ -248,8 +250,7 @@ void dlm_revert_pending_convert(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
/* do not alter lock refcount. switching lists. */
- list_del_init(&lock->list);
- list_add_tail(&lock->list, &res->granted);
+ list_move_tail(&lock->list, &res->granted);
lock->ml.convert_type = LKM_IVMODE;
lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
}
@@ -294,8 +295,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* move lock to local convert queue */
/* do not alter lock refcount. switching lists. */
- list_del_init(&lock->list);
- list_add_tail(&lock->list, &res->converting);
+ list_move_tail(&lock->list, &res->converting);
lock->convert_pending = 1;
lock->ml.convert_type = type;
@@ -464,6 +464,12 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
}
spin_lock(&res->spinlock);
+ status = __dlm_lockres_state_to_status(res);
+ if (status != DLM_NORMAL) {
+ spin_unlock(&res->spinlock);
+ dlm_error(status);
+ goto leave;
+ }
list_for_each(iter, &res->granted) {
lock = list_entry(iter, struct dlm_lock, list);
if (lock->ml.cookie == cnv->cookie &&
@@ -473,6 +479,21 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
}
lock = NULL;
}
+ if (!lock) {
+ __dlm_print_one_lock_resource(res);
+ list_for_each(iter, &res->granted) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock->ml.node == cnv->node_idx) {
+ mlog(ML_ERROR, "There is something here "
+ "for node %u, lock->ml.cookie=%llu, "
+ "cnv->cookie=%llu\n", cnv->node_idx,
+ (unsigned long long)lock->ml.cookie,
+ (unsigned long long)cnv->cookie);
+ break;
+ }
+ }
+ lock = NULL;
+ }
spin_unlock(&res->spinlock);
if (!lock) {
status = DLM_IVLOCKID;
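
Several dlmconvert.c hunks above replace an open-coded list_del_init()/list_add_tail() pair with list_move_tail(). The sketch below is a self-contained userspace model (not the kernel's <linux/list.h>) showing why the two forms are interchangeable when switching an entry between queues, and why neither form touches the lock refcount.

#include <stdio.h>

/* Self-contained model of a circular doubly-linked list. */
struct list_head { struct list_head *next, *prev; };

static void list_head_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void list_del(struct list_head *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
}

/* the open-coded pair the patch removes ... */
static void move_tail_open_coded(struct list_head *item, struct list_head *dst)
{
	list_del(item);
	list_head_init(item);	/* the list_del_init() part */
	list_add_tail(item, dst);
}

/* ... and the single helper it switches to. Both end with the entry linked
 * on dst, so the number of references held on the lock never changes. */
static void move_tail(struct list_head *item, struct list_head *dst)
{
	list_del(item);
	list_add_tail(item, dst);
}

int main(void)
{
	struct list_head granted, converting, lock;

	list_head_init(&granted);
	list_head_init(&converting);
	list_add_tail(&lock, &granted);

	move_tail_open_coded(&lock, &converting);
	move_tail(&lock, &granted);
	printf("lock back on granted? %d\n", granted.next == &lock);
	return 0;
}
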
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index c7eae5d3324e..3f6c8d88f7af 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -37,10 +37,8 @@
#include "dlmapi.h"
#include "dlmcommon.h"
-#include "dlmdebug.h"
#include "dlmdomain.h"
-#include "dlmdebug.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"
@@ -120,6 +118,7 @@ void dlm_print_one_lock(struct dlm_lock *lockid)
}
EXPORT_SYMBOL_GPL(dlm_print_one_lock);
+#if 0
void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
{
struct dlm_lock_resource *res;
@@ -136,12 +135,13 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
spin_lock(&dlm->spinlock);
for (i=0; i<DLM_HASH_BUCKETS; i++) {
- bucket = &(dlm->lockres_hash[i]);
+ bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, iter, bucket, hash_node)
dlm_print_one_lock_resource(res);
}
spin_unlock(&dlm->spinlock);
}
+#endif /* 0 */
static const char *dlm_errnames[] = {
[DLM_NORMAL] = "DLM_NORMAL",
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
deleted file mode 100644
index 6858510c3ccd..000000000000
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * dlmdebug.h
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef DLMDEBUG_H
-#define DLMDEBUG_H
-
-void dlm_dump_lock_resources(struct dlm_ctxt *dlm);
-
-#endif
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 8f3a9e3106fd..b8c23f7ba67e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -41,7 +41,6 @@
#include "dlmapi.h"
#include "dlmcommon.h"
-#include "dlmdebug.h"
#include "dlmdomain.h"
#include "dlmver.h"
@@ -49,6 +48,33 @@
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
#include "cluster/masklog.h"
+static void dlm_free_pagevec(void **vec, int pages)
+{
+ while (pages--)
+ free_page((unsigned long)vec[pages]);
+ kfree(vec);
+}
+
+static void **dlm_alloc_pagevec(int pages)
+{
+ void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
+ int i;
+
+ if (!vec)
+ return NULL;
+
+ for (i = 0; i < pages; i++)
+ if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
+ goto out_free;
+
+ mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
+ pages, DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE);
+ return vec;
+out_free:
+ dlm_free_pagevec(vec, i);
+ return NULL;
+}
+
/*
*
* spinlock lock ordering: if multiple locks are needed, obey this ordering:
@@ -62,7 +88,7 @@
*
*/
-spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
@@ -90,8 +116,7 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
assert_spin_locked(&dlm->spinlock);
q = &res->lockname;
- q->hash = full_name_hash(q->name, q->len);
- bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]);
+ bucket = dlm_lockres_hash(dlm, q->hash);
/* get a reference for our hashtable */
dlm_lockres_get(res);
@@ -100,34 +125,32 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
}
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
- const char *name,
- unsigned int len)
+ const char *name,
+ unsigned int len,
+ unsigned int hash)
{
- unsigned int hash;
- struct hlist_node *iter;
- struct dlm_lock_resource *tmpres=NULL;
struct hlist_head *bucket;
+ struct hlist_node *list;
mlog_entry("%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
- hash = full_name_hash(name, len);
-
- bucket = &(dlm->lockres_hash[hash % DLM_HASH_BUCKETS]);
-
- /* check for pre-existing lock */
- hlist_for_each(iter, bucket) {
- tmpres = hlist_entry(iter, struct dlm_lock_resource, hash_node);
- if (tmpres->lockname.len == len &&
- memcmp(tmpres->lockname.name, name, len) == 0) {
- dlm_lockres_get(tmpres);
- break;
- }
+ bucket = dlm_lockres_hash(dlm, hash);
- tmpres = NULL;
+ hlist_for_each(list, bucket) {
+ struct dlm_lock_resource *res = hlist_entry(list,
+ struct dlm_lock_resource, hash_node);
+ if (res->lockname.name[0] != name[0])
+ continue;
+ if (unlikely(res->lockname.len != len))
+ continue;
+ if (memcmp(res->lockname.name + 1, name + 1, len - 1))
+ continue;
+ dlm_lockres_get(res);
+ return res;
}
- return tmpres;
+ return NULL;
}
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
@@ -135,9 +158,10 @@ struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
unsigned int len)
{
struct dlm_lock_resource *res;
+ unsigned int hash = dlm_lockid_hash(name, len);
spin_lock(&dlm->spinlock);
- res = __dlm_lookup_lockres(dlm, name, len);
+ res = __dlm_lookup_lockres(dlm, name, len, hash);
spin_unlock(&dlm->spinlock);
return res;
}
@@ -194,7 +218,7 @@ static int dlm_wait_on_domain_helper(const char *domain)
static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
{
if (dlm->lockres_hash)
- free_page((unsigned long) dlm->lockres_hash);
+ dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
if (dlm->name)
kfree(dlm->name);
@@ -278,11 +302,21 @@ int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
return ret;
}
+static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_worker) {
+ flush_workqueue(dlm->dlm_worker);
+ destroy_workqueue(dlm->dlm_worker);
+ dlm->dlm_worker = NULL;
+ }
+}
+
static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
{
dlm_unregister_domain_handlers(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
+ dlm_destroy_dlm_worker(dlm);
/* We've left the domain. Now we can take ourselves out of the
* list and allow the kref stuff to help us free the
@@ -304,8 +338,8 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
restart:
spin_lock(&dlm->spinlock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
- while (!hlist_empty(&dlm->lockres_hash[i])) {
- res = hlist_entry(dlm->lockres_hash[i].first,
+ while (!hlist_empty(dlm_lockres_hash(dlm, i))) {
+ res = hlist_entry(dlm_lockres_hash(dlm, i)->first,
struct dlm_lock_resource, hash_node);
/* need reference when manually grabbing lockres */
dlm_lockres_get(res);
@@ -1126,6 +1160,13 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
goto bail;
}
+ dlm->dlm_worker = create_singlethread_workqueue("dlm_wq");
+ if (!dlm->dlm_worker) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
do {
unsigned int backoff;
status = dlm_try_to_join_domain(dlm);
@@ -1166,6 +1207,7 @@ bail:
dlm_unregister_domain_handlers(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
+ dlm_destroy_dlm_worker(dlm);
}
return status;
@@ -1191,7 +1233,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
goto leave;
}
- dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL);
+ dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
if (!dlm->lockres_hash) {
mlog_errno(-ENOMEM);
kfree(dlm->name);
@@ -1200,8 +1242,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
goto leave;
}
- for (i=0; i<DLM_HASH_BUCKETS; i++)
- INIT_HLIST_HEAD(&dlm->lockres_hash[i]);
+ for (i = 0; i < DLM_HASH_BUCKETS; i++)
+ INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
strcpy(dlm->name, domain);
dlm->key = key;
@@ -1231,6 +1273,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
dlm->dlm_thread_task = NULL;
dlm->dlm_reco_thread_task = NULL;
+ dlm->dlm_worker = NULL;
init_waitqueue_head(&dlm->dlm_thread_wq);
init_waitqueue_head(&dlm->dlm_reco_thread_wq);
init_waitqueue_head(&dlm->reco.event);
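
The dlmdomain.c changes above replace the single-page lockres hash with a vector of pages (dlm_alloc_pagevec()/dlm_free_pagevec()) indexed through dlm_lockres_hash(). The userspace sketch below models only that two-level lookup; the page size, bucket size, page count, and the bucket_for() helper are illustrative assumptions, not the kernel's actual DLM_HASH_PAGES/DLM_BUCKETS_PER_PAGE values.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative numbers only: assume 4 KiB pages and 8-byte hlist heads. */
#define PAGE_SIZE		4096UL
#define BUCKET_SIZE		8UL
#define BUCKETS_PER_PAGE	(PAGE_SIZE / BUCKET_SIZE)
#define HASH_PAGES		4UL
#define HASH_BUCKETS		(HASH_PAGES * BUCKETS_PER_PAGE)

/* Two-level lookup in the spirit of dlm_lockres_hash(): pick the page by
 * dividing the bucket index, then index into that page by the remainder. */
static void *bucket_for(void **pagevec, unsigned int hash)
{
	unsigned int i = hash % HASH_BUCKETS;
	char *page = pagevec[i / BUCKETS_PER_PAGE];

	return page + (i % BUCKETS_PER_PAGE) * BUCKET_SIZE;
}

int main(void)
{
	void *pagevec[HASH_PAGES];
	unsigned long p;

	for (p = 0; p < HASH_PAGES; p++)
		pagevec[p] = calloc(1, PAGE_SIZE);

	printf("hash 12345 -> bucket %p\n", bucket_for(pagevec, 12345));

	for (p = 0; p < HASH_PAGES; p++)
		free(pagevec[p]);
	return 0;
}
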
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 7273d9fa6bab..033ad1701232 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -116,7 +116,7 @@ static int dlmfs_file_open(struct inode *inode,
* doesn't make sense for LVB writes. */
file->f_flags &= ~O_APPEND;
- fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+ fp = kmalloc(sizeof(*fp), GFP_NOFS);
if (!fp) {
status = -ENOMEM;
goto bail;
@@ -196,7 +196,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
else
readlen = count - *ppos;
- lvb_buf = kmalloc(readlen, GFP_KERNEL);
+ lvb_buf = kmalloc(readlen, GFP_NOFS);
if (!lvb_buf)
return -ENOMEM;
@@ -240,7 +240,7 @@ static ssize_t dlmfs_file_write(struct file *filp,
else
writelen = count - *ppos;
- lvb_buf = kmalloc(writelen, GFP_KERNEL);
+ lvb_buf = kmalloc(writelen, GFP_NOFS);
if (!lvb_buf)
return -ENOMEM;
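
The dlmfs.c hunks above switch allocations from GFP_KERNEL to GFP_NOFS so that direct reclaim triggered by the allocation cannot re-enter the filesystem while cluster locks are held. A tiny kernel-style fragment of that pattern follows; the helper name is hypothetical and used purely for illustration.

#include <linux/slab.h>

/* Hypothetical helper, not from this patch: an allocation made on a path
 * that may hold cluster/fs locks uses GFP_NOFS so that memory reclaim
 * triggered by the allocation cannot recurse into the filesystem. */
static void *dlm_path_alloc(size_t len)
{
	return kmalloc(len, GFP_NOFS);
}
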
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 6fea28318d6d..5ca57ec650c7 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -53,7 +53,7 @@
#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"
-static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
@@ -201,6 +201,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
struct dlm_lock *lock, int flags)
{
enum dlm_status status = DLM_DENIED;
+ int lockres_changed = 1;
mlog_entry("type=%d\n", lock->ml.type);
mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
@@ -226,8 +227,25 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 0;
if (status != DLM_NORMAL) {
- if (status != DLM_NOTQUEUED)
+ if (status == DLM_RECOVERING &&
+ dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ /* recovery lock was mastered by dead node.
+ * we need to have calc_usage shoot down this
+ * lockres and completely remaster it. */
+ mlog(0, "%s: recovery lock was owned by "
+ "dead node %u, remaster it now.\n",
+ dlm->name, res->owner);
+ } else if (status != DLM_NOTQUEUED) {
+ /*
+ * DO NOT call calc_usage, as this would unhash
+ * the remote lockres before we ever get to use
+ * it. treat as if we never made any change to
+ * the lockres.
+ */
+ lockres_changed = 0;
dlm_error(status);
+ }
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
} else if (dlm_is_recovery_lock(res->lockname.name,
@@ -239,12 +257,12 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
mlog(0, "%s: $RECOVERY lock for this node (%u) is "
"mastered by %u; got lock, manually granting (no ast)\n",
dlm->name, dlm->node_num, res->owner);
- list_del_init(&lock->list);
- list_add_tail(&lock->list, &res->granted);
+ list_move_tail(&lock->list, &res->granted);
}
spin_unlock(&res->spinlock);
- dlm_lockres_calc_usage(dlm, res);
+ if (lockres_changed)
+ dlm_lockres_calc_usage(dlm, res);
wake_up(&res->wq);
return status;
@@ -281,6 +299,14 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
if (tmpret >= 0) {
// successfully sent and received
ret = status; // this is already a dlm_status
+ if (ret == DLM_REJECTED) {
+ mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
+ "no longer owned by %u. that node is coming back "
+ "up currently.\n", dlm->name, create.namelen,
+ create.name, res->owner);
+ dlm_print_one_lock_resource(res);
+ BUG();
+ }
} else {
mlog_errno(tmpret);
if (dlm_is_host_down(tmpret)) {
@@ -382,13 +408,13 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
struct dlm_lock *lock;
int kernel_allocated = 0;
- lock = kcalloc(1, sizeof(*lock), GFP_KERNEL);
+ lock = kcalloc(1, sizeof(*lock), GFP_NOFS);
if (!lock)
return NULL;
if (!lksb) {
/* zero memory only if kernel-allocated */
- lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL);
+ lksb = kcalloc(1, sizeof(*lksb), GFP_NOFS);
if (!lksb) {
kfree(lock);
return NULL;
@@ -429,11 +455,16 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data)
if (!dlm_grab(dlm))
return DLM_REJECTED;
- mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
- "Domain %s not fully joined!\n", dlm->name);
-
name = create->name;
namelen = create->namelen;
+ status = DLM_REJECTED;
+ if (!dlm_domain_fully_joined(dlm)) {
+ mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
+ "sending a create_lock message for lock %.*s!\n",
+ dlm->name, create->node_idx, namelen, name);
+ dlm_error(status);
+ goto leave;
+ }
status = DLM_IVBUFLEN;
if (namelen > DLM_LOCKID_NAME_MAX) {
@@ -669,18 +700,22 @@ retry_lock:
msleep(100);
/* no waiting for dlm_reco_thread */
if (recovery) {
- if (status == DLM_RECOVERING) {
- mlog(0, "%s: got RECOVERING "
- "for $REOCVERY lock, master "
- "was %u\n", dlm->name,
- res->owner);
- dlm_wait_for_node_death(dlm, res->owner,
- DLM_NODE_DEATH_WAIT_MAX);
- }
+ if (status != DLM_RECOVERING)
+ goto retry_lock;
+
+ mlog(0, "%s: got RECOVERING "
+ "for $RECOVERY lock, master "
+ "was %u\n", dlm->name,
+ res->owner);
+ /* wait to see the node go down, then
+ * drop down and allow the lockres to
+ * get cleaned up. need to remaster. */
+ dlm_wait_for_node_death(dlm, res->owner,
+ DLM_NODE_DEATH_WAIT_MAX);
} else {
dlm_wait_for_recovery(dlm);
+ goto retry_lock;
}
- goto retry_lock;
}
if (status != DLM_NORMAL) {
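
dlmlock.c (like dlmdomain.c above and dlmrecovery.c below) also converts static spinlock_t definitions initialised with SPIN_LOCK_UNLOCKED into DEFINE_SPINLOCK(). A kernel-style sketch of the preferred form, with a hypothetical lock name; the lock-debugging rationale is given here as general background, not taken from this patch.

#include <linux/spinlock.h>

/* Both forms create an unlocked spinlock, but DEFINE_SPINLOCK() also sets
 * up the per-lock debugging/lockdep metadata that a bare
 * SPIN_LOCK_UNLOCKED initializer cannot provide. */
static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	spin_lock(&example_lock);
	/* ... touch the shared state the lock protects ... */
	spin_unlock(&example_lock);
}
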
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 940be4c13b1f..1b8346dd0572 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -47,7 +47,6 @@
#include "dlmapi.h"
#include "dlmcommon.h"
-#include "dlmdebug.h"
#include "dlmdomain.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
@@ -74,6 +73,7 @@ struct dlm_master_list_entry
wait_queue_head_t wq;
atomic_t woken;
struct kref mle_refs;
+ int inuse;
unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
@@ -127,18 +127,30 @@ static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
return 1;
}
-#if 0
-/* Code here is included but defined out as it aids debugging */
+#define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m)
+static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
+{
+ int i;
+ printk("%s=[ ", mapname);
+ for (i=0; i<O2NM_MAX_NODES; i++)
+ if (test_bit(i, map))
+ printk("%d ", i);
+ printk("]");
+}
-void dlm_print_one_mle(struct dlm_master_list_entry *mle)
+static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
- int i = 0, refs;
+ int refs;
char *type;
char attached;
u8 master;
unsigned int namelen;
const char *name;
struct kref *k;
+ unsigned long *maybe = mle->maybe_map,
+ *vote = mle->vote_map,
+ *resp = mle->response_map,
+ *node = mle->node_map;
k = &mle->mle_refs;
if (mle->type == DLM_MLE_BLOCK)
@@ -159,18 +171,29 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle)
name = mle->u.res->lockname.name;
}
- mlog(ML_NOTICE, " #%3d: %3s %3d %3u %3u %c (%d)%.*s\n",
- i, type, refs, master, mle->new_master, attached,
- namelen, namelen, name);
+ mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
+ namelen, name, type, refs, master, mle->new_master, attached,
+ mle->inuse);
+ dlm_print_nodemap(maybe);
+ printk(", ");
+ dlm_print_nodemap(vote);
+ printk(", ");
+ dlm_print_nodemap(resp);
+ printk(", ");
+ dlm_print_nodemap(node);
+ printk(", ");
+ printk("\n");
}
+#if 0
+/* Code here is included but defined out as it aids debugging */
+
static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
struct dlm_master_list_entry *mle;
struct list_head *iter;
mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
- mlog(ML_NOTICE, " ####: type refs owner new events? lockname nodemap votemap respmap maybemap\n");
spin_lock(&dlm->master_lock);
list_for_each(iter, &dlm->master_list) {
mle = list_entry(iter, struct dlm_master_list_entry, list);
@@ -314,6 +337,31 @@ static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
spin_unlock(&dlm->spinlock);
}
+static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+ mle->inuse++;
+ kref_get(&mle->mle_refs);
+}
+
+static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ mle->inuse--;
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+}
+
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
@@ -322,9 +370,14 @@ static void __dlm_put_mle(struct dlm_master_list_entry *mle)
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
- BUG_ON(!atomic_read(&mle->mle_refs.refcount));
-
- kref_put(&mle->mle_refs, dlm_mle_release);
+ if (!atomic_read(&mle->mle_refs.refcount)) {
+ /* this may or may not crash, but who cares.
+ * it's a BUG. */
+ mlog(ML_ERROR, "bad mle: %p\n", mle);
+ dlm_print_one_mle(mle);
+ BUG();
+ } else
+ kref_put(&mle->mle_refs, dlm_mle_release);
}
@@ -367,6 +420,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
memset(mle->response_map, 0, sizeof(mle->response_map));
mle->master = O2NM_MAX_NODES;
mle->new_master = O2NM_MAX_NODES;
+ mle->inuse = 0;
if (mle->type == DLM_MLE_MASTER) {
BUG_ON(!res);
@@ -564,6 +618,28 @@ static void dlm_lockres_release(struct kref *kref)
mlog(0, "destroying lockres %.*s\n", res->lockname.len,
res->lockname.name);
+ if (!hlist_unhashed(&res->hash_node) ||
+ !list_empty(&res->granted) ||
+ !list_empty(&res->converting) ||
+ !list_empty(&res->blocked) ||
+ !list_empty(&res->dirty) ||
+ !list_empty(&res->recovering) ||
+ !list_empty(&res->purge)) {
+ mlog(ML_ERROR,
+ "Going to BUG for resource %.*s."
+ " We're on a list! [%c%c%c%c%c%c%c]\n",
+ res->lockname.len, res->lockname.name,
+ !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
+ !list_empty(&res->granted) ? 'G' : ' ',
+ !list_empty(&res->converting) ? 'C' : ' ',
+ !list_empty(&res->blocked) ? 'B' : ' ',
+ !list_empty(&res->dirty) ? 'D' : ' ',
+ !list_empty(&res->recovering) ? 'R' : ' ',
+ !list_empty(&res->purge) ? 'P' : ' ');
+
+ dlm_print_one_lock_resource(res);
+ }
+
/* By the time we're ready to blow this guy away, we shouldn't
* be on any lists. */
BUG_ON(!hlist_unhashed(&res->hash_node));
@@ -579,11 +655,6 @@ static void dlm_lockres_release(struct kref *kref)
kfree(res);
}
-void dlm_lockres_get(struct dlm_lock_resource *res)
-{
- kref_get(&res->refs);
-}
-
void dlm_lockres_put(struct dlm_lock_resource *res)
{
kref_put(&res->refs, dlm_lockres_release);
@@ -603,7 +674,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
memcpy(qname, name, namelen);
res->lockname.len = namelen;
- res->lockname.hash = full_name_hash(name, namelen);
+ res->lockname.hash = dlm_lockid_hash(name, namelen);
init_waitqueue_head(&res->wq);
spin_lock_init(&res->spinlock);
@@ -637,11 +708,11 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
{
struct dlm_lock_resource *res;
- res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
+ res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS);
if (!res)
return NULL;
- res->lockname.name = kmalloc(namelen, GFP_KERNEL);
+ res->lockname.name = kmalloc(namelen, GFP_NOFS);
if (!res->lockname.name) {
kfree(res);
return NULL;
@@ -677,19 +748,20 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
int blocked = 0;
int ret, nodenum;
struct dlm_node_iter iter;
- unsigned int namelen;
+ unsigned int namelen, hash;
int tries = 0;
int bit, wait_on_recovery = 0;
BUG_ON(!lockid);
namelen = strlen(lockid);
+ hash = dlm_lockid_hash(lockid, namelen);
mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
lookup:
spin_lock(&dlm->spinlock);
- tmpres = __dlm_lookup_lockres(dlm, lockid, namelen);
+ tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
if (tmpres) {
spin_unlock(&dlm->spinlock);
mlog(0, "found in hash!\n");
@@ -704,7 +776,7 @@ lookup:
mlog(0, "allocating a new resource\n");
/* nothing found and we need to allocate one. */
alloc_mle = (struct dlm_master_list_entry *)
- kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
+ kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!alloc_mle)
goto leave;
res = dlm_new_lockres(dlm, lockid, namelen);
@@ -790,10 +862,11 @@ lookup:
* if so, the creator of the BLOCK may try to put the last
* ref at this time in the assert master handler, so we
* need an extra one to keep from a bad ptr deref. */
- dlm_get_mle(mle);
+ dlm_get_mle_inuse(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
+redo_request:
while (wait_on_recovery) {
/* any cluster changes that occurred after dropping the
* dlm spinlock would be detectable be a change on the mle,
@@ -812,7 +885,7 @@ lookup:
}
dlm_kick_recovery_thread(dlm);
- msleep(100);
+ msleep(1000);
dlm_wait_for_recovery(dlm);
spin_lock(&dlm->spinlock);
@@ -825,13 +898,15 @@ lookup:
} else
wait_on_recovery = 0;
spin_unlock(&dlm->spinlock);
+
+ if (wait_on_recovery)
+ dlm_wait_for_node_recovery(dlm, bit, 10000);
}
/* must wait for lock to be mastered elsewhere */
if (blocked)
goto wait;
-redo_request:
ret = -EINVAL;
dlm_node_iter_init(mle->vote_map, &iter);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
@@ -856,6 +931,7 @@ wait:
/* keep going until the response map includes all nodes */
ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
if (ret < 0) {
+ wait_on_recovery = 1;
mlog(0, "%s:%.*s: node map changed, redo the "
"master request now, blocked=%d\n",
dlm->name, res->lockname.len,
@@ -866,7 +942,7 @@ wait:
dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
- /* dlm_print_one_mle(mle); */
+ dlm_print_one_mle(mle);
tries = 0;
}
goto redo_request;
@@ -880,7 +956,7 @@ wait:
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
/* put the extra ref */
- dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
wake_waiters:
spin_lock(&res->spinlock);
@@ -921,12 +997,14 @@ recheck:
spin_unlock(&res->spinlock);
/* this will cause the master to re-assert across
* the whole cluster, freeing up mles */
- ret = dlm_do_master_request(mle, res->owner);
- if (ret < 0) {
- /* give recovery a chance to run */
- mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
- msleep(500);
- goto recheck;
+ if (res->owner != dlm->node_num) {
+ ret = dlm_do_master_request(mle, res->owner);
+ if (ret < 0) {
+ /* give recovery a chance to run */
+ mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
+ msleep(500);
+ goto recheck;
+ }
}
ret = 0;
goto leave;
@@ -962,6 +1040,12 @@ recheck:
"rechecking now\n", dlm->name, res->lockname.len,
res->lockname.name);
goto recheck;
+ } else {
+ if (!voting_done) {
+ mlog(0, "map not changed and voting not done "
+ "for %s:%.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ }
}
if (m != O2NM_MAX_NODES) {
@@ -1129,18 +1213,6 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
set_bit(node, mle->vote_map);
} else {
mlog(ML_ERROR, "node down! %d\n", node);
-
- /* if the node wasn't involved in mastery skip it,
- * but clear it out from the maps so that it will
- * not affect mastery of this lockres */
- clear_bit(node, mle->response_map);
- clear_bit(node, mle->vote_map);
- if (!test_bit(node, mle->maybe_map))
- goto next;
-
- /* if we're already blocked on lock mastery, and the
- * dead node wasn't the expected master, or there is
- * another node in the maybe_map, keep waiting */
if (blocked) {
int lowest = find_next_bit(mle->maybe_map,
O2NM_MAX_NODES, 0);
@@ -1148,54 +1220,53 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
/* act like it was never there */
clear_bit(node, mle->maybe_map);
- if (node != lowest)
- goto next;
-
- mlog(ML_ERROR, "expected master %u died while "
- "this node was blocked waiting on it!\n",
- node);
- lowest = find_next_bit(mle->maybe_map,
- O2NM_MAX_NODES,
- lowest+1);
- if (lowest < O2NM_MAX_NODES) {
- mlog(0, "still blocked. waiting "
- "on %u now\n", lowest);
- goto next;
+ if (node == lowest) {
+ mlog(0, "expected master %u died"
+ " while this node was blocked "
+ "waiting on it!\n", node);
+ lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES,
+ lowest+1);
+ if (lowest < O2NM_MAX_NODES) {
+ mlog(0, "%s:%.*s:still "
+ "blocked. waiting on %u "
+ "now\n", dlm->name,
+ res->lockname.len,
+ res->lockname.name,
+ lowest);
+ } else {
+ /* mle is an MLE_BLOCK, but
+ * there is now nothing left to
+ * block on. we need to return
+ * all the way back out and try
+ * again with an MLE_MASTER.
+ * dlm_do_local_recovery_cleanup
+ * has already run, so the mle
+ * refcount is ok */
+ mlog(0, "%s:%.*s: no "
+ "longer blocking. try to "
+ "master this here\n",
+ dlm->name,
+ res->lockname.len,
+ res->lockname.name);
+ mle->type = DLM_MLE_MASTER;
+ mle->u.res = res;
+ }
}
-
- /* mle is an MLE_BLOCK, but there is now
- * nothing left to block on. we need to return
- * all the way back out and try again with
- * an MLE_MASTER. dlm_do_local_recovery_cleanup
- * has already run, so the mle refcount is ok */
- mlog(0, "no longer blocking. we can "
- "try to master this here\n");
- mle->type = DLM_MLE_MASTER;
- memset(mle->maybe_map, 0,
- sizeof(mle->maybe_map));
- memset(mle->response_map, 0,
- sizeof(mle->maybe_map));
- memcpy(mle->vote_map, mle->node_map,
- sizeof(mle->node_map));
- mle->u.res = res;
- set_bit(dlm->node_num, mle->maybe_map);
-
- ret = -EAGAIN;
- goto next;
}
- clear_bit(node, mle->maybe_map);
- if (node > dlm->node_num)
- goto next;
-
- mlog(0, "dead node in map!\n");
- /* yuck. go back and re-contact all nodes
- * in the vote_map, removing this node. */
- memset(mle->response_map, 0,
- sizeof(mle->response_map));
+ /* now blank out everything, as if we had never
+ * contacted anyone */
+ memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
+ memset(mle->response_map, 0, sizeof(mle->response_map));
+ /* reset the vote_map to the current node_map */
+ memcpy(mle->vote_map, mle->node_map,
+ sizeof(mle->node_map));
+ /* put myself into the maybe map */
+ if (mle->type != DLM_MLE_BLOCK)
+ set_bit(dlm->node_num, mle->maybe_map);
}
ret = -EAGAIN;
-next:
node = dlm_bitmap_diff_iter_next(&bdi, &sc);
}
return ret;
@@ -1316,7 +1387,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
char *name;
- unsigned int namelen;
+ unsigned int namelen, hash;
int found, ret;
int set_maybe;
int dispatch_assert = 0;
@@ -1331,6 +1402,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
name = request->name;
namelen = request->namelen;
+ hash = dlm_lockid_hash(name, namelen);
if (namelen > DLM_LOCKID_NAME_MAX) {
response = DLM_IVBUFLEN;
@@ -1339,7 +1411,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
way_up_top:
spin_lock(&dlm->spinlock);
- res = __dlm_lookup_lockres(dlm, name, namelen);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
if (res) {
spin_unlock(&dlm->spinlock);
@@ -1459,21 +1531,18 @@ way_up_top:
spin_unlock(&dlm->spinlock);
mle = (struct dlm_master_list_entry *)
- kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
+ kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
if (!mle) {
response = DLM_MASTER_RESP_ERROR;
mlog_errno(-ENOMEM);
goto send_response;
}
- spin_lock(&dlm->spinlock);
- dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL,
- name, namelen);
- spin_unlock(&dlm->spinlock);
goto way_up_top;
}
// mlog(0, "this is second time thru, already allocated, "
// "add the block.\n");
+ dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
set_bit(request->node_idx, mle->maybe_map);
list_add(&mle->list, &dlm->master_list);
response = DLM_MASTER_RESP_NO;
@@ -1556,6 +1625,8 @@ again:
dlm_node_iter_init(nodemap, &iter);
while ((to = dlm_node_iter_next(&iter)) >= 0) {
int r = 0;
+ struct dlm_master_list_entry *mle = NULL;
+
mlog(0, "sending assert master to %d (%.*s)\n", to,
namelen, lockname);
memset(&assert, 0, sizeof(assert));
@@ -1567,20 +1638,28 @@ again:
tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
&assert, sizeof(assert), to, &r);
if (tmpret < 0) {
- mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
+ mlog(0, "assert_master returned %d!\n", tmpret);
if (!dlm_is_host_down(tmpret)) {
- mlog(ML_ERROR, "unhandled error!\n");
+ mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
BUG();
}
/* a node died. finish out the rest of the nodes. */
- mlog(ML_ERROR, "link to %d went down!\n", to);
+ mlog(0, "link to %d went down!\n", to);
/* any nonzero status return will do */
ret = tmpret;
} else if (r < 0) {
/* ok, something horribly messed. kill thyself. */
mlog(ML_ERROR,"during assert master of %.*s to %u, "
"got %d.\n", namelen, lockname, to, r);
- dlm_dump_lock_resources(dlm);
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ if (dlm_find_mle(dlm, &mle, (char *)lockname,
+ namelen)) {
+ dlm_print_one_mle(mle);
+ __dlm_put_mle(mle);
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
BUG();
} else if (r == EAGAIN) {
mlog(0, "%.*s: node %u create mles on other "
@@ -1612,7 +1691,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
struct dlm_lock_resource *res = NULL;
char *name;
- unsigned int namelen;
+ unsigned int namelen, hash;
u32 flags;
int master_request = 0;
int ret = 0;
@@ -1622,6 +1701,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
name = assert->name;
namelen = assert->namelen;
+ hash = dlm_lockid_hash(name, namelen);
flags = be32_to_cpu(assert->flags);
if (namelen > DLM_LOCKID_NAME_MAX) {
@@ -1646,7 +1726,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
if (bit >= O2NM_MAX_NODES) {
/* not necessarily an error, though less likely.
* could be master just re-asserting. */
- mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
+ mlog(0, "no bits set in the maybe_map, but %u "
"is asserting! (%.*s)\n", assert->node_idx,
namelen, name);
} else if (bit != assert->node_idx) {
@@ -1658,19 +1738,36 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
* number winning the mastery will respond
* YES to mastery requests, but this node
* had no way of knowing. let it pass. */
- mlog(ML_ERROR, "%u is the lowest node, "
+ mlog(0, "%u is the lowest node, "
"%u is asserting. (%.*s) %u must "
"have begun after %u won.\n", bit,
assert->node_idx, namelen, name, bit,
assert->node_idx);
}
}
+ if (mle->type == DLM_MLE_MIGRATION) {
+ if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
+ mlog(0, "%s:%.*s: got cleanup assert"
+ " from %u for migration\n",
+ dlm->name, namelen, name,
+ assert->node_idx);
+ } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
+ mlog(0, "%s:%.*s: got unrelated assert"
+ " from %u for migration, ignoring\n",
+ dlm->name, namelen, name,
+ assert->node_idx);
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+ goto done;
+ }
+ }
}
spin_unlock(&dlm->master_lock);
/* ok everything checks out with the MLE
* now check to see if there is a lockres */
- res = __dlm_lookup_lockres(dlm, name, namelen);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
if (res) {
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -1679,7 +1776,8 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
goto kill;
}
if (!mle) {
- if (res->owner != assert->node_idx) {
+ if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
+ res->owner != assert->node_idx) {
mlog(ML_ERROR, "assert_master from "
"%u, but current owner is "
"%u! (%.*s)\n",
@@ -1732,6 +1830,7 @@ ok:
if (mle) {
int extra_ref = 0;
int nn = -1;
+ int rr, err = 0;
spin_lock(&mle->spinlock);
if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
@@ -1751,27 +1850,64 @@ ok:
wake_up(&mle->wq);
spin_unlock(&mle->spinlock);
- if (mle->type == DLM_MLE_MIGRATION && res) {
- mlog(0, "finishing off migration of lockres %.*s, "
- "from %u to %u\n",
- res->lockname.len, res->lockname.name,
- dlm->node_num, mle->new_master);
+ if (res) {
spin_lock(&res->spinlock);
- res->state &= ~DLM_LOCK_RES_MIGRATING;
- dlm_change_lockres_owner(dlm, res, mle->new_master);
- BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
+ if (mle->type == DLM_MLE_MIGRATION) {
+ mlog(0, "finishing off migration of lockres %.*s, "
+ "from %u to %u\n",
+ res->lockname.len, res->lockname.name,
+ dlm->node_num, mle->new_master);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ dlm_change_lockres_owner(dlm, res, mle->new_master);
+ BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
+ } else {
+ dlm_change_lockres_owner(dlm, res, mle->master);
+ }
spin_unlock(&res->spinlock);
}
- /* master is known, detach if not already detached */
- dlm_mle_detach_hb_events(dlm, mle);
- dlm_put_mle(mle);
-
+
+ /* master is known, detach if not already detached.
+ * ensures that only one assert_master call will happen
+ * on this mle. */
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+
+ rr = atomic_read(&mle->mle_refs.refcount);
+ if (mle->inuse > 0) {
+ if (extra_ref && rr < 3)
+ err = 1;
+ else if (!extra_ref && rr < 2)
+ err = 1;
+ } else {
+ if (extra_ref && rr < 2)
+ err = 1;
+ else if (!extra_ref && rr < 1)
+ err = 1;
+ }
+ if (err) {
+ mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
+ "that will mess up this node, refs=%d, extra=%d, "
+ "inuse=%d\n", dlm->name, namelen, name,
+ assert->node_idx, rr, extra_ref, mle->inuse);
+ dlm_print_one_mle(mle);
+ }
+ list_del_init(&mle->list);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ __dlm_put_mle(mle);
if (extra_ref) {
/* the assert master message now balances the extra
* ref given by the master / migration request message.
* if this is the last put, it will be removed
* from the list. */
- dlm_put_mle(mle);
+ __dlm_put_mle(mle);
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+ } else if (res) {
+ if (res->owner != assert->node_idx) {
+ mlog(0, "assert_master from %u, but current "
+ "owner is %u (%.*s), no mle\n", assert->node_idx,
+ res->owner, namelen, name);
}
}
@@ -1788,12 +1924,12 @@ done:
kill:
/* kill the caller! */
+ mlog(ML_ERROR, "Bad message received from another node. Dumping state "
+ "and killing the other node now! This node is OK and can continue.\n");
+ __dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
dlm_lockres_put(res);
- mlog(ML_ERROR, "Bad message received from another node. Dumping state "
- "and killing the other node now! This node is OK and can continue.\n");
- dlm_dump_lock_resources(dlm);
dlm_put(dlm);
return -EINVAL;
}
@@ -1803,7 +1939,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
int ignore_higher, u8 request_from, u32 flags)
{
struct dlm_work_item *item;
- item = kcalloc(1, sizeof(*item), GFP_KERNEL);
+ item = kcalloc(1, sizeof(*item), GFP_NOFS);
if (!item)
return -ENOMEM;
@@ -1825,7 +1961,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
- schedule_work(&dlm->dispatched_work);
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
return 0;
}
@@ -1866,6 +2002,23 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
}
}
+ /*
+ * If we're migrating this lock to someone else, we are no
+ * longer allowed to assert out own mastery. OTOH, we need to
+ * prevent migration from starting while we're still asserting
+ * our dominance. The reserved ast delays migration.
+ */
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ mlog(0, "Someone asked us to assert mastery, but we're "
+ "in the middle of migration. Skipping assert, "
+ "the new master will handle that.\n");
+ spin_unlock(&res->spinlock);
+ goto put;
+ } else
+ __dlm_lockres_reserve_ast(res);
+ spin_unlock(&res->spinlock);
+
/* this call now finishes out the nodemap
* even if one or more nodes die */
mlog(0, "worker about to master %.*s here, this=%u\n",
@@ -1875,9 +2028,14 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
nodemap, flags);
if (ret < 0) {
/* no need to restart, we are done */
- mlog_errno(ret);
+ if (!dlm_is_host_down(ret))
+ mlog_errno(ret);
}
+ /* Ok, we've asserted ourselves. Let's let migration start. */
+ dlm_lockres_release_ast(dlm, res);
+
+put:
dlm_lockres_put(res);
mlog(0, "finished with dlm_assert_master_worker\n");
@@ -1916,6 +2074,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
BUG();
/* host is down, so answer for that node would be
* DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
+ ret = 0;
}
if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
@@ -2016,14 +2175,14 @@ int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
*/
ret = -ENOMEM;
- mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
+ mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
if (!mres) {
mlog_errno(ret);
goto leave;
}
mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
- GFP_KERNEL);
+ GFP_NOFS);
if (!mle) {
mlog_errno(ret);
goto leave;
@@ -2117,7 +2276,7 @@ fail:
* take both dlm->spinlock and dlm->master_lock */
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
- dlm_get_mle(mle);
+ dlm_get_mle_inuse(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
@@ -2134,7 +2293,10 @@ fail:
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
- dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
goto leave;
}
@@ -2164,8 +2326,8 @@ fail:
/* avoid hang during shutdown when migrating lockres
* to a node which also goes down */
if (dlm_is_node_dead(dlm, target)) {
- mlog(0, "%s:%.*s: expected migration target %u "
- "is no longer up. restarting.\n",
+ mlog(0, "%s:%.*s: expected migration "
+ "target %u is no longer up, restarting\n",
dlm->name, res->lockname.len,
res->lockname.name, target);
ret = -ERESTARTSYS;
@@ -2175,7 +2337,10 @@ fail:
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
- dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
goto leave;
}
/* TODO: if node died: stop, clean up, return error */
@@ -2191,7 +2356,7 @@ fail:
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, mle);
- dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
ret = 0;
dlm_lockres_calc_usage(dlm, res);
@@ -2462,7 +2627,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
const char *name;
- unsigned int namelen;
+ unsigned int namelen, hash;
int ret = 0;
if (!dlm_grab(dlm))
@@ -2470,10 +2635,11 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
name = migrate->name;
namelen = migrate->namelen;
+ hash = dlm_lockid_hash(name, namelen);
/* preallocate.. if this fails, abort */
mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
- GFP_KERNEL);
+ GFP_NOFS);
if (!mle) {
ret = -ENOMEM;
@@ -2482,7 +2648,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
/* check for pre-existing lock */
spin_lock(&dlm->spinlock);
- res = __dlm_lookup_lockres(dlm, name, namelen);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
spin_lock(&dlm->master_lock);
if (res) {
@@ -2580,6 +2746,7 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
/* remove it from the list so that only one
* mle will be found */
list_del_init(&tmp->list);
+ __dlm_mle_detach_hb_events(dlm, mle);
}
spin_unlock(&tmp->spinlock);
}
@@ -2601,6 +2768,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
struct list_head *iter, *iter2;
struct dlm_master_list_entry *mle;
struct dlm_lock_resource *res;
+ unsigned int hash;
mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
@@ -2640,7 +2808,7 @@ top:
* may result in the mle being unlinked and
* freed, but there may still be a process
* waiting in the dlmlock path which is fine. */
- mlog(ML_ERROR, "node %u was expected master\n",
+ mlog(0, "node %u was expected master\n",
dead_node);
atomic_set(&mle->woken, 1);
spin_unlock(&mle->spinlock);
@@ -2673,19 +2841,21 @@ top:
/* remove from the list early. NOTE: unlinking
* list_head while in list_for_each_safe */
+ __dlm_mle_detach_hb_events(dlm, mle);
spin_lock(&mle->spinlock);
list_del_init(&mle->list);
atomic_set(&mle->woken, 1);
spin_unlock(&mle->spinlock);
wake_up(&mle->wq);
- mlog(0, "node %u died during migration from "
- "%u to %u!\n", dead_node,
+ mlog(0, "%s: node %u died during migration from "
+ "%u to %u!\n", dlm->name, dead_node,
mle->master, mle->new_master);
/* if there is a lockres associated with this
* mle, find it and set its owner to UNKNOWN */
+ hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
res = __dlm_lookup_lockres(dlm, mle->u.name.name,
- mle->u.name.len);
+ mle->u.name.len, hash);
if (res) {
/* unfortunately if we hit this rare case, our
* lock ordering is messed. we need to drop
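
A recurring theme in the dlmmaster.c changes above is the new "inuse" marker on master list entries: dlm_get_mle_inuse() takes a kref and bumps inuse, and every such get is balanced by dlm_put_mle_inuse(). The self-contained userspace sketch below models only that pairing; the struct, field names, and counts are illustrative, and the real code does this under dlm->spinlock and dlm->master_lock.

#include <assert.h>
#include <stdio.h>

struct mle_like {
	int refs;	/* stands in for the kref            */
	int inuse;	/* long-lived "don't tear down" mark */
};

/* Mirrors dlm_get_mle_inuse(): take a reference *and* mark the entry busy. */
static void get_inuse(struct mle_like *m)
{
	m->inuse++;
	m->refs++;
}

/* Mirrors dlm_put_mle_inuse(): clear the busy marker, then drop the ref.
 * The entry may only be freed once refs reaches zero. */
static void put_inuse(struct mle_like *m)
{
	m->inuse--;
	m->refs--;
	assert(m->refs >= 0 && m->inuse >= 0);
}

int main(void)
{
	struct mle_like m = { .refs = 1, .inuse = 0 };	/* creation ref */

	get_inuse(&m);		/* long-running user, e.g. migration   */
	put_inuse(&m);		/* balanced put once the work finishes */
	printf("refs=%d inuse=%d\n", m.refs, m.inuse);
	return 0;
}
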
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 805cbabac051..29b2845f370d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -98,8 +98,8 @@ static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static u64 dlm_get_next_mig_cookie(void);
-static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dlm_reco_state_lock);
+static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
@@ -115,12 +115,37 @@ static u64 dlm_get_next_mig_cookie(void)
return c;
}
+static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
+ u8 dead_node)
+{
+ assert_spin_locked(&dlm->spinlock);
+ if (dlm->reco.dead_node != dead_node)
+ mlog(0, "%s: changing dead_node from %u to %u\n",
+ dlm->name, dlm->reco.dead_node, dead_node);
+ dlm->reco.dead_node = dead_node;
+}
+
+static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
+ u8 master)
+{
+ assert_spin_locked(&dlm->spinlock);
+ mlog(0, "%s: changing new_master from %u to %u\n",
+ dlm->name, dlm->reco.new_master, master);
+ dlm->reco.new_master = master;
+}
+
+static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
+{
+ assert_spin_locked(&dlm->spinlock);
+ clear_bit(dlm->reco.dead_node, dlm->recovery_map);
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
+ dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
+}
+
static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
spin_lock(&dlm->spinlock);
- clear_bit(dlm->reco.dead_node, dlm->recovery_map);
- dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
- dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ __dlm_reset_recovery(dlm);
spin_unlock(&dlm->spinlock);
}
@@ -132,12 +157,21 @@ void dlm_dispatch_work(void *data)
struct list_head *iter, *iter2;
struct dlm_work_item *item;
dlm_workfunc_t *workfunc;
+ int tot=0;
+
+ if (!dlm_joined(dlm))
+ return;
spin_lock(&dlm->work_lock);
list_splice_init(&dlm->work_list, &tmp_list);
spin_unlock(&dlm->work_lock);
list_for_each_safe(iter, iter2, &tmp_list) {
+ tot++;
+ }
+ mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
+
+ list_for_each_safe(iter, iter2, &tmp_list) {
item = list_entry(iter, struct dlm_work_item, list);
workfunc = item->func;
list_del_init(&item->list);
@@ -220,6 +254,52 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
*
*/
+static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
+{
+ struct dlm_reco_node_data *ndata;
+ struct dlm_lock_resource *res;
+
+ mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
+ dlm->name, dlm->dlm_reco_thread_task->pid,
+ dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
+ dlm->reco.dead_node, dlm->reco.new_master);
+
+ list_for_each_entry(ndata, &dlm->reco.node_data, list) {
+ char *st = "unknown";
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ st = "init";
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ st = "requesting";
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ st = "dead";
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ st = "receiving";
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ st = "requested";
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ st = "done";
+ break;
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ st = "finalize-sent";
+ break;
+ default:
+ st = "bad";
+ break;
+ }
+ mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
+ dlm->name, ndata->node_num, st);
+ }
+ list_for_each_entry(res, &dlm->reco.resources, recovering) {
+ mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ }
+}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
@@ -267,11 +347,23 @@ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
int dead;
spin_lock(&dlm->spinlock);
- dead = test_bit(node, dlm->domain_map);
+ dead = !test_bit(node, dlm->domain_map);
spin_unlock(&dlm->spinlock);
return dead;
}
+/* returns true if node is no longer in the domain
+ * could be dead or just not joined */
+static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
+{
+ int recovered;
+ spin_lock(&dlm->spinlock);
+ recovered = !test_bit(node, dlm->recovery_map);
+ spin_unlock(&dlm->spinlock);
+ return recovered;
+}
+
+
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
if (timeout) {
@@ -290,6 +382,24 @@ int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
return 0;
}
+int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+{
+ if (timeout) {
+ mlog(0, "%s: waiting %dms for notification of "
+ "recovery of node %u\n", dlm->name, timeout, node);
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ } else {
+ mlog(0, "%s: waiting indefinitely for notification "
+ "of recovery of node %u\n", dlm->name, node);
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node));
+ }
+ /* for now, return 0 */
+ return 0;
+}
+
/* callers of the top-level api calls (dlmlock/dlmunlock) should
* block on the dlm->reco.event when recovery is in progress.
* the dlm recovery thread will set this state when it begins
@@ -308,6 +418,13 @@ static int dlm_in_recovery(struct dlm_ctxt *dlm)
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
+ if (dlm_in_recovery(dlm)) {
+ mlog(0, "%s: reco thread %d in recovery: "
+ "state=%d, master=%u, dead=%u\n",
+ dlm->name, dlm->dlm_reco_thread_task->pid,
+ dlm->reco.state, dlm->reco.new_master,
+ dlm->reco.dead_node);
+ }
wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
@@ -341,7 +458,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
mlog(0, "new master %u died while recovering %u!\n",
dlm->reco.new_master, dlm->reco.dead_node);
/* unset the new_master, leave dead_node */
- dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}
/* select a target to recover */
@@ -350,14 +467,14 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
if (bit >= O2NM_MAX_NODES || bit < 0)
- dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
else
- dlm->reco.dead_node = bit;
+ dlm_set_reco_dead_node(dlm, bit);
} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
/* BUG? */
mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
dlm->reco.dead_node);
- dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
}
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
@@ -366,7 +483,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
/* return to main thread loop and sleep. */
return 0;
}
- mlog(0, "recovery thread found node %u in the recovery map!\n",
+ mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
+ dlm->name, dlm->dlm_reco_thread_task->pid,
dlm->reco.dead_node);
spin_unlock(&dlm->spinlock);
@@ -389,8 +507,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
}
mlog(0, "another node will master this recovery session.\n");
}
- mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
- dlm->name, dlm->reco.new_master,
+ mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
+ dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
dlm->node_num, dlm->reco.dead_node);
/* it is safe to start everything back up here
@@ -402,11 +520,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
return 0;
master_here:
- mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
+ mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
+ dlm->dlm_reco_thread_task->pid,
dlm->name, dlm->reco.dead_node, dlm->node_num);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
+ /* we should never hit this anymore */
mlog(ML_ERROR, "error %d remastering locks for node %u, "
"retrying.\n", status, dlm->reco.dead_node);
/* yield a bit to allow any final network messages
@@ -433,9 +553,16 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
int destroy = 0;
int pass = 0;
- status = dlm_init_recovery_area(dlm, dead_node);
- if (status < 0)
- goto leave;
+ do {
+ /* we have become recovery master. there is no escaping
+ * this, so just keep trying until we get it. */
+ status = dlm_init_recovery_area(dlm, dead_node);
+ if (status < 0) {
+ mlog(ML_ERROR, "%s: failed to alloc recovery area, "
+ "retrying\n", dlm->name);
+ msleep(1000);
+ }
+ } while (status != 0);
/* safe to access the node data list without a lock, since this
* process is the only one to change the list */
@@ -452,16 +579,36 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
continue;
}
- status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
- if (status < 0) {
- mlog_errno(status);
- if (dlm_is_host_down(status))
- ndata->state = DLM_RECO_NODE_DATA_DEAD;
- else {
- destroy = 1;
- goto leave;
+ do {
+ status = dlm_request_all_locks(dlm, ndata->node_num,
+ dead_node);
+ if (status < 0) {
+ mlog_errno(status);
+ if (dlm_is_host_down(status)) {
+ /* node died, ignore it for recovery */
+ status = 0;
+ ndata->state = DLM_RECO_NODE_DATA_DEAD;
+ /* wait for the domain map to catch up
+ * with the network state. */
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_dead(dlm,
+ ndata->node_num),
+ msecs_to_jiffies(1000));
+ mlog(0, "waited 1 sec for %u, "
+ "dead? %s\n", ndata->node_num,
+ dlm_is_node_dead(dlm, ndata->node_num) ?
+ "yes" : "no");
+ } else {
+ /* -ENOMEM on the other node */
+ mlog(0, "%s: node %u returned "
+ "%d during recovery, retrying "
+ "after a short wait\n",
+ dlm->name, ndata->node_num,
+ status);
+ msleep(100);
+ }
}
- }
+ } while (status != 0);
switch (ndata->state) {
case DLM_RECO_NODE_DATA_INIT:
@@ -473,10 +620,9 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
mlog(0, "node %u died after requesting "
"recovery info for node %u\n",
ndata->node_num, dead_node);
- // start all over
- destroy = 1;
- status = -EAGAIN;
- goto leave;
+ /* fine. don't need this node's info.
+ * continue without it. */
+ break;
case DLM_RECO_NODE_DATA_REQUESTING:
ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
mlog(0, "now receiving recovery data from "
@@ -520,35 +666,26 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
BUG();
break;
case DLM_RECO_NODE_DATA_DEAD:
- mlog(ML_NOTICE, "node %u died after "
+ mlog(0, "node %u died after "
"requesting recovery info for "
"node %u\n", ndata->node_num,
dead_node);
- spin_unlock(&dlm_reco_state_lock);
- // start all over
- destroy = 1;
- status = -EAGAIN;
- /* instead of spinning like crazy here,
- * wait for the domain map to catch up
- * with the network state. otherwise this
- * can be hit hundreds of times before
- * the node is really seen as dead. */
- wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_dead(dlm,
- ndata->node_num),
- msecs_to_jiffies(1000));
- mlog(0, "waited 1 sec for %u, "
- "dead? %s\n", ndata->node_num,
- dlm_is_node_dead(dlm, ndata->node_num) ?
- "yes" : "no");
- goto leave;
+ break;
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
+ mlog(0, "%s: node %u still in state %s\n",
+ dlm->name, ndata->node_num,
+ ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
+ "receiving" : "requested");
all_nodes_done = 0;
break;
case DLM_RECO_NODE_DATA_DONE:
+ mlog(0, "%s: node %u state is done\n",
+ dlm->name, ndata->node_num);
break;
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ mlog(0, "%s: node %u state is finalize\n",
+ dlm->name, ndata->node_num);
break;
}
}
@@ -578,7 +715,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
jiffies, dlm->reco.dead_node,
dlm->node_num, dlm->reco.new_master);
destroy = 1;
- status = ret;
+ status = 0;
/* rescan everything marked dirty along the way */
dlm_kick_thread(dlm, NULL);
break;
@@ -591,7 +728,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
}
-leave:
if (destroy)
dlm_destroy_recovery_area(dlm, dead_node);
@@ -617,7 +753,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
}
BUG_ON(num == dead_node);
- ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
+ ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS);
if (!ndata) {
dlm_destroy_recovery_area(dlm, dead_node);
return -ENOMEM;
@@ -691,16 +827,25 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
if (!dlm_grab(dlm))
return -EINVAL;
+ if (lr->dead_node != dlm->reco.dead_node) {
+ mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
+ "dead_node is %u\n", dlm->name, lr->node_idx,
+ lr->dead_node, dlm->reco.dead_node);
+ dlm_print_reco_node_status(dlm);
+ /* this is a hack */
+ dlm_put(dlm);
+ return -ENOMEM;
+ }
BUG_ON(lr->dead_node != dlm->reco.dead_node);
- item = kcalloc(1, sizeof(*item), GFP_KERNEL);
+ item = kcalloc(1, sizeof(*item), GFP_NOFS);
if (!item) {
dlm_put(dlm);
return -ENOMEM;
}
/* this will get freed by dlm_request_all_locks_worker */
- buf = (char *) __get_free_page(GFP_KERNEL);
+ buf = (char *) __get_free_page(GFP_NOFS);
if (!buf) {
kfree(item);
dlm_put(dlm);
@@ -715,7 +860,7 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
- schedule_work(&dlm->dispatched_work);
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
dlm_put(dlm);
return 0;
@@ -730,32 +875,34 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
struct list_head *iter;
int ret;
u8 dead_node, reco_master;
+ int skip_all_done = 0;
dlm = item->dlm;
dead_node = item->u.ral.dead_node;
reco_master = item->u.ral.reco_master;
mres = (struct dlm_migratable_lockres *)data;
+ mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
+ dlm->name, dead_node, reco_master);
+
if (dead_node != dlm->reco.dead_node ||
reco_master != dlm->reco.new_master) {
- /* show extra debug info if the recovery state is messed */
- mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
- "request(dead=%u, master=%u)\n",
- dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
- dead_node, reco_master);
- mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
- "entry[0]={c=%u:%llu,l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
- dlm->name, mres->lockname_len, mres->lockname, mres->master,
- mres->num_locks, mres->total_locks, mres->flags,
- dlm_get_lock_cookie_node(mres->ml[0].cookie),
- dlm_get_lock_cookie_seq(mres->ml[0].cookie),
- mres->ml[0].list, mres->ml[0].flags,
- mres->ml[0].type, mres->ml[0].convert_type,
- mres->ml[0].highest_blocked, mres->ml[0].node);
- BUG();
+ /* worker could have been created before the recovery master
+ * died. if so, do not continue, but do not error. */
+ if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
+ mlog(ML_NOTICE, "%s: will not send recovery state, "
+ "recovery master %u died, thread=(dead=%u,mas=%u)"
+ " current=(dead=%u,mas=%u)\n", dlm->name,
+ reco_master, dead_node, reco_master,
+ dlm->reco.dead_node, dlm->reco.new_master);
+ } else {
+ mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
+ "master=%u), request(dead=%u, master=%u)\n",
+ dlm->name, dlm->reco.dead_node,
+ dlm->reco.new_master, dead_node, reco_master);
+ }
+ goto leave;
}
- BUG_ON(dead_node != dlm->reco.dead_node);
- BUG_ON(reco_master != dlm->reco.new_master);
/* lock resources should have already been moved to the
* dlm->reco.resources list. now move items from that list
@@ -766,12 +913,20 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
/* now we can begin blasting lockreses without the dlm lock */
+
+ /* any errors returned will be due to the new_master dying,
+ * the dlm_reco_thread should detect this */
list_for_each(iter, &resources) {
res = list_entry (iter, struct dlm_lock_resource, recovering);
ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
DLM_MRES_RECOVERY);
- if (ret < 0)
- mlog_errno(ret);
+ if (ret < 0) {
+ mlog(ML_ERROR, "%s: node %u went down while sending "
+ "recovery state for dead node %u, ret=%d\n", dlm->name,
+ reco_master, dead_node, ret);
+ skip_all_done = 1;
+ break;
+ }
}
/* move the resources back to the list */
@@ -779,10 +934,15 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
list_splice_init(&resources, &dlm->reco.resources);
spin_unlock(&dlm->spinlock);
- ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
- if (ret < 0)
- mlog_errno(ret);
-
+ if (!skip_all_done) {
+ ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
+ if (ret < 0) {
+ mlog(ML_ERROR, "%s: node %u went down while sending "
+ "recovery all-done for dead node %u, ret=%d\n",
+ dlm->name, reco_master, dead_node, ret);
+ }
+ }
+leave:
free_page((unsigned long)data);
}
@@ -801,8 +961,14 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
- /* negative status is ignored by the caller */
- if (ret >= 0)
+ if (ret < 0) {
+ if (!dlm_is_host_down(ret)) {
+ mlog_errno(ret);
+ mlog(ML_ERROR, "%s: unknown error sending data-done "
+ "to %u\n", dlm->name, send_to);
+ BUG();
+ }
+ } else
ret = tmpret;
return ret;
}
@@ -822,7 +988,11 @@ int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
"node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
- BUG_ON(done->dead_node != dlm->reco.dead_node);
+
+ mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
+ "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
+ "node_idx=%u, this node=%u\n", done->dead_node,
+ dlm->reco.dead_node, done->node_idx, dlm->node_num);
spin_lock(&dlm_reco_state_lock);
list_for_each(iter, &dlm->reco.node_data) {
@@ -905,13 +1075,11 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
mlog(0, "found lockres owned by dead node while "
"doing recovery for node %u. sending it.\n",
dead_node);
- list_del_init(&res->recovering);
- list_add_tail(&res->recovering, list);
+ list_move_tail(&res->recovering, list);
} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "found UNKNOWN owner while doing recovery "
"for node %u. sending it.\n", dead_node);
- list_del_init(&res->recovering);
- list_add_tail(&res->recovering, list);
+ list_move_tail(&res->recovering, list);
}
}
spin_unlock(&dlm->spinlock);
@@ -1023,8 +1191,9 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
ml->type == LKM_PRMODE) {
/* if it is already set, this had better be a PR
* and it has to match */
- if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
- memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
+ if (!dlm_lvb_is_empty(mres->lvb) &&
+ (ml->type == LKM_EXMODE ||
+ memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
mlog(ML_ERROR, "mismatched lvbs!\n");
__dlm_print_one_lock_resource(lock->lockres);
BUG();
@@ -1083,22 +1252,25 @@ int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
* we must send it immediately. */
ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
res, total_locks);
- if (ret < 0) {
- // TODO
- mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
- "returned %d, TODO\n", ret);
- BUG();
- }
+ if (ret < 0)
+ goto error;
}
}
/* flush any remaining locks */
ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
- if (ret < 0) {
- // TODO
- mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
- "TODO\n", ret);
+ if (ret < 0)
+ goto error;
+ return ret;
+
+error:
+ mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
+ dlm->name, ret);
+ if (!dlm_is_host_down(ret))
BUG();
- }
+ mlog(0, "%s: node %u went down while sending %s "
+ "lockres %.*s\n", dlm->name, send_to,
+ flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
+ res->lockname.len, res->lockname.name);
return ret;
}
@@ -1146,8 +1318,8 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
mlog(0, "all done flag. all lockres data received!\n");
ret = -ENOMEM;
- buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
- item = kcalloc(1, sizeof(*item), GFP_KERNEL);
+ buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
+ item = kcalloc(1, sizeof(*item), GFP_NOFS);
if (!buf || !item)
goto leave;
@@ -1238,7 +1410,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
- schedule_work(&dlm->dispatched_work);
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
leave:
dlm_put(dlm);
@@ -1406,6 +1578,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
struct dlm_ctxt *dlm = data;
struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
struct dlm_lock_resource *res = NULL;
+ unsigned int hash;
int master = DLM_LOCK_RES_OWNER_UNKNOWN;
u32 flags = DLM_ASSERT_MASTER_REQUERY;
@@ -1415,8 +1588,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
return master;
}
+ hash = dlm_lockid_hash(req->name, req->namelen);
+
spin_lock(&dlm->spinlock);
- res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
+ res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
if (res) {
spin_lock(&res->spinlock);
master = res->owner;
@@ -1483,7 +1658,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
struct dlm_lock *newlock = NULL;
struct dlm_lockstatus *lksb = NULL;
int ret = 0;
- int i;
+ int i, bad;
struct list_head *iter;
struct dlm_lock *lock = NULL;
@@ -1529,8 +1704,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
/* move the lock to its proper place */
/* do not alter lock refcount. switching lists. */
- list_del_init(&lock->list);
- list_add_tail(&lock->list, queue);
+ list_move_tail(&lock->list, queue);
spin_unlock(&res->spinlock);
mlog(0, "just reordered a local lock!\n");
@@ -1553,28 +1727,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
}
lksb->flags |= (ml->flags &
(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
-
- if (mres->lvb[0]) {
+
+ if (ml->type == LKM_NLMODE)
+ goto skip_lvb;
+
+ if (!dlm_lvb_is_empty(mres->lvb)) {
if (lksb->flags & DLM_LKSB_PUT_LVB) {
/* other node was trying to update
* lvb when node died. recreate the
* lksb with the updated lvb. */
memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
+ /* the lock resource lvb update must happen
+ * NOW, before the spinlock is dropped.
+ * we no longer wait for the AST to update
+ * the lvb. */
+ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
} else {
/* otherwise, the node is sending its
* most recent valid lvb info */
BUG_ON(ml->type != LKM_EXMODE &&
ml->type != LKM_PRMODE);
- if (res->lvb[0] && (ml->type == LKM_EXMODE ||
- memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
- mlog(ML_ERROR, "received bad lvb!\n");
- __dlm_print_one_lock_resource(res);
- BUG();
+ if (!dlm_lvb_is_empty(res->lvb) &&
+ (ml->type == LKM_EXMODE ||
+ memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
+ int i;
+ mlog(ML_ERROR, "%s:%.*s: received bad "
+ "lvb! type=%d\n", dlm->name,
+ res->lockname.len,
+ res->lockname.name, ml->type);
+ printk("lockres lvb=[");
+ for (i=0; i<DLM_LVB_LEN; i++)
+ printk("%02x", res->lvb[i]);
+ printk("]\nmigrated lvb=[");
+ for (i=0; i<DLM_LVB_LEN; i++)
+ printk("%02x", mres->lvb[i]);
+ printk("]\n");
+ dlm_print_one_lock_resource(res);
+ BUG();
}
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
}
}
-
+skip_lvb:
/* NOTE:
* wrt lock queue ordering and recovery:
@@ -1592,9 +1786,33 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
* relative to each other, but clearly *not*
* preserved relative to locks from other nodes.
*/
+ bad = 0;
spin_lock(&res->spinlock);
- dlm_lock_get(newlock);
- list_add_tail(&newlock->list, queue);
+ list_for_each_entry(lock, queue, list) {
+ if (lock->ml.cookie == ml->cookie) {
+ u64 c = lock->ml.cookie;
+ mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
+ "exists on this lockres!\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(c),
+ dlm_get_lock_cookie_seq(c));
+
+ mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
+ "node=%u, cookie=%u:%llu, queue=%d\n",
+ ml->type, ml->convert_type, ml->node,
+ dlm_get_lock_cookie_node(ml->cookie),
+ dlm_get_lock_cookie_seq(ml->cookie),
+ ml->list);
+
+ __dlm_print_one_lock_resource(res);
+ bad = 1;
+ break;
+ }
+ }
+ if (!bad) {
+ dlm_lock_get(newlock);
+ list_add_tail(&newlock->list, queue);
+ }
spin_unlock(&res->spinlock);
}
mlog(0, "done running all the locks\n");
@@ -1618,8 +1836,14 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
struct dlm_lock *lock;
res->state |= DLM_LOCK_RES_RECOVERING;
- if (!list_empty(&res->recovering))
+ if (!list_empty(&res->recovering)) {
+ mlog(0,
+ "Recovering res %s:%.*s, is already on recovery list!\n",
+ dlm->name, res->lockname.len, res->lockname.name);
list_del_init(&res->recovering);
+ }
+ /* We need to hold a reference while on the recovery list */
+ dlm_lockres_get(res);
list_add_tail(&res->recovering, &dlm->reco.resources);
/* find any pending locks and put them back on proper list */
@@ -1708,9 +1932,11 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
dlm_change_lockres_owner(dlm, res, new_master);
res->state &= ~DLM_LOCK_RES_RECOVERING;
- __dlm_dirty_lockres(dlm, res);
+ if (!__dlm_lockres_unused(res))
+ __dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
+ dlm_lockres_put(res);
}
}
@@ -1719,7 +1945,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
* the RECOVERING state and set the owner
* if necessary */
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
- bucket = &(dlm->lockres_hash[i]);
+ bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
if (res->state & DLM_LOCK_RES_RECOVERING) {
if (res->owner == dead_node) {
@@ -1743,11 +1969,13 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
dlm->name, res->lockname.len,
res->lockname.name, res->owner);
list_del_init(&res->recovering);
+ dlm_lockres_put(res);
}
spin_lock(&res->spinlock);
dlm_change_lockres_owner(dlm, res, new_master);
res->state &= ~DLM_LOCK_RES_RECOVERING;
- __dlm_dirty_lockres(dlm, res);
+ if (!__dlm_lockres_unused(res))
+ __dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
}
@@ -1884,7 +2112,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
* need to be fired as a result.
*/
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
- bucket = &(dlm->lockres_hash[i]);
+ bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, iter, bucket, hash_node) {
/* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */
@@ -1924,6 +2152,20 @@ static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
assert_spin_locked(&dlm->spinlock);
+ if (dlm->reco.new_master == idx) {
+ mlog(0, "%s: recovery master %d just died\n",
+ dlm->name, idx);
+ if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ /* finalize1 was reached, so it is safe to clear
+ * the new_master and dead_node. that recovery
+ * is complete. */
+ mlog(0, "%s: dead master %d had reached "
+ "finalize1 state, clearing\n", dlm->name, idx);
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
+ __dlm_reset_recovery(dlm);
+ }
+ }
+
/* check to see if the node is already considered dead */
if (!test_bit(idx, dlm->live_nodes_map)) {
mlog(0, "for domain %s, node %d is already dead. "
@@ -2087,7 +2329,7 @@ again:
/* set the new_master to this node */
spin_lock(&dlm->spinlock);
- dlm->reco.new_master = dlm->node_num;
+ dlm_set_reco_master(dlm, dlm->node_num);
spin_unlock(&dlm->spinlock);
}
@@ -2125,6 +2367,10 @@ again:
mlog(0, "%s: reco master %u is ready to recover %u\n",
dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
status = -EEXIST;
+ } else if (ret == DLM_RECOVERING) {
+ mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
+ dlm->name, dlm->node_num);
+ goto again;
} else {
struct dlm_lock_resource *res;
@@ -2156,7 +2402,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
mlog_entry("%u\n", dead_node);
- mlog(0, "dead node is %u\n", dead_node);
+ mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
@@ -2214,6 +2460,14 @@ retry:
* another ENOMEM */
msleep(100);
goto retry;
+ } else if (ret == EAGAIN) {
+ mlog(0, "%s: trying to start recovery of node "
+ "%u, but node %u is waiting for last recovery "
+ "to complete, backoff for a bit\n", dlm->name,
+ dead_node, nodenum);
+ /* TODO Look into replacing msleep with cond_resched() */
+ msleep(100);
+ goto retry;
}
}
@@ -2229,8 +2483,20 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
if (!dlm_grab(dlm))
return 0;
- mlog(0, "node %u wants to recover node %u\n",
- br->node_idx, br->dead_node);
+ spin_lock(&dlm->spinlock);
+ if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
+ "but this node is in finalize state, waiting on finalize2\n",
+ dlm->name, br->node_idx, br->dead_node,
+ dlm->reco.dead_node, dlm->reco.new_master);
+ spin_unlock(&dlm->spinlock);
+ return EAGAIN;
+ }
+ spin_unlock(&dlm->spinlock);
+
+ mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
+ dlm->name, br->node_idx, br->dead_node,
+ dlm->reco.dead_node, dlm->reco.new_master);
dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
@@ -2252,8 +2518,8 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
"node %u changing it to %u\n", dlm->name,
dlm->reco.dead_node, br->node_idx, br->dead_node);
}
- dlm->reco.new_master = br->node_idx;
- dlm->reco.dead_node = br->dead_node;
+ dlm_set_reco_master(dlm, br->node_idx);
+ dlm_set_reco_dead_node(dlm, br->dead_node);
if (!test_bit(br->dead_node, dlm->recovery_map)) {
mlog(0, "recovery master %u sees %u as dead, but this "
"node has not yet. marking %u as dead\n",
@@ -2272,10 +2538,16 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
spin_unlock(&dlm->spinlock);
dlm_kick_recovery_thread(dlm);
+
+ mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
+ dlm->name, br->node_idx, br->dead_node,
+ dlm->reco.dead_node, dlm->reco.new_master);
+
dlm_put(dlm);
return 0;
}
+#define DLM_FINALIZE_STAGE2 0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
int ret = 0;
@@ -2283,25 +2555,31 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
struct dlm_node_iter iter;
int nodenum;
int status;
+ int stage = 1;
- mlog(0, "finishing recovery for node %s:%u\n",
- dlm->name, dlm->reco.dead_node);
+ mlog(0, "finishing recovery for node %s:%u, "
+ "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
spin_unlock(&dlm->spinlock);
+stage2:
memset(&fr, 0, sizeof(fr));
fr.node_idx = dlm->node_num;
fr.dead_node = dlm->reco.dead_node;
+ if (stage == 2)
+ fr.flags |= DLM_FINALIZE_STAGE2;
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
if (nodenum == dlm->node_num)
continue;
ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
&fr, sizeof(fr), nodenum, &status);
- if (ret >= 0) {
+ if (ret >= 0)
ret = status;
+ if (ret < 0) {
+ mlog_errno(ret);
if (dlm_is_host_down(ret)) {
/* this has no effect on this recovery
* session, so set the status to zero to
@@ -2309,13 +2587,17 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
mlog(ML_ERROR, "node %u went down after this "
"node finished recovery.\n", nodenum);
ret = 0;
+ continue;
}
- }
- if (ret < 0) {
- mlog_errno(ret);
break;
}
}
+ if (stage == 1) {
+ /* reset the node_iter back to the top and send finalize2 */
+ iter.curnode = -1;
+ stage = 2;
+ goto stage2;
+ }
return ret;
}
@@ -2324,14 +2606,19 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
struct dlm_ctxt *dlm = data;
struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
+ int stage = 1;
/* ok to return 0, domain has gone away */
if (!dlm_grab(dlm))
return 0;
- mlog(0, "node %u finalizing recovery of node %u\n",
- fr->node_idx, fr->dead_node);
+ if (fr->flags & DLM_FINALIZE_STAGE2)
+ stage = 2;
+ mlog(0, "%s: node %u finalizing recovery stage%d of "
+ "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
+ fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
+
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != fr->node_idx) {
@@ -2347,13 +2634,41 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
BUG();
}
- dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
-
- spin_unlock(&dlm->spinlock);
+ switch (stage) {
+ case 1:
+ dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
+ if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ mlog(ML_ERROR, "%s: received finalize1 from "
+ "new master %u for dead node %u, but "
+ "this node has already received it!\n",
+ dlm->name, fr->node_idx, fr->dead_node);
+ dlm_print_reco_node_status(dlm);
+ BUG();
+ }
+ dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
+ spin_unlock(&dlm->spinlock);
+ break;
+ case 2:
+ if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
+ mlog(ML_ERROR, "%s: received finalize2 from "
+ "new master %u for dead node %u, but "
+ "this node did not have finalize1!\n",
+ dlm->name, fr->node_idx, fr->dead_node);
+ dlm_print_reco_node_status(dlm);
+ BUG();
+ }
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
+ spin_unlock(&dlm->spinlock);
+ dlm_reset_recovery(dlm);
+ dlm_kick_recovery_thread(dlm);
+ break;
+ default:
+ BUG();
+ }
- dlm_reset_recovery(dlm);
+ mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
+ dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
- dlm_kick_recovery_thread(dlm);
dlm_put(dlm);
return 0;
}
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 5be9d14f12cb..0c822f3ffb05 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -39,6 +39,7 @@
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include "cluster/heartbeat.h"
@@ -53,6 +54,8 @@
#include "cluster/masklog.h"
static int dlm_thread(void *data);
+static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *lockres);
static void dlm_flush_asts(struct dlm_ctxt *dlm);
@@ -80,7 +83,7 @@ repeat:
}
-static int __dlm_lockres_unused(struct dlm_lock_resource *res)
+int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
if (list_empty(&res->granted) &&
list_empty(&res->converting) &&
@@ -103,6 +106,20 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_unused(res)){
+ /* For now, just keep any resource we master */
+ if (res->owner == dlm->node_num)
+ {
+ if (!list_empty(&res->purge)) {
+ mlog(0, "we master %s:%.*s, but it is on "
+ "the purge list. Removing\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name);
+ list_del_init(&res->purge);
+ dlm->purge_count--;
+ }
+ return;
+ }
+
if (list_empty(&res->purge)) {
mlog(0, "putting lockres %.*s from purge list\n",
res->lockname.len, res->lockname.name);
@@ -110,10 +127,23 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
res->last_used = jiffies;
list_add_tail(&res->purge, &dlm->purge_list);
dlm->purge_count++;
+
+ /* if this node is not the owner, there is
+ * no way to keep track of who the owner could be.
+ * unhash it to avoid serious problems. */
+ if (res->owner != dlm->node_num) {
+ mlog(0, "%s:%.*s: doing immediate "
+ "purge of lockres owned by %u\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, res->owner);
+
+ dlm_purge_lockres_now(dlm, res);
+ }
}
} else if (!list_empty(&res->purge)) {
- mlog(0, "removing lockres %.*s from purge list\n",
- res->lockname.len, res->lockname.name);
+ mlog(0, "removing lockres %.*s from purge list, "
+ "owner=%u\n", res->lockname.len, res->lockname.name,
+ res->owner);
list_del_init(&res->purge);
dlm->purge_count--;
@@ -165,6 +195,7 @@ again:
} else if (ret < 0) {
mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
lockres->lockname.len, lockres->lockname.name);
+ msleep(100);
goto again;
}
@@ -178,6 +209,24 @@ finish:
__dlm_unhash_lockres(lockres);
}
+/* make an unused lockres go away immediately.
+ * as soon as the dlm spinlock is dropped, this lockres
+ * will not be found. kfree still happens on last put. */
+static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *lockres)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&lockres->spinlock);
+
+ BUG_ON(!__dlm_lockres_unused(lockres));
+
+ if (!list_empty(&lockres->purge)) {
+ list_del_init(&lockres->purge);
+ dlm->purge_count--;
+ }
+ __dlm_unhash_lockres(lockres);
+}
+
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
int purge_now)
{
@@ -318,8 +367,7 @@ converting:
target->ml.type = target->ml.convert_type;
target->ml.convert_type = LKM_IVMODE;
- list_del_init(&target->list);
- list_add_tail(&target->list, &res->granted);
+ list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL;
@@ -380,8 +428,7 @@ blocked:
target->ml.type, target->ml.node);
// target->ml.type is already correct
- list_del_init(&target->list);
- list_add_tail(&target->list, &res->granted);
+ list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL;
@@ -422,6 +469,8 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
/* don't shuffle secondary queues */
if ((res->owner == dlm->node_num) &&
!(res->state & DLM_LOCK_RES_DIRTY)) {
+ /* ref for dirty_list */
+ dlm_lockres_get(res);
list_add_tail(&res->dirty, &dlm->dirty_list);
res->state |= DLM_LOCK_RES_DIRTY;
}
@@ -606,6 +655,8 @@ static int dlm_thread(void *data)
list_del_init(&res->dirty);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
+ /* Drop dirty_list ref */
+ dlm_lockres_put(res);
/* lockres can be re-dirtied/re-added to the
* dirty_list in this gap, but that is ok */
@@ -642,8 +693,9 @@ static int dlm_thread(void *data)
* spinlock and do NOT have the dlm lock.
* safe to reserve/queue asts and run the lists. */
- mlog(0, "calling dlm_shuffle_lists with dlm=%p, "
- "res=%p\n", dlm, res);
+ mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
+ "res=%.*s\n", dlm->name,
+ res->lockname.len, res->lockname.name);
/* called while holding lockres lock */
dlm_shuffle_lists(dlm, res);
@@ -657,6 +709,8 @@ in_progress:
/* if the lock was in-progress, stick
* it on the back of the list */
if (delay) {
+ /* ref for dirty_list */
+ dlm_lockres_get(res);
spin_lock(&res->spinlock);
list_add_tail(&res->dirty, &dlm->dirty_list);
res->state |= DLM_LOCK_RES_DIRTY;
@@ -677,7 +731,7 @@ in_progress:
/* yield and continue right away if there is more work to do */
if (!n) {
- yield();
+ cond_resched();
continue;
}
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 7b1a27542674..b0c3134f4f70 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -271,8 +271,7 @@ void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
- list_del_init(&lock->list);
- list_add_tail(&lock->list, &res->granted);
+ list_move_tail(&lock->list, &res->granted);
lock->ml.convert_type = LKM_IVMODE;
}
@@ -319,6 +318,16 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+ if (owner == dlm->node_num) {
+ /* ended up trying to contact ourself. this means
+ * that the lockres had been remote but became local
+ * via a migration. just retry it, now as local */
+ mlog(0, "%s:%.*s: this node became the master due to a "
+ "migration, re-evaluate now\n", dlm->name,
+ res->lockname.len, res->lockname.name);
+ return DLM_FORWARD;
+ }
+
memset(&unlock, 0, sizeof(unlock));
unlock.node_idx = dlm->node_num;
unlock.flags = cpu_to_be32(flags);
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index 74ca4e5f9765..e641b084b343 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -672,7 +672,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
u32 dlm_key;
char *domain;
- domain = kmalloc(name->len + 1, GFP_KERNEL);
+ domain = kmalloc(name->len + 1, GFP_NOFS);
if (!domain) {
mlog_errno(-ENOMEM);
return ERR_PTR(-ENOMEM);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 64cd52860c87..4acd37286bdd 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -242,7 +242,7 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
mlog_exit_void();
}
-static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
struct ocfs2_dlm_debug *dlm_debug)
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 84c507961287..35140f6cf840 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -114,7 +114,7 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
extern kmem_cache_t *ocfs2_inode_cache;
-extern struct address_space_operations ocfs2_aops;
+extern const struct address_space_operations ocfs2_aops;
struct buffer_head *ocfs2_bread(struct inode *inode, int block,
int *err, int reada);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index eebc3cfa6be8..910a601b2e98 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -49,7 +49,7 @@
#include "buffer_head_io.h"
-spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(trans_inc_lock);
static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
@@ -222,8 +222,7 @@ void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
OCFS2_I(inode)->ip_handle = handle;
- list_del(&(OCFS2_I(inode)->ip_handle_list));
- list_add_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
+ list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
}
static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index ee42765a8553..cf70fe2075b8 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -988,9 +988,7 @@ int ocfs2_request_mount_vote(struct ocfs2_super *osb)
}
bail:
- if (request)
- kfree(request);
-
+ kfree(request);
return status;
}
@@ -1021,9 +1019,7 @@ int ocfs2_request_umount_vote(struct ocfs2_super *osb)
}
bail:
- if (request)
- kfree(request);
-
+ kfree(request);
return status;
}
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index efc7c91128af..93a56bd4a2b7 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -1,5 +1,4 @@
-/* $Id: inode.c,v 1.15 2001/11/12 09:43:39 davem Exp $
- * openpromfs.c: /proc/openprom handling routines
+/* inode.c: /proc/openprom handling routines
*
* Copyright (C) 1996-1999 Jakub Jelinek (jakub@redhat.com)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
@@ -12,762 +11,245 @@
#include <linux/openprom_fs.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/seq_file.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
+#include <asm/prom.h>
#include <asm/uaccess.h>
-#define ALIASES_NNODES 64
-
-typedef struct {
- u16 parent;
- u16 next;
- u16 child;
- u16 first_prop;
- u32 node;
-} openpromfs_node;
-
-typedef struct {
-#define OPP_STRING 0x10
-#define OPP_STRINGLIST 0x20
-#define OPP_BINARY 0x40
-#define OPP_HEXSTRING 0x80
-#define OPP_DIRTY 0x01
-#define OPP_QUOTED 0x02
-#define OPP_NOTQUOTED 0x04
-#define OPP_ASCIIZ 0x08
- u32 flag;
- u32 alloclen;
- u32 len;
- char *value;
- char name[8];
-} openprom_property;
-
-static openpromfs_node *nodes;
-static int alloced;
-static u16 last_node;
-static u16 first_prop;
-static u16 options = 0xffff;
-static u16 aliases = 0xffff;
-static int aliases_nodes;
-static char *alias_names [ALIASES_NNODES];
-
-#define OPENPROM_ROOT_INO 16
-#define OPENPROM_FIRST_INO OPENPROM_ROOT_INO
-#define NODE(ino) nodes[ino - OPENPROM_FIRST_INO]
-#define NODE2INO(node) (node + OPENPROM_FIRST_INO)
-#define NODEP2INO(no) (no + OPENPROM_FIRST_INO + last_node)
-
-static int openpromfs_create (struct inode *, struct dentry *, int, struct nameidata *);
-static int openpromfs_readdir(struct file *, void *, filldir_t);
-static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
-static int openpromfs_unlink (struct inode *, struct dentry *dentry);
+static DEFINE_MUTEX(op_mutex);
-static inline u16 ptr_nod(void *p)
-{
- return (long)p & 0xFFFF;
-}
+#define OPENPROM_ROOT_INO 0
-static ssize_t nodenum_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+enum op_inode_type {
+ op_inode_node,
+ op_inode_prop,
+};
+
+union op_inode_data {
+ struct device_node *node;
+ struct property *prop;
+};
+
+struct op_inode_info {
+ struct inode vfs_inode;
+ enum op_inode_type type;
+ union op_inode_data u;
+};
+
+static inline struct op_inode_info *OP_I(struct inode *inode)
{
- struct inode *inode = file->f_dentry->d_inode;
- char buffer[10];
-
- if (count < 0 || !inode->u.generic_ip)
- return -EINVAL;
- sprintf (buffer, "%8.8lx\n", (long)inode->u.generic_ip);
- if (file->f_pos >= 9)
- return 0;
- if (count > 9 - file->f_pos)
- count = 9 - file->f_pos;
- if (copy_to_user(buf, buffer + file->f_pos, count))
- return -EFAULT;
- *ppos += count;
- return count;
+ return container_of(inode, struct op_inode_info, vfs_inode);
}
-static ssize_t property_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
+static int is_string(unsigned char *p, int len)
{
- struct inode *inode = filp->f_dentry->d_inode;
- int i, j, k;
- u32 node;
- char *p, *s;
- u32 *q;
- openprom_property *op;
- char buffer[64];
-
- if (!filp->private_data) {
- node = nodes[ptr_nod(inode->u.generic_ip)].node;
- i = ((u32)(long)inode->u.generic_ip) >> 16;
- if (ptr_nod(inode->u.generic_ip) == aliases) {
- if (i >= aliases_nodes)
- p = NULL;
- else
- p = alias_names [i];
- } else
- for (p = prom_firstprop (node, buffer);
- i && p && *p;
- p = prom_nextprop (node, p, buffer), i--)
- /* nothing */ ;
- if (!p || !*p)
- return -EIO;
- i = prom_getproplen (node, p);
- if (i < 0) {
- if (ptr_nod(inode->u.generic_ip) == aliases)
- i = 0;
- else
- return -EIO;
- }
- k = i;
- if (i < 64) i = 64;
- filp->private_data = kmalloc (sizeof (openprom_property)
- + (j = strlen (p)) + 2 * i,
- GFP_KERNEL);
- if (!filp->private_data)
- return -ENOMEM;
- op = filp->private_data;
- op->flag = 0;
- op->alloclen = 2 * i;
- strcpy (op->name, p);
- op->value = (char *)(((unsigned long)(op->name + j + 4)) & ~3);
- op->len = k;
- if (k && prom_getproperty (node, p, op->value, i) < 0)
- return -EIO;
- op->value [k] = 0;
- if (k) {
- for (s = NULL, p = op->value; p < op->value + k; p++) {
- if ((*p >= ' ' && *p <= '~') || *p == '\n') {
- op->flag |= OPP_STRING;
- s = p;
- continue;
- }
- if (p > op->value && !*p && s == p - 1) {
- if (p < op->value + k - 1)
- op->flag |= OPP_STRINGLIST;
- else
- op->flag |= OPP_ASCIIZ;
- continue;
- }
- if (k == 1 && !*p) {
- op->flag |= (OPP_STRING|OPP_ASCIIZ);
- break;
- }
- op->flag &= ~(OPP_STRING|OPP_STRINGLIST);
- if (k & 3)
- op->flag |= OPP_HEXSTRING;
- else
- op->flag |= OPP_BINARY;
- break;
- }
- if (op->flag & OPP_STRINGLIST)
- op->flag &= ~(OPP_STRING);
- if (op->flag & OPP_ASCIIZ)
- op->len--;
- }
- } else
- op = filp->private_data;
- if (!count || !(op->len || (op->flag & OPP_ASCIIZ)))
- return 0;
- if (*ppos >= 0xffffff || count >= 0xffffff)
- return -EINVAL;
- if (op->flag & OPP_STRINGLIST) {
- for (k = 0, p = op->value; p < op->value + op->len; p++)
- if (!*p)
- k++;
- i = op->len + 4 * k + 3;
- } else if (op->flag & OPP_STRING) {
- i = op->len + 3;
- } else if (op->flag & OPP_BINARY) {
- i = (op->len * 9) >> 2;
- } else {
- i = (op->len << 1) + 1;
- }
- k = *ppos;
- if (k >= i) return 0;
- if (count > i - k) count = i - k;
- if (op->flag & OPP_STRING) {
- if (!k) {
- if (put_user('\'', buf))
- return -EFAULT;
- k++;
- count--;
- }
+ int i;
- if (k + count >= i - 2)
- j = i - 2 - k;
- else
- j = count;
-
- if (j >= 0) {
- if (copy_to_user(buf + k - *ppos,
- op->value + k - 1, j))
- return -EFAULT;
- count -= j;
- k += j;
- }
+ for (i = 0; i < len; i++) {
+ unsigned char val = p[i];
- if (count) {
- if (put_user('\'', &buf [k++ - *ppos]))
- return -EFAULT;
- }
- if (count > 1) {
- if (put_user('\n', &buf [k++ - *ppos]))
- return -EFAULT;
- }
- } else if (op->flag & OPP_STRINGLIST) {
- char *tmp;
-
- tmp = kmalloc (i, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- s = tmp;
- *s++ = '\'';
- for (p = op->value; p < op->value + op->len; p++) {
- if (!*p) {
- strcpy(s, "' + '");
- s += 5;
- continue;
- }
- *s++ = *p;
- }
- strcpy(s, "'\n");
-
- if (copy_to_user(buf, tmp + k, count))
- return -EFAULT;
-
- kfree(tmp);
- k += count;
-
- } else if (op->flag & OPP_BINARY) {
- char buffer[10];
- u32 *first, *last;
- int first_off, last_cnt;
-
- first = ((u32 *)op->value) + k / 9;
- first_off = k % 9;
- last = ((u32 *)op->value) + (k + count - 1) / 9;
- last_cnt = (k + count) % 9;
- if (!last_cnt) last_cnt = 9;
-
- if (first == last) {
- sprintf (buffer, "%08x.", *first);
- if (copy_to_user(buf, buffer + first_off,
- last_cnt - first_off))
- return -EFAULT;
- buf += last_cnt - first_off;
- } else {
- for (q = first; q <= last; q++) {
- sprintf (buffer, "%08x.", *q);
- if (q == first) {
- if (copy_to_user(buf, buffer + first_off,
- 9 - first_off))
- return -EFAULT;
- buf += 9 - first_off;
- } else if (q == last) {
- if (copy_to_user(buf, buffer, last_cnt))
- return -EFAULT;
- buf += last_cnt;
- } else {
- if (copy_to_user(buf, buffer, 9))
- return -EFAULT;
- buf += 9;
- }
- }
- }
+ if ((i && !val) ||
+ (val >= ' ' && val <= '~'))
+ continue;
- if (last == (u32 *)(op->value + op->len - 4) && last_cnt == 9) {
- if (put_user('\n', (buf - 1)))
- return -EFAULT;
- }
+ return 0;
+ }
- k += count;
+ return 1;
+}
- } else if (op->flag & OPP_HEXSTRING) {
- char buffer[3];
+static int property_show(struct seq_file *f, void *v)
+{
+ struct property *prop = f->private;
+ void *pval;
+ int len;
- if ((k < i - 1) && (k & 1)) {
- sprintf (buffer, "%02x",
- (unsigned char) *(op->value + (k >> 1)) & 0xff);
- if (put_user(buffer[1], &buf[k++ - *ppos]))
- return -EFAULT;
- count--;
- }
+ len = prop->length;
+ pval = prop->value;
- for (; (count > 1) && (k < i - 1); k += 2) {
- sprintf (buffer, "%02x",
- (unsigned char) *(op->value + (k >> 1)) & 0xff);
- if (copy_to_user(buf + k - *ppos, buffer, 2))
- return -EFAULT;
- count -= 2;
- }
+ if (is_string(pval, len)) {
+ while (len > 0) {
+ int n = strlen(pval);
- if (count && (k < i - 1)) {
- sprintf (buffer, "%02x",
- (unsigned char) *(op->value + (k >> 1)) & 0xff);
- if (put_user(buffer[0], &buf[k++ - *ppos]))
- return -EFAULT;
- count--;
- }
+ seq_printf(f, "%s", (char *) pval);
- if (count) {
- if (put_user('\n', &buf [k++ - *ppos]))
- return -EFAULT;
- }
- }
- count = k - *ppos;
- *ppos = k;
- return count;
-}
+ /* Skip over the NULL byte too. */
+ pval += n + 1;
+ len -= n + 1;
-static ssize_t property_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- int i, j, k;
- char *p;
- u32 *q;
- void *b;
- openprom_property *op;
-
- if (*ppos >= 0xffffff || count >= 0xffffff)
- return -EINVAL;
- if (!filp->private_data) {
- i = property_read (filp, NULL, 0, NULL);
- if (i)
- return i;
- }
- k = *ppos;
- op = filp->private_data;
- if (!(op->flag & OPP_STRING)) {
- u32 *first, *last;
- int first_off, last_cnt;
- u32 mask, mask2;
- char tmp [9];
- int forcelen = 0;
-
- j = k % 9;
- for (i = 0; i < count; i++, j++) {
- if (j == 9) j = 0;
- if (!j) {
- char ctmp;
- if (get_user(ctmp, &buf[i]))
- return -EFAULT;
- if (ctmp != '.') {
- if (ctmp != '\n') {
- if (op->flag & OPP_BINARY)
- return -EINVAL;
- else
- goto write_try_string;
- } else {
- count = i + 1;
- forcelen = 1;
- break;
- }
- }
- } else {
- char ctmp;
- if (get_user(ctmp, &buf[i]))
- return -EFAULT;
- if (ctmp < '0' ||
- (ctmp > '9' && ctmp < 'A') ||
- (ctmp > 'F' && ctmp < 'a') ||
- ctmp > 'f') {
- if (op->flag & OPP_BINARY)
- return -EINVAL;
- else
- goto write_try_string;
- }
- }
- }
- op->flag |= OPP_BINARY;
- tmp [8] = 0;
- i = ((count + k + 8) / 9) << 2;
- if (op->alloclen <= i) {
- b = kmalloc (sizeof (openprom_property) + 2 * i,
- GFP_KERNEL);
- if (!b)
- return -ENOMEM;
- memcpy (b, filp->private_data,
- sizeof (openprom_property)
- + strlen (op->name) + op->alloclen);
- memset (b + sizeof (openprom_property)
- + strlen (op->name) + op->alloclen,
- 0, 2 * i - op->alloclen);
- op = b;
- op->alloclen = 2*i;
- b = filp->private_data;
- filp->private_data = op;
- kfree (b);
+ if (len > 0)
+ seq_printf(f, " + ");
}
- first = ((u32 *)op->value) + (k / 9);
- first_off = k % 9;
- last = (u32 *)(op->value + i);
- last_cnt = (k + count) % 9;
- if (first + 1 == last) {
- memset (tmp, '0', 8);
- if (copy_from_user(tmp + first_off, buf,
- (count + first_off > 8) ?
- 8 - first_off : count))
- return -EFAULT;
- mask = 0xffffffff;
- mask2 = 0xffffffff;
- for (j = 0; j < first_off; j++)
- mask >>= 1;
- for (j = 8 - count - first_off; j > 0; j--)
- mask2 <<= 1;
- mask &= mask2;
- if (mask) {
- *first &= ~mask;
- *first |= simple_strtoul (tmp, NULL, 16);
- op->flag |= OPP_DIRTY;
+ } else {
+ if (len & 3) {
+ while (len) {
+ len--;
+ if (len)
+ seq_printf(f, "%02x.",
+ *(unsigned char *) pval);
+ else
+ seq_printf(f, "%02x",
+ *(unsigned char *) pval);
+ pval++;
}
} else {
- op->flag |= OPP_DIRTY;
- for (q = first; q < last; q++) {
- if (q == first) {
- if (first_off < 8) {
- memset (tmp, '0', 8);
- if (copy_from_user(tmp + first_off,
- buf,
- 8 - first_off))
- return -EFAULT;
- mask = 0xffffffff;
- for (j = 0; j < first_off; j++)
- mask >>= 1;
- *q &= ~mask;
- *q |= simple_strtoul (tmp,NULL,16);
- }
- buf += 9;
- } else if ((q == last - 1) && last_cnt
- && (last_cnt < 8)) {
- memset (tmp, '0', 8);
- if (copy_from_user(tmp, buf, last_cnt))
- return -EFAULT;
- mask = 0xffffffff;
- for (j = 0; j < 8 - last_cnt; j++)
- mask <<= 1;
- *q &= ~mask;
- *q |= simple_strtoul (tmp, NULL, 16);
- buf += last_cnt;
- } else {
- char tchars[2 * sizeof(long) + 1];
-
- if (copy_from_user(tchars, buf, sizeof(tchars) - 1))
- return -EFAULT;
- tchars[sizeof(tchars) - 1] = '\0';
- *q = simple_strtoul (tchars, NULL, 16);
- buf += 9;
- }
- }
- }
- if (!forcelen) {
- if (op->len < i)
- op->len = i;
- } else
- op->len = i;
- *ppos += count;
- }
-write_try_string:
- if (!(op->flag & OPP_BINARY)) {
- if (!(op->flag & (OPP_QUOTED | OPP_NOTQUOTED))) {
- char ctmp;
-
- /* No way, if somebody starts writing from the middle,
- * we don't know whether he uses quotes around or not
- */
- if (k > 0)
- return -EINVAL;
- if (get_user(ctmp, buf))
- return -EFAULT;
- if (ctmp == '\'') {
- op->flag |= OPP_QUOTED;
- buf++;
- count--;
- (*ppos)++;
- if (!count) {
- op->flag |= OPP_STRING;
- return 1;
- }
- } else
- op->flag |= OPP_NOTQUOTED;
- }
- op->flag |= OPP_STRING;
- if (op->alloclen <= count + *ppos) {
- b = kmalloc (sizeof (openprom_property)
- + 2 * (count + *ppos), GFP_KERNEL);
- if (!b)
- return -ENOMEM;
- memcpy (b, filp->private_data,
- sizeof (openprom_property)
- + strlen (op->name) + op->alloclen);
- memset (b + sizeof (openprom_property)
- + strlen (op->name) + op->alloclen,
- 0, 2*(count - *ppos) - op->alloclen);
- op = b;
- op->alloclen = 2*(count + *ppos);
- b = filp->private_data;
- filp->private_data = op;
- kfree (b);
- }
- p = op->value + *ppos - ((op->flag & OPP_QUOTED) ? 1 : 0);
- if (copy_from_user(p, buf, count))
- return -EFAULT;
- op->flag |= OPP_DIRTY;
- for (i = 0; i < count; i++, p++)
- if (*p == '\n') {
- *p = 0;
- break;
+ while (len >= 4) {
+ len -= 4;
+
+ if (len)
+ seq_printf(f, "%08x.",
+ *(unsigned int *) pval);
+ else
+ seq_printf(f, "%08x",
+ *(unsigned int *) pval);
+ pval += 4;
}
- if (i < count) {
- op->len = p - op->value;
- *ppos += i + 1;
- if ((p > op->value) && (op->flag & OPP_QUOTED)
- && (*(p - 1) == '\''))
- op->len--;
- } else {
- if (p - op->value > op->len)
- op->len = p - op->value;
- *ppos += count;
}
}
- return *ppos - k;
+ seq_printf(f, "\n");
+
+ return 0;
}
-int property_release (struct inode *inode, struct file *filp)
+static void *property_start(struct seq_file *f, loff_t *pos)
{
- openprom_property *op = filp->private_data;
- int error;
- u32 node;
-
- if (!op)
- return 0;
- lock_kernel();
- node = nodes[ptr_nod(inode->u.generic_ip)].node;
- if (ptr_nod(inode->u.generic_ip) == aliases) {
- if ((op->flag & OPP_DIRTY) && (op->flag & OPP_STRING)) {
- char *p = op->name;
- int i = (op->value - op->name) - strlen (op->name) - 1;
- op->value [op->len] = 0;
- *(op->value - 1) = ' ';
- if (i) {
- for (p = op->value - i - 2; p >= op->name; p--)
- p[i] = *p;
- p = op->name + i;
- }
- memcpy (p - 8, "nvalias ", 8);
- prom_feval (p - 8);
- }
- } else if (op->flag & OPP_DIRTY) {
- if (op->flag & OPP_STRING) {
- op->value [op->len] = 0;
- error = prom_setprop (node, op->name,
- op->value, op->len + 1);
- if (error <= 0)
- printk (KERN_WARNING "openpromfs: "
- "Couldn't write property %s\n",
- op->name);
- } else if ((op->flag & OPP_BINARY) || !op->len) {
- error = prom_setprop (node, op->name,
- op->value, op->len);
- if (error <= 0)
- printk (KERN_WARNING "openpromfs: "
- "Couldn't write property %s\n",
- op->name);
- } else {
- printk (KERN_WARNING "openpromfs: "
- "Unknown property type of %s\n",
- op->name);
- }
+ if (*pos == 0)
+ return pos;
+ return NULL;
+}
+
+static void *property_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void property_stop(struct seq_file *f, void *v)
+{
+ /* Nothing to do */
+}
+
+static struct seq_operations property_op = {
+ .start = property_start,
+ .next = property_next,
+ .stop = property_stop,
+ .show = property_show
+};
+
+static int property_open(struct inode *inode, struct file *file)
+{
+ struct op_inode_info *oi = OP_I(inode);
+ int ret;
+
+ BUG_ON(oi->type != op_inode_prop);
+
+ ret = seq_open(file, &property_op);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = oi->u.prop;
}
- unlock_kernel();
- kfree (filp->private_data);
- return 0;
+ return ret;
}
static const struct file_operations openpromfs_prop_ops = {
- .read = property_read,
- .write = property_write,
- .release = property_release,
+ .open = property_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
};
-static const struct file_operations openpromfs_nodenum_ops = {
- .read = nodenum_read,
-};
+static int openpromfs_readdir(struct file *, void *, filldir_t);
static const struct file_operations openprom_operations = {
.read = generic_read_dir,
.readdir = openpromfs_readdir,
};
-static struct inode_operations openprom_alias_inode_operations = {
- .create = openpromfs_create,
- .lookup = openpromfs_lookup,
- .unlink = openpromfs_unlink,
-};
+static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, struct nameidata *);
static struct inode_operations openprom_inode_operations = {
.lookup = openpromfs_lookup,
};
-static int lookup_children(u16 n, const char * name, int len)
+static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
- int ret;
- u16 node;
- for (; n != 0xffff; n = nodes[n].next) {
- node = nodes[n].child;
- if (node != 0xffff) {
- char buffer[128];
- int i;
- char *p;
-
- while (node != 0xffff) {
- if (prom_getname (nodes[node].node,
- buffer, 128) >= 0) {
- i = strlen (buffer);
- if ((len == i)
- && !strncmp (buffer, name, len))
- return NODE2INO(node);
- p = strchr (buffer, '@');
- if (p && (len == p - buffer)
- && !strncmp (buffer, name, len))
- return NODE2INO(node);
- }
- node = nodes[node].next;
- }
- } else
- continue;
- ret = lookup_children (nodes[n].child, name, len);
- if (ret) return ret;
- }
- return 0;
-}
-
-static struct dentry *openpromfs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
-{
- int ino = 0;
-#define OPFSL_DIR 0
-#define OPFSL_PROPERTY 1
-#define OPFSL_NODENUM 2
- int type = 0;
- char buffer[128];
- char *p;
+ struct op_inode_info *ent_oi, *oi = OP_I(dir);
+ struct device_node *dp, *child;
+ struct property *prop;
+ enum op_inode_type ent_type;
+ union op_inode_data ent_data;
const char *name;
- u32 n;
- u16 dirnode;
- unsigned int len;
- int i;
struct inode *inode;
- char buffer2[64];
+ unsigned int ino;
+ int len;
- inode = NULL;
+ BUG_ON(oi->type != op_inode_node);
+
+ dp = oi->u.node;
+
name = dentry->d_name.name;
len = dentry->d_name.len;
- lock_kernel();
- if (name [0] == '.' && len == 5 && !strncmp (name + 1, "node", 4)) {
- ino = NODEP2INO(NODE(dir->i_ino).first_prop);
- type = OPFSL_NODENUM;
- }
- if (!ino) {
- u16 node = NODE(dir->i_ino).child;
- while (node != 0xffff) {
- if (prom_getname (nodes[node].node, buffer, 128) >= 0) {
- i = strlen (buffer);
- if (len == i && !strncmp (buffer, name, len)) {
- ino = NODE2INO(node);
- type = OPFSL_DIR;
- break;
- }
- p = strchr (buffer, '@');
- if (p && (len == p - buffer)
- && !strncmp (buffer, name, len)) {
- ino = NODE2INO(node);
- type = OPFSL_DIR;
- break;
- }
- }
- node = nodes[node].next;
- }
- }
- n = NODE(dir->i_ino).node;
- dirnode = dir->i_ino - OPENPROM_FIRST_INO;
- if (!ino) {
- int j = NODEP2INO(NODE(dir->i_ino).first_prop);
- if (dirnode != aliases) {
- for (p = prom_firstprop (n, buffer2);
- p && *p;
- p = prom_nextprop (n, p, buffer2)) {
- j++;
- if ((len == strlen (p))
- && !strncmp (p, name, len)) {
- ino = j;
- type = OPFSL_PROPERTY;
- break;
- }
- }
- } else {
- int k;
- for (k = 0; k < aliases_nodes; k++) {
- j++;
- if (alias_names [k]
- && (len == strlen (alias_names [k]))
- && !strncmp (alias_names [k], name, len)) {
- ino = j;
- type = OPFSL_PROPERTY;
- break;
- }
- }
+
+ mutex_lock(&op_mutex);
+
+ child = dp->child;
+ while (child) {
+ int n = strlen(child->path_component_name);
+
+ if (len == n &&
+ !strncmp(child->path_component_name, name, len)) {
+ ent_type = op_inode_node;
+ ent_data.node = child;
+ ino = child->unique_id;
+ goto found;
}
+ child = child->sibling;
}
- if (!ino) {
- ino = lookup_children (NODE(dir->i_ino).child, name, len);
- if (ino)
- type = OPFSL_DIR;
- else {
- unlock_kernel();
- return ERR_PTR(-ENOENT);
+
+ prop = dp->properties;
+ while (prop) {
+ int n = strlen(prop->name);
+
+ if (len == n && !strncmp(prop->name, name, len)) {
+ ent_type = op_inode_prop;
+ ent_data.prop = prop;
+ ino = prop->unique_id;
+ goto found;
}
+
+ prop = prop->next;
}
- inode = iget (dir->i_sb, ino);
- unlock_kernel();
+
+ mutex_unlock(&op_mutex);
+ return ERR_PTR(-ENOENT);
+
+found:
+ inode = iget(dir->i_sb, ino);
+ mutex_unlock(&op_mutex);
if (!inode)
return ERR_PTR(-EINVAL);
- switch (type) {
- case OPFSL_DIR:
+ ent_oi = OP_I(inode);
+ ent_oi->type = ent_type;
+ ent_oi->u = ent_data;
+
+ switch (ent_type) {
+ case op_inode_node:
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (ino == OPENPROM_FIRST_INO + aliases) {
- inode->i_mode |= S_IWUSR;
- inode->i_op = &openprom_alias_inode_operations;
- } else
- inode->i_op = &openprom_inode_operations;
+ inode->i_op = &openprom_inode_operations;
inode->i_fop = &openprom_operations;
inode->i_nlink = 2;
break;
- case OPFSL_NODENUM:
- inode->i_mode = S_IFREG | S_IRUGO;
- inode->i_fop = &openpromfs_nodenum_ops;
- inode->i_nlink = 1;
- inode->u.generic_ip = (void *)(long)(n);
- break;
- case OPFSL_PROPERTY:
- if ((dirnode == options) && (len == 17)
- && !strncmp (name, "security-password", 17))
+ case op_inode_prop:
+ if (!strcmp(dp->name, "options") && (len == 17) &&
+ !strncmp (name, "security-password", 17))
inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
- else {
+ else
inode->i_mode = S_IFREG | S_IRUGO;
- if (dirnode == options || dirnode == aliases) {
- if (len != 4 || strncmp (name, "name", 4))
- inode->i_mode |= S_IWUSR;
- }
- }
inode->i_fop = &openpromfs_prop_ops;
inode->i_nlink = 1;
- if (inode->i_size < 0)
- inode->i_size = 0;
- inode->u.generic_ip = (void *)(long)(((u16)dirnode) |
- (((u16)(ino - NODEP2INO(NODE(dir->i_ino).first_prop) - 1)) << 16));
+ inode->i_size = ent_oi->u.prop->length;
break;
}
@@ -781,237 +263,89 @@ static struct dentry *openpromfs_lookup(struct inode * dir, struct dentry *dentr
static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
struct inode *inode = filp->f_dentry->d_inode;
+ struct op_inode_info *oi = OP_I(inode);
+ struct device_node *dp = oi->u.node;
+ struct device_node *child;
+ struct property *prop;
unsigned int ino;
- u32 n;
- int i, j;
- char buffer[128];
- u16 node;
- char *p;
- char buffer2[64];
-
- lock_kernel();
+ int i;
+
+ mutex_lock(&op_mutex);
ino = inode->i_ino;
i = filp->f_pos;
switch (i) {
case 0:
- if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ goto out;
i++;
filp->f_pos++;
/* fall thru */
case 1:
- if (filldir(dirent, "..", 2, i,
- (NODE(ino).parent == 0xffff) ?
- OPENPROM_ROOT_INO : NODE2INO(NODE(ino).parent), DT_DIR) < 0)
+ if (filldir(dirent, "..", 2, i,
+ (dp->parent == NULL ?
+ OPENPROM_ROOT_INO :
+ dp->parent->unique_id), DT_DIR) < 0)
goto out;
i++;
filp->f_pos++;
/* fall thru */
default:
i -= 2;
- node = NODE(ino).child;
- while (i && node != 0xffff) {
- node = nodes[node].next;
+
+ /* First, the children nodes as directories. */
+ child = dp->child;
+ while (i && child) {
+ child = child->sibling;
i--;
}
- while (node != 0xffff) {
- if (prom_getname (nodes[node].node, buffer, 128) < 0)
- goto out;
- if (filldir(dirent, buffer, strlen(buffer),
- filp->f_pos, NODE2INO(node), DT_DIR) < 0)
+ while (child) {
+ if (filldir(dirent,
+ child->path_component_name,
+ strlen(child->path_component_name),
+ filp->f_pos, child->unique_id, DT_DIR) < 0)
goto out;
+
filp->f_pos++;
- node = nodes[node].next;
+ child = child->sibling;
}
- j = NODEP2INO(NODE(ino).first_prop);
- if (!i) {
- if (filldir(dirent, ".node", 5, filp->f_pos, j, DT_REG) < 0)
+
+ /* Next, the properties as files. */
+ prop = dp->properties;
+ while (i && prop) {
+ prop = prop->next;
+ i--;
+ }
+ while (prop) {
+ if (filldir(dirent, prop->name, strlen(prop->name),
+ filp->f_pos, prop->unique_id, DT_REG) < 0)
goto out;
+
filp->f_pos++;
- } else
- i--;
- n = NODE(ino).node;
- if (ino == OPENPROM_FIRST_INO + aliases) {
- for (j++; i < aliases_nodes; i++, j++) {
- if (alias_names [i]) {
- if (filldir (dirent, alias_names [i],
- strlen (alias_names [i]),
- filp->f_pos, j, DT_REG) < 0) goto out;
- filp->f_pos++;
- }
- }
- } else {
- for (p = prom_firstprop (n, buffer2);
- p && *p;
- p = prom_nextprop (n, p, buffer2)) {
- j++;
- if (i) i--;
- else {
- if (filldir(dirent, p, strlen(p),
- filp->f_pos, j, DT_REG) < 0)
- goto out;
- filp->f_pos++;
- }
- }
+ prop = prop->next;
}
}
out:
- unlock_kernel();
- return 0;
-}
-
-static int openpromfs_create (struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
-{
- char *p;
- struct inode *inode;
-
- if (!dir)
- return -ENOENT;
- if (dentry->d_name.len > 256)
- return -EINVAL;
- p = kmalloc (dentry->d_name.len + 1, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- strncpy (p, dentry->d_name.name, dentry->d_name.len);
- p [dentry->d_name.len] = 0;
- lock_kernel();
- if (aliases_nodes == ALIASES_NNODES) {
- kfree(p);
- unlock_kernel();
- return -EIO;
- }
- alias_names [aliases_nodes++] = p;
- inode = iget (dir->i_sb,
- NODEP2INO(NODE(dir->i_ino).first_prop) + aliases_nodes);
- if (!inode) {
- unlock_kernel();
- return -EINVAL;
- }
- inode->i_mode = S_IFREG | S_IRUGO | S_IWUSR;
- inode->i_fop = &openpromfs_prop_ops;
- inode->i_nlink = 1;
- if (inode->i_size < 0) inode->i_size = 0;
- inode->u.generic_ip = (void *)(long)(((u16)aliases) |
- (((u16)(aliases_nodes - 1)) << 16));
- unlock_kernel();
- d_instantiate(dentry, inode);
+ mutex_unlock(&op_mutex);
return 0;
}
-static int openpromfs_unlink (struct inode *dir, struct dentry *dentry)
-{
- unsigned int len;
- char *p;
- const char *name;
- int i;
-
- name = dentry->d_name.name;
- len = dentry->d_name.len;
- lock_kernel();
- for (i = 0; i < aliases_nodes; i++)
- if ((strlen (alias_names [i]) == len)
- && !strncmp (name, alias_names[i], len)) {
- char buffer[512];
-
- p = alias_names [i];
- alias_names [i] = NULL;
- kfree (p);
- strcpy (buffer, "nvunalias ");
- memcpy (buffer + 10, name, len);
- buffer [10 + len] = 0;
- prom_feval (buffer);
- }
- unlock_kernel();
- return 0;
-}
+static kmem_cache_t *op_inode_cachep;
-/* {{{ init section */
-static int __init check_space (u16 n)
+static struct inode *openprom_alloc_inode(struct super_block *sb)
{
- unsigned long pages;
+ struct op_inode_info *oi;
- if ((1 << alloced) * PAGE_SIZE < (n + 2) * sizeof(openpromfs_node)) {
- pages = __get_free_pages (GFP_KERNEL, alloced + 1);
- if (!pages)
- return -1;
+ oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL);
+ if (!oi)
+ return NULL;
- if (nodes) {
- memcpy ((char *)pages, nodes,
- (1 << alloced) * PAGE_SIZE);
- free_pages ((unsigned long)nodes, alloced);
- }
- alloced++;
- nodes = (openpromfs_node *)pages;
- }
- return 0;
+ return &oi->vfs_inode;
}
-static u16 __init get_nodes (u16 parent, u32 node)
+static void openprom_destroy_inode(struct inode *inode)
{
- char *p;
- u16 n = last_node++, i;
- char buffer[64];
-
- if (check_space (n) < 0)
- return 0xffff;
- nodes[n].parent = parent;
- nodes[n].node = node;
- nodes[n].next = 0xffff;
- nodes[n].child = 0xffff;
- nodes[n].first_prop = first_prop++;
- if (!parent) {
- char buffer[8];
- int j;
-
- if ((j = prom_getproperty (node, "name", buffer, 8)) >= 0) {
- buffer[j] = 0;
- if (!strcmp (buffer, "options"))
- options = n;
- else if (!strcmp (buffer, "aliases"))
- aliases = n;
- }
- }
- if (n != aliases)
- for (p = prom_firstprop (node, buffer);
- p && p != (char *)-1 && *p;
- p = prom_nextprop (node, p, buffer))
- first_prop++;
- else {
- char *q;
- for (p = prom_firstprop (node, buffer);
- p && p != (char *)-1 && *p;
- p = prom_nextprop (node, p, buffer)) {
- if (aliases_nodes == ALIASES_NNODES)
- break;
- for (i = 0; i < aliases_nodes; i++)
- if (!strcmp (p, alias_names [i]))
- break;
- if (i < aliases_nodes)
- continue;
- q = kmalloc (strlen (p) + 1, GFP_KERNEL);
- if (!q)
- return 0xffff;
- strcpy (q, p);
- alias_names [aliases_nodes++] = q;
- }
- first_prop += ALIASES_NNODES;
- }
- node = prom_getchild (node);
- if (node) {
- parent = get_nodes (n, node);
- if (parent == 0xffff)
- return 0xffff;
- nodes[n].child = parent;
- while ((node = prom_getsibling (node)) != 0) {
- i = get_nodes (n, node);
- if (i == 0xffff)
- return 0xffff;
- nodes[parent].next = i;
- parent = i;
- }
- }
- return n;
+ kmem_cache_free(op_inode_cachep, OP_I(inode));
}
static void openprom_read_inode(struct inode * inode)
@@ -1031,6 +365,8 @@ static int openprom_remount(struct super_block *sb, int *flags, char *data)
}
static struct super_operations openprom_sops = {
+ .alloc_inode = openprom_alloc_inode,
+ .destroy_inode = openprom_destroy_inode,
.read_inode = openprom_read_inode,
.statfs = simple_statfs,
.remount_fs = openprom_remount,
@@ -1038,7 +374,8 @@ static struct super_operations openprom_sops = {
static int openprom_fill_super(struct super_block *s, void *data, int silent)
{
- struct inode * root_inode;
+ struct inode *root_inode;
+ struct op_inode_info *oi;
s->s_flags |= MS_NOATIME;
s->s_blocksize = 1024;
@@ -1049,6 +386,11 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
root_inode = iget(s, OPENPROM_ROOT_INO);
if (!root_inode)
goto out_no_root;
+
+ oi = OP_I(root_inode);
+ oi->type = op_inode_node;
+ oi->u.node = of_find_node_by_path("/");
+
s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto out_no_root;
@@ -1073,29 +415,39 @@ static struct file_system_type openprom_fs_type = {
.kill_sb = kill_anon_super,
};
+static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags)
+{
+ struct op_inode_info *oi = (struct op_inode_info *) data;
+
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&oi->vfs_inode);
+}
+
static int __init init_openprom_fs(void)
{
- nodes = (openpromfs_node *)__get_free_pages(GFP_KERNEL, 0);
- if (!nodes) {
- printk (KERN_WARNING "openpromfs: can't get free page\n");
- return -EIO;
- }
- if (get_nodes (0xffff, prom_root_node) == 0xffff) {
- printk (KERN_WARNING "openpromfs: couldn't setup tree\n");
- return -EIO;
- }
- nodes[last_node].first_prop = first_prop;
- return register_filesystem(&openprom_fs_type);
+ int err;
+
+ op_inode_cachep = kmem_cache_create("op_inode_cache",
+ sizeof(struct op_inode_info),
+ 0,
+ (SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ op_inode_init_once, NULL);
+ if (!op_inode_cachep)
+ return -ENOMEM;
+
+ err = register_filesystem(&openprom_fs_type);
+ if (err)
+ kmem_cache_destroy(op_inode_cachep);
+
+ return err;
}
static void __exit exit_openprom_fs(void)
{
- int i;
unregister_filesystem(&openprom_fs_type);
- free_pages ((unsigned long)nodes, alloced);
- for (i = 0; i < aliases_nodes; i++)
- kfree (alias_names [i]);
- nodes = NULL;
+ kmem_cache_destroy(op_inode_cachep);
}
module_init(init_openprom_fs)
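For orientation (not part of the patch): the openpromfs conversion above replaces the hand-rolled node table with per-inode state kept in a slab cache, the standard alloc_inode/destroy_inode container pattern. The op_inode_info structure and the OP_I() accessor are defined in a hunk not shown here; judging from their uses above (oi->type, oi->u.node, op_inode_node), a likely shape is roughly:

    struct op_inode_info {
        struct inode        vfs_inode;  /* embedded VFS inode, handed back to the VFS */
        enum op_inode_type  type;       /* op_inode_node, plus presumably a property variant */
        union {
            struct device_node  *node;
            struct property     *prop;
        } u;
    };

    static inline struct op_inode_info *OP_I(struct inode *inode)
    {
        /* recover the containing op_inode_info from the embedded inode */
        return container_of(inode, struct op_inode_info, vfs_inode);
    }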
diff --git a/fs/pnode.c b/fs/pnode.c
index 37b568ed0e05..da42ee61c1df 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -53,8 +53,7 @@ static int do_make_slave(struct vfsmount *mnt)
if (master) {
list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
slave_mnt->mnt_master = master;
- list_del(&mnt->mnt_slave);
- list_add(&mnt->mnt_slave, &master->mnt_slave_list);
+ list_move(&mnt->mnt_slave, &master->mnt_slave_list);
list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
} else {
@@ -283,10 +282,8 @@ static void __propagate_umount(struct vfsmount *mnt)
* umount the child only if the child has no
* other children
*/
- if (child && list_empty(&child->mnt_mounts)) {
- list_del(&child->mnt_hash);
- list_add_tail(&child->mnt_hash, &mnt->mnt_hash);
- }
+ if (child && list_empty(&child->mnt_mounts))
+ list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
}
}
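Both pnode.c hunks are mechanical substitutions: list_move() and list_move_tail() are existing &lt;linux/list.h&gt; helpers that simply combine the delete with the re-add, so no behaviour changes. Their definitions are essentially:

    static inline void list_move(struct list_head *list, struct list_head *head)
    {
        /* unlink from the current list, then insert after head */
        __list_del(list->prev, list->next);
        list_add(list, head);
    }

    static inline void list_move_tail(struct list_head *list, struct list_head *head)
    {
        /* unlink from the current list, then insert before head */
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
    }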
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6afff725a8c9..6ba7785319de 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -74,6 +74,16 @@
#include <linux/poll.h>
#include "internal.h"
+/* NOTE:
+ * Implementing inode permission operations in /proc is almost
+ * certainly an error. Permission checks need to happen during
+ * each system call, not at open time. The reason is that most of
+ * what we wish to check for permissions in /proc varies at runtime.
+ *
+ * The classic example of a problem is opening file descriptors
+ * in /proc for a task before it execs a suid executable.
+ */
+
/*
* For hysterical raisins we keep the same inumbers as in the old procfs.
* Feel free to change the macro below - just keep the range distinct from
@@ -121,6 +131,8 @@ enum pid_directory_inos {
PROC_TGID_ATTR_PREV,
PROC_TGID_ATTR_EXEC,
PROC_TGID_ATTR_FSCREATE,
+ PROC_TGID_ATTR_KEYCREATE,
+ PROC_TGID_ATTR_SOCKCREATE,
#endif
#ifdef CONFIG_AUDITSYSCALL
PROC_TGID_LOGINUID,
@@ -162,6 +174,8 @@ enum pid_directory_inos {
PROC_TID_ATTR_PREV,
PROC_TID_ATTR_EXEC,
PROC_TID_ATTR_FSCREATE,
+ PROC_TID_ATTR_KEYCREATE,
+ PROC_TID_ATTR_SOCKCREATE,
#endif
#ifdef CONFIG_AUDITSYSCALL
PROC_TID_LOGINUID,
@@ -173,6 +187,9 @@ enum pid_directory_inos {
PROC_TID_FD_DIR = 0x8000, /* 0x8000-0xffff */
};
+/* Worst case buffer size needed for holding an integer. */
+#define PROC_NUMBUF 10
+
struct pid_entry {
int type;
int len;
@@ -275,6 +292,8 @@ static struct pid_entry tgid_attr_stuff[] = {
E(PROC_TGID_ATTR_PREV, "prev", S_IFREG|S_IRUGO),
E(PROC_TGID_ATTR_EXEC, "exec", S_IFREG|S_IRUGO|S_IWUGO),
E(PROC_TGID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
+ E(PROC_TGID_ATTR_KEYCREATE, "keycreate", S_IFREG|S_IRUGO|S_IWUGO),
+ E(PROC_TGID_ATTR_SOCKCREATE, "sockcreate", S_IFREG|S_IRUGO|S_IWUGO),
{0,0,NULL,0}
};
static struct pid_entry tid_attr_stuff[] = {
@@ -282,6 +301,8 @@ static struct pid_entry tid_attr_stuff[] = {
E(PROC_TID_ATTR_PREV, "prev", S_IFREG|S_IRUGO),
E(PROC_TID_ATTR_EXEC, "exec", S_IFREG|S_IRUGO|S_IWUGO),
E(PROC_TID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
+ E(PROC_TID_ATTR_KEYCREATE, "keycreate", S_IFREG|S_IRUGO|S_IWUGO),
+ E(PROC_TID_ATTR_SOCKCREATE, "sockcreate", S_IFREG|S_IRUGO|S_IWUGO),
{0,0,NULL,0}
};
#endif
@@ -290,12 +311,15 @@ static struct pid_entry tid_attr_stuff[] = {
static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
- struct task_struct *task = proc_task(inode);
- struct files_struct *files;
+ struct task_struct *task = get_proc_task(inode);
+ struct files_struct *files = NULL;
struct file *file;
- int fd = proc_type(inode) - PROC_TID_FD_DIR;
+ int fd = proc_fd(inode);
- files = get_files_struct(task);
+ if (task) {
+ files = get_files_struct(task);
+ put_task_struct(task);
+ }
if (files) {
/*
* We are not taking a ref to the file structure, so we must
@@ -327,29 +351,33 @@ static struct fs_struct *get_fs_struct(struct task_struct *task)
return fs;
}
-static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+static int get_nr_threads(struct task_struct *tsk)
{
- struct fs_struct *fs = get_fs_struct(proc_task(inode));
- int result = -ENOENT;
- if (fs) {
- read_lock(&fs->lock);
- *mnt = mntget(fs->pwdmnt);
- *dentry = dget(fs->pwd);
- read_unlock(&fs->lock);
- result = 0;
- put_fs_struct(fs);
+ /* Must be called with the rcu_read_lock held */
+ unsigned long flags;
+ int count = 0;
+
+ if (lock_task_sighand(tsk, &flags)) {
+ count = atomic_read(&tsk->signal->count);
+ unlock_task_sighand(tsk, &flags);
}
- return result;
+ return count;
}
-static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
- struct fs_struct *fs = get_fs_struct(proc_task(inode));
+ struct task_struct *task = get_proc_task(inode);
+ struct fs_struct *fs = NULL;
int result = -ENOENT;
+
+ if (task) {
+ fs = get_fs_struct(task);
+ put_task_struct(task);
+ }
if (fs) {
read_lock(&fs->lock);
- *mnt = mntget(fs->rootmnt);
- *dentry = dget(fs->root);
+ *mnt = mntget(fs->pwdmnt);
+ *dentry = dget(fs->pwd);
read_unlock(&fs->lock);
result = 0;
put_fs_struct(fs);
@@ -357,42 +385,16 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
return result;
}
-
-/* Same as proc_root_link, but this addionally tries to get fs from other
- * threads in the group */
-static int proc_task_root_link(struct inode *inode, struct dentry **dentry,
- struct vfsmount **mnt)
+static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
- struct fs_struct *fs;
+ struct task_struct *task = get_proc_task(inode);
+ struct fs_struct *fs = NULL;
int result = -ENOENT;
- struct task_struct *leader = proc_task(inode);
- task_lock(leader);
- fs = leader->fs;
- if (fs) {
- atomic_inc(&fs->count);
- task_unlock(leader);
- } else {
- /* Try to get fs from other threads */
- task_unlock(leader);
- read_lock(&tasklist_lock);
- if (pid_alive(leader)) {
- struct task_struct *task = leader;
-
- while ((task = next_thread(task)) != leader) {
- task_lock(task);
- fs = task->fs;
- if (fs) {
- atomic_inc(&fs->count);
- task_unlock(task);
- break;
- }
- task_unlock(task);
- }
- }
- read_unlock(&tasklist_lock);
+ if (task) {
+ fs = get_fs_struct(task);
+ put_task_struct(task);
}
-
if (fs) {
read_lock(&fs->lock);
*mnt = mntget(fs->rootmnt);
@@ -404,7 +406,6 @@ static int proc_task_root_link(struct inode *inode, struct dentry **dentry,
return result;
}
-
#define MAY_PTRACE(task) \
(task == current || \
(task->parent == current && \
@@ -535,142 +536,22 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
/************************************************************************/
/* permission checks */
-
-/* If the process being read is separated by chroot from the reading process,
- * don't let the reader access the threads.
- *
- * note: this does dput(root) and mntput(vfsmnt) on exit.
- */
-static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
-{
- struct dentry *de, *base;
- struct vfsmount *our_vfsmnt, *mnt;
- int res = 0;
-
- read_lock(&current->fs->lock);
- our_vfsmnt = mntget(current->fs->rootmnt);
- base = dget(current->fs->root);
- read_unlock(&current->fs->lock);
-
- spin_lock(&vfsmount_lock);
- de = root;
- mnt = vfsmnt;
-
- while (mnt != our_vfsmnt) {
- if (mnt == mnt->mnt_parent)
- goto out;
- de = mnt->mnt_mountpoint;
- mnt = mnt->mnt_parent;
- }
-
- if (!is_subdir(de, base))
- goto out;
- spin_unlock(&vfsmount_lock);
-
-exit:
- dput(base);
- mntput(our_vfsmnt);
- dput(root);
- mntput(vfsmnt);
- return res;
-out:
- spin_unlock(&vfsmount_lock);
- res = -EACCES;
- goto exit;
-}
-
-static int proc_check_root(struct inode *inode)
-{
- struct dentry *root;
- struct vfsmount *vfsmnt;
-
- if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */
- return -ENOENT;
- return proc_check_chroot(root, vfsmnt);
-}
-
-static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
-{
- if (generic_permission(inode, mask, NULL) != 0)
- return -EACCES;
- return proc_check_root(inode);
-}
-
-static int proc_task_permission(struct inode *inode, int mask, struct nameidata *nd)
-{
- struct dentry *root;
- struct vfsmount *vfsmnt;
-
- if (generic_permission(inode, mask, NULL) != 0)
- return -EACCES;
-
- if (proc_task_root_link(inode, &root, &vfsmnt))
- return -ENOENT;
-
- return proc_check_chroot(root, vfsmnt);
-}
-
-extern struct seq_operations proc_pid_maps_op;
-static int maps_open(struct inode *inode, struct file *file)
-{
- struct task_struct *task = proc_task(inode);
- int ret = seq_open(file, &proc_pid_maps_op);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = task;
- }
- return ret;
-}
-
-static struct file_operations proc_maps_operations = {
- .open = maps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-#ifdef CONFIG_NUMA
-extern struct seq_operations proc_pid_numa_maps_op;
-static int numa_maps_open(struct inode *inode, struct file *file)
-{
- struct task_struct *task = proc_task(inode);
- int ret = seq_open(file, &proc_pid_numa_maps_op);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = task;
- }
- return ret;
-}
-
-static struct file_operations proc_numa_maps_operations = {
- .open = numa_maps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-#endif
-
-#ifdef CONFIG_MMU
-extern struct seq_operations proc_pid_smaps_op;
-static int smaps_open(struct inode *inode, struct file *file)
+static int proc_fd_access_allowed(struct inode *inode)
{
- struct task_struct *task = proc_task(inode);
- int ret = seq_open(file, &proc_pid_smaps_op);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = task;
+ struct task_struct *task;
+ int allowed = 0;
+ /* Allow access to a task's file descriptors if it is us or we
+ * may use ptrace to attach to the process and find out that
+ * information.
+ */
+ task = get_proc_task(inode);
+ if (task) {
+ allowed = ptrace_may_attach(task);
+ put_task_struct(task);
}
- return ret;
+ return allowed;
}
-static struct file_operations proc_smaps_operations = {
- .open = smaps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-#endif
-
extern struct seq_operations mounts_op;
struct proc_mounts {
struct seq_file m;
@@ -679,16 +560,19 @@ struct proc_mounts {
static int mounts_open(struct inode *inode, struct file *file)
{
- struct task_struct *task = proc_task(inode);
- struct namespace *namespace;
+ struct task_struct *task = get_proc_task(inode);
+ struct namespace *namespace = NULL;
struct proc_mounts *p;
int ret = -EINVAL;
- task_lock(task);
- namespace = task->namespace;
- if (namespace)
- get_namespace(namespace);
- task_unlock(task);
+ if (task) {
+ task_lock(task);
+ namespace = task->namespace;
+ if (namespace)
+ get_namespace(namespace);
+ task_unlock(task);
+ put_task_struct(task);
+ }
if (namespace) {
ret = -ENOMEM;
@@ -745,17 +629,21 @@ static struct file_operations proc_mounts_operations = {
extern struct seq_operations mountstats_op;
static int mountstats_open(struct inode *inode, struct file *file)
{
- struct task_struct *task = proc_task(inode);
int ret = seq_open(file, &mountstats_op);
if (!ret) {
struct seq_file *m = file->private_data;
- struct namespace *namespace;
- task_lock(task);
- namespace = task->namespace;
- if (namespace)
- get_namespace(namespace);
- task_unlock(task);
+ struct namespace *namespace = NULL;
+ struct task_struct *task = get_proc_task(inode);
+
+ if (task) {
+ task_lock(task);
+ namespace = task->namespace;
+ if (namespace)
+ get_namespace(namespace);
+ task_unlock(task);
+ put_task_struct(task);
+ }
if (namespace)
m->private = namespace;
@@ -782,18 +670,27 @@ static ssize_t proc_info_read(struct file * file, char __user * buf,
struct inode * inode = file->f_dentry->d_inode;
unsigned long page;
ssize_t length;
- struct task_struct *task = proc_task(inode);
+ struct task_struct *task = get_proc_task(inode);
+
+ length = -ESRCH;
+ if (!task)
+ goto out_no_task;
if (count > PROC_BLOCK_SIZE)
count = PROC_BLOCK_SIZE;
+
+ length = -ENOMEM;
if (!(page = __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
+ goto out;
length = PROC_I(inode)->op.proc_read(task, (char*)page);
if (length >= 0)
length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
free_page(page);
+out:
+ put_task_struct(task);
+out_no_task:
return length;
}
@@ -810,12 +707,15 @@ static int mem_open(struct inode* inode, struct file* file)
static ssize_t mem_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = proc_task(file->f_dentry->d_inode);
+ struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
char *page;
unsigned long src = *ppos;
int ret = -ESRCH;
struct mm_struct *mm;
+ if (!task)
+ goto out_no_task;
+
if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
goto out;
@@ -865,6 +765,8 @@ out_put:
out_free:
free_page((unsigned long) page);
out:
+ put_task_struct(task);
+out_no_task:
return ret;
}
@@ -877,15 +779,20 @@ static ssize_t mem_write(struct file * file, const char * buf,
{
int copied = 0;
char *page;
- struct task_struct *task = proc_task(file->f_dentry->d_inode);
+ struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
unsigned long dst = *ppos;
+ copied = -ESRCH;
+ if (!task)
+ goto out_no_task;
+
if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
- return -ESRCH;
+ goto out;
+ copied = -ENOMEM;
page = (char *)__get_free_page(GFP_USER);
if (!page)
- return -ENOMEM;
+ goto out;
while (count > 0) {
int this_len, retval;
@@ -908,6 +815,9 @@ static ssize_t mem_write(struct file * file, const char * buf,
}
*ppos = dst;
free_page((unsigned long) page);
+out:
+ put_task_struct(task);
+out_no_task:
return copied;
}
#endif
@@ -938,13 +848,18 @@ static struct file_operations proc_mem_operations = {
static ssize_t oom_adjust_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = proc_task(file->f_dentry->d_inode);
- char buffer[8];
+ struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ char buffer[PROC_NUMBUF];
size_t len;
- int oom_adjust = task->oomkilladj;
+ int oom_adjust;
loff_t __ppos = *ppos;
- len = sprintf(buffer, "%i\n", oom_adjust);
+ if (!task)
+ return -ESRCH;
+ oom_adjust = task->oomkilladj;
+ put_task_struct(task);
+
+ len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
if (__ppos >= len)
return 0;
if (count > len-__ppos)
@@ -958,15 +873,15 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = proc_task(file->f_dentry->d_inode);
- char buffer[8], *end;
+ struct task_struct *task;
+ char buffer[PROC_NUMBUF], *end;
int oom_adjust;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- memset(buffer, 0, 8);
- if (count > 6)
- count = 6;
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
oom_adjust = simple_strtol(buffer, &end, 0);
@@ -974,7 +889,11 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
return -EINVAL;
if (*end == '\n')
end++;
+ task = get_proc_task(file->f_dentry->d_inode);
+ if (!task)
+ return -ESRCH;
task->oomkilladj = oom_adjust;
+ put_task_struct(task);
if (end - buffer == 0)
return -EIO;
return end - buffer;
@@ -985,22 +904,21 @@ static struct file_operations proc_oom_adjust_operations = {
.write = oom_adjust_write,
};
-static struct inode_operations proc_mem_inode_operations = {
- .permission = proc_permission,
-};
-
#ifdef CONFIG_AUDITSYSCALL
#define TMPBUFLEN 21
static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
struct inode * inode = file->f_dentry->d_inode;
- struct task_struct *task = proc_task(inode);
+ struct task_struct *task = get_proc_task(inode);
ssize_t length;
char tmpbuf[TMPBUFLEN];
+ if (!task)
+ return -ESRCH;
length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
audit_get_loginuid(task->audit_context));
+ put_task_struct(task);
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
@@ -1010,13 +928,12 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
struct inode * inode = file->f_dentry->d_inode;
char *page, *tmp;
ssize_t length;
- struct task_struct *task = proc_task(inode);
uid_t loginuid;
if (!capable(CAP_AUDIT_CONTROL))
return -EPERM;
- if (current != task)
+ if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
return -EPERM;
if (count >= PAGE_SIZE)
@@ -1040,7 +957,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
goto out_free_page;
}
- length = audit_set_loginuid(task, loginuid);
+ length = audit_set_loginuid(current, loginuid);
if (likely(length == 0))
length = count;
@@ -1059,13 +976,16 @@ static struct file_operations proc_loginuid_operations = {
static ssize_t seccomp_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *tsk = proc_task(file->f_dentry->d_inode);
+ struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
char __buf[20];
loff_t __ppos = *ppos;
size_t len;
+ if (!tsk)
+ return -ESRCH;
/* no need to print the trailing zero, so use only len */
len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
+ put_task_struct(tsk);
if (__ppos >= len)
return 0;
if (count > len - __ppos)
@@ -1079,29 +999,43 @@ static ssize_t seccomp_read(struct file *file, char __user *buf,
static ssize_t seccomp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *tsk = proc_task(file->f_dentry->d_inode);
+ struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
char __buf[20], *end;
unsigned int seccomp_mode;
+ ssize_t result;
+
+ result = -ESRCH;
+ if (!tsk)
+ goto out_no_task;
/* can set it only once to be even more secure */
+ result = -EPERM;
if (unlikely(tsk->seccomp.mode))
- return -EPERM;
+ goto out;
+ result = -EFAULT;
memset(__buf, 0, sizeof(__buf));
count = min(count, sizeof(__buf) - 1);
if (copy_from_user(__buf, buf, count))
- return -EFAULT;
+ goto out;
+
seccomp_mode = simple_strtoul(__buf, &end, 0);
if (*end == '\n')
end++;
+ result = -EINVAL;
if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
tsk->seccomp.mode = seccomp_mode;
set_tsk_thread_flag(tsk, TIF_SECCOMP);
} else
- return -EINVAL;
+ goto out;
+ result = -EIO;
if (unlikely(!(end - __buf)))
- return -EIO;
- return end - __buf;
+ goto out;
+ result = end - __buf;
+out:
+ put_task_struct(tsk);
+out_no_task:
+ return result;
}
static struct file_operations proc_seccomp_operations = {
@@ -1118,10 +1052,8 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
/* We don't need a base pointer in the /proc filesystem */
path_release(nd);
- if (current->fsuid != inode->i_uid && !capable(CAP_DAC_OVERRIDE))
- goto out;
- error = proc_check_root(inode);
- if (error)
+ /* Are we allowed to snoop on the task's file descriptors? */
+ if (!proc_fd_access_allowed(inode))
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
@@ -1163,12 +1095,8 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
struct dentry *de;
struct vfsmount *mnt = NULL;
- lock_kernel();
-
- if (current->fsuid != inode->i_uid && !capable(CAP_DAC_OVERRIDE))
- goto out;
- error = proc_check_root(inode);
- if (error)
+ /* Are we allowed to snoop on the task's file descriptors? */
+ if (!proc_fd_access_allowed(inode))
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
@@ -1179,7 +1107,6 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
dput(de);
mntput(mnt);
out:
- unlock_kernel();
return error;
}
@@ -1188,21 +1115,20 @@ static struct inode_operations proc_pid_link_inode_operations = {
.follow_link = proc_pid_follow_link
};
-#define NUMBUF 10
-
static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_dentry->d_inode;
- struct task_struct *p = proc_task(inode);
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *p = get_proc_task(inode);
unsigned int fd, tid, ino;
int retval;
- char buf[NUMBUF];
+ char buf[PROC_NUMBUF];
struct files_struct * files;
struct fdtable *fdt;
retval = -ENOENT;
- if (!pid_alive(p))
- goto out;
+ if (!p)
+ goto out_no_task;
retval = 0;
tid = p->pid;
@@ -1213,7 +1139,7 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
goto out;
filp->f_pos++;
case 1:
- ino = fake_ino(tid, PROC_TID_INO);
+ ino = parent_ino(dentry);
if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
goto out;
filp->f_pos++;
@@ -1232,7 +1158,7 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
continue;
rcu_read_unlock();
- j = NUMBUF;
+ j = PROC_NUMBUF;
i = fd;
do {
j--;
@@ -1241,7 +1167,7 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
} while (i);
ino = fake_ino(tid, PROC_TID_FD_DIR + fd);
- if (filldir(dirent, buf+j, NUMBUF-j, fd+2, ino, DT_LNK) < 0) {
+ if (filldir(dirent, buf+j, PROC_NUMBUF-j, fd+2, ino, DT_LNK) < 0) {
rcu_read_lock();
break;
}
@@ -1251,6 +1177,8 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
put_files_struct(files);
}
out:
+ put_task_struct(p);
+out_no_task:
return retval;
}
@@ -1262,16 +1190,18 @@ static int proc_pident_readdir(struct file *filp,
int pid;
struct dentry *dentry = filp->f_dentry;
struct inode *inode = dentry->d_inode;
+ struct task_struct *task = get_proc_task(inode);
struct pid_entry *p;
ino_t ino;
int ret;
ret = -ENOENT;
- if (!pid_alive(proc_task(inode)))
+ if (!task)
goto out;
ret = 0;
- pid = proc_task(inode)->pid;
+ pid = task->pid;
+ put_task_struct(task);
i = filp->f_pos;
switch (i) {
case 0:
@@ -1354,22 +1284,19 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
/* Common stuff */
ei = PROC_I(inode);
- ei->task = NULL;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_ino = fake_ino(task->pid, ino);
- if (!pid_alive(task))
- goto out_unlock;
-
/*
* grab the reference to task.
*/
- get_task_struct(task);
- ei->task = task;
- ei->type = ino;
+ ei->pid = get_pid(task->pids[PIDTYPE_PID].pid);
+ if (!ei->pid)
+ goto out_unlock;
+
inode->i_uid = 0;
inode->i_gid = 0;
- if (ino == PROC_TGID_INO || ino == PROC_TID_INO || task_dumpable(task)) {
+ if (task_dumpable(task)) {
inode->i_uid = task->euid;
inode->i_gid = task->egid;
}
@@ -1379,7 +1306,6 @@ out:
return inode;
out_unlock:
- ei->pde = NULL;
iput(inode);
return NULL;
}
@@ -1393,13 +1319,21 @@ out_unlock:
*
* Rewrite the inode's ownerships here because the owning task may have
* performed a setuid(), etc.
+ *
+ * Before the /proc/pid/status file was created the only way to read
+ * the effective uid of a /process was to stat /proc/pid. Reading
+ * /proc/pid/status is slow enough that procps and other packages
+ * kept stating /proc/pid. To keep the rules in /proc simple I have
+ * made this apply to all per process world readable and executable
+ * directories.
*/
static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
- struct task_struct *task = proc_task(inode);
- if (pid_alive(task)) {
- if (proc_type(inode) == PROC_TGID_INO || proc_type(inode) == PROC_TID_INO || task_dumpable(task)) {
+ struct task_struct *task = get_proc_task(inode);
+ if (task) {
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+ task_dumpable(task)) {
inode->i_uid = task->euid;
inode->i_gid = task->egid;
} else {
@@ -1407,59 +1341,75 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
inode->i_gid = 0;
}
security_task_to_inode(task, inode);
+ put_task_struct(task);
return 1;
}
d_drop(dentry);
return 0;
}
+static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *task;
+ generic_fillattr(inode, stat);
+
+ rcu_read_lock();
+ stat->uid = 0;
+ stat->gid = 0;
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
+ if (task) {
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+ task_dumpable(task)) {
+ stat->uid = task->euid;
+ stat->gid = task->egid;
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
- struct task_struct *task = proc_task(inode);
- int fd = proc_type(inode) - PROC_TID_FD_DIR;
+ struct task_struct *task = get_proc_task(inode);
+ int fd = proc_fd(inode);
struct files_struct *files;
- files = get_files_struct(task);
- if (files) {
- rcu_read_lock();
- if (fcheck_files(files, fd)) {
+ if (task) {
+ files = get_files_struct(task);
+ if (files) {
+ rcu_read_lock();
+ if (fcheck_files(files, fd)) {
+ rcu_read_unlock();
+ put_files_struct(files);
+ if (task_dumpable(task)) {
+ inode->i_uid = task->euid;
+ inode->i_gid = task->egid;
+ } else {
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ }
+ security_task_to_inode(task, inode);
+ put_task_struct(task);
+ return 1;
+ }
rcu_read_unlock();
put_files_struct(files);
- if (task_dumpable(task)) {
- inode->i_uid = task->euid;
- inode->i_gid = task->egid;
- } else {
- inode->i_uid = 0;
- inode->i_gid = 0;
- }
- security_task_to_inode(task, inode);
- return 1;
}
- rcu_read_unlock();
- put_files_struct(files);
+ put_task_struct(task);
}
d_drop(dentry);
return 0;
}
-static void pid_base_iput(struct dentry *dentry, struct inode *inode)
-{
- struct task_struct *task = proc_task(inode);
- spin_lock(&task->proc_lock);
- if (task->proc_dentry == dentry)
- task->proc_dentry = NULL;
- spin_unlock(&task->proc_lock);
- iput(inode);
-}
-
static int pid_delete_dentry(struct dentry * dentry)
{
/* Is the task we represent dead?
* If so, then don't put the dentry on the lru list,
* kill it immediately.
*/
- return !pid_alive(proc_task(dentry->d_inode));
+ return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
}
static struct dentry_operations tid_fd_dentry_operations =
@@ -1474,13 +1424,6 @@ static struct dentry_operations pid_dentry_operations =
.d_delete = pid_delete_dentry,
};
-static struct dentry_operations pid_base_dentry_operations =
-{
- .d_revalidate = pid_revalidate,
- .d_iput = pid_base_iput,
- .d_delete = pid_delete_dentry,
-};
-
/* Lookups */
static unsigned name_to_int(struct dentry *dentry)
@@ -1508,22 +1451,24 @@ out:
/* SMP-safe */
static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
{
- struct task_struct *task = proc_task(dir);
+ struct task_struct *task = get_proc_task(dir);
unsigned fd = name_to_int(dentry);
+ struct dentry *result = ERR_PTR(-ENOENT);
struct file * file;
struct files_struct * files;
struct inode *inode;
struct proc_inode *ei;
+ if (!task)
+ goto out_no_task;
if (fd == ~0U)
goto out;
- if (!pid_alive(task))
- goto out;
inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_FD_DIR+fd);
if (!inode)
goto out;
ei = PROC_I(inode);
+ ei->fd = fd;
files = get_files_struct(task);
if (!files)
goto out_unlock;
@@ -1548,19 +1493,25 @@ static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry,
ei->op.proc_get_link = proc_fd_link;
dentry->d_op = &tid_fd_dentry_operations;
d_add(dentry, inode);
- return NULL;
+ /* Close the race of the process dying before we return the dentry */
+ if (tid_fd_revalidate(dentry, NULL))
+ result = NULL;
+out:
+ put_task_struct(task);
+out_no_task:
+ return result;
out_unlock2:
spin_unlock(&files->file_lock);
put_files_struct(files);
out_unlock:
iput(inode);
-out:
- return ERR_PTR(-ENOENT);
+ goto out;
}
static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir);
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd);
+static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
static struct file_operations proc_fd_operations = {
.read = generic_read_dir,
@@ -1577,12 +1528,11 @@ static struct file_operations proc_task_operations = {
*/
static struct inode_operations proc_fd_inode_operations = {
.lookup = proc_lookupfd,
- .permission = proc_permission,
};
static struct inode_operations proc_task_inode_operations = {
.lookup = proc_task_lookup,
- .permission = proc_task_permission,
+ .getattr = proc_task_getattr,
};
#ifdef CONFIG_SECURITY
@@ -1592,12 +1542,17 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
struct inode * inode = file->f_dentry->d_inode;
unsigned long page;
ssize_t length;
- struct task_struct *task = proc_task(inode);
+ struct task_struct *task = get_proc_task(inode);
+
+ length = -ESRCH;
+ if (!task)
+ goto out_no_task;
if (count > PAGE_SIZE)
count = PAGE_SIZE;
+ length = -ENOMEM;
if (!(page = __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
+ goto out;
length = security_getprocattr(task,
(char*)file->f_dentry->d_name.name,
@@ -1605,6 +1560,9 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
if (length >= 0)
length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
free_page(page);
+out:
+ put_task_struct(task);
+out_no_task:
return length;
}
@@ -1614,26 +1572,36 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
struct inode * inode = file->f_dentry->d_inode;
char *page;
ssize_t length;
- struct task_struct *task = proc_task(inode);
+ struct task_struct *task = get_proc_task(inode);
+ length = -ESRCH;
+ if (!task)
+ goto out_no_task;
if (count > PAGE_SIZE)
count = PAGE_SIZE;
- if (*ppos != 0) {
- /* No partial writes. */
- return -EINVAL;
- }
+
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ length = -ENOMEM;
page = (char*)__get_free_page(GFP_USER);
if (!page)
- return -ENOMEM;
+ goto out;
+
length = -EFAULT;
if (copy_from_user(page, buf, count))
- goto out;
+ goto out_free;
length = security_setprocattr(task,
(char*)file->f_dentry->d_name.name,
(void*)page, count);
-out:
+out_free:
free_page((unsigned long) page);
+out:
+ put_task_struct(task);
+out_no_task:
return length;
}
@@ -1648,24 +1616,22 @@ static struct file_operations proc_tgid_attr_operations;
static struct inode_operations proc_tgid_attr_inode_operations;
#endif
-static int get_tid_list(int index, unsigned int *tids, struct inode *dir);
-
/* SMP-safe */
static struct dentry *proc_pident_lookup(struct inode *dir,
struct dentry *dentry,
struct pid_entry *ents)
{
struct inode *inode;
- int error;
- struct task_struct *task = proc_task(dir);
+ struct dentry *error;
+ struct task_struct *task = get_proc_task(dir);
struct pid_entry *p;
struct proc_inode *ei;
- error = -ENOENT;
+ error = ERR_PTR(-ENOENT);
inode = NULL;
- if (!pid_alive(task))
- goto out;
+ if (!task)
+ goto out_no_task;
for (p = ents; p->name; p++) {
if (p->len != dentry->d_name.len)
@@ -1676,7 +1642,7 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
if (!p->name)
goto out;
- error = -EINVAL;
+ error = ERR_PTR(-EINVAL);
inode = proc_pid_make_inode(dir->i_sb, task, p->type);
if (!inode)
goto out;
@@ -1689,7 +1655,7 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
*/
switch(p->type) {
case PROC_TGID_TASK:
- inode->i_nlink = 2 + get_tid_list(2, NULL, dir);
+ inode->i_nlink = 2;
inode->i_op = &proc_task_inode_operations;
inode->i_fop = &proc_task_operations;
break;
@@ -1759,7 +1725,6 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
#endif
case PROC_TID_MEM:
case PROC_TGID_MEM:
- inode->i_op = &proc_mem_inode_operations;
inode->i_fop = &proc_mem_operations;
break;
#ifdef CONFIG_SECCOMP
@@ -1801,6 +1766,10 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
case PROC_TGID_ATTR_EXEC:
case PROC_TID_ATTR_FSCREATE:
case PROC_TGID_ATTR_FSCREATE:
+ case PROC_TID_ATTR_KEYCREATE:
+ case PROC_TGID_ATTR_KEYCREATE:
+ case PROC_TID_ATTR_SOCKCREATE:
+ case PROC_TGID_ATTR_SOCKCREATE:
inode->i_fop = &proc_pid_attr_operations;
break;
#endif
@@ -1842,14 +1811,18 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
default:
printk("procfs: impossible type (%d)",p->type);
iput(inode);
- return ERR_PTR(-EINVAL);
+ error = ERR_PTR(-EINVAL);
+ goto out;
}
dentry->d_op = &pid_dentry_operations;
d_add(dentry, inode);
- return NULL;
-
+ /* Close the race of the process dying before we return the dentry */
+ if (pid_revalidate(dentry, NULL))
+ error = NULL;
out:
- return ERR_PTR(error);
+ put_task_struct(task);
+out_no_task:
+ return error;
}
static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
@@ -1872,10 +1845,12 @@ static struct file_operations proc_tid_base_operations = {
static struct inode_operations proc_tgid_base_inode_operations = {
.lookup = proc_tgid_base_lookup,
+ .getattr = pid_getattr,
};
static struct inode_operations proc_tid_base_inode_operations = {
.lookup = proc_tid_base_lookup,
+ .getattr = pid_getattr,
};
#ifdef CONFIG_SECURITY
@@ -1917,10 +1892,12 @@ static struct dentry *proc_tid_attr_lookup(struct inode *dir,
static struct inode_operations proc_tgid_attr_inode_operations = {
.lookup = proc_tgid_attr_lookup,
+ .getattr = pid_getattr,
};
static struct inode_operations proc_tid_attr_inode_operations = {
.lookup = proc_tid_attr_lookup,
+ .getattr = pid_getattr,
};
#endif
@@ -1930,14 +1907,14 @@ static struct inode_operations proc_tid_attr_inode_operations = {
static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
int buflen)
{
- char tmp[30];
+ char tmp[PROC_NUMBUF];
sprintf(tmp, "%d", current->tgid);
return vfs_readlink(dentry,buffer,buflen,tmp);
}
static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- char tmp[30];
+ char tmp[PROC_NUMBUF];
sprintf(tmp, "%d", current->tgid);
return ERR_PTR(vfs_follow_link(nd,tmp));
}
@@ -1948,67 +1925,80 @@ static struct inode_operations proc_self_inode_operations = {
};
/**
- * proc_pid_unhash - Unhash /proc/@pid entry from the dcache.
- * @p: task that should be flushed.
+ * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
+ *
+ * @task: task that should be flushed.
+ *
+ * Looks in the dcache for
+ * /proc/@pid
+ * /proc/@tgid/task/@pid
+ * if either directory is present, flushes it and all of its children
+ * from the dcache.
*
- * Drops the /proc/@pid dcache entry from the hash chains.
+ * It is safe and reasonable to cache /proc entries for a task until
+ * that task exits. After that they just clog up the dcache with
+ * useless entries, possibly causing useful dcache entries to be
+ * flushed instead. This routine is provided to flush those useless
+ * dcache entries at process exit time.
*
- * Dropping /proc/@pid entries and detach_pid must be synchroneous,
- * otherwise e.g. /proc/@pid/exe might point to the wrong executable,
- * if the pid value is immediately reused. This is enforced by
- * - caller must acquire spin_lock(p->proc_lock)
- * - must be called before detach_pid()
- * - proc_pid_lookup acquires proc_lock, and checks that
- * the target is not dead by looking at the attach count
- * of PIDTYPE_PID.
+ * NOTE: This routine is just an optimization, so it does not guarantee
+ * that no dcache entries will exist at process exit time; it
+ * just makes it very unlikely that any will persist.
*/
-
-struct dentry *proc_pid_unhash(struct task_struct *p)
+void proc_flush_task(struct task_struct *task)
{
- struct dentry *proc_dentry;
+ struct dentry *dentry, *leader, *dir;
+ char buf[PROC_NUMBUF];
+ struct qstr name;
+
+ name.name = buf;
+ name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
+ dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
+ if (dentry) {
+ shrink_dcache_parent(dentry);
+ d_drop(dentry);
+ dput(dentry);
+ }
- proc_dentry = p->proc_dentry;
- if (proc_dentry != NULL) {
+ if (thread_group_leader(task))
+ goto out;
- spin_lock(&dcache_lock);
- spin_lock(&proc_dentry->d_lock);
- if (!d_unhashed(proc_dentry)) {
- dget_locked(proc_dentry);
- __d_drop(proc_dentry);
- spin_unlock(&proc_dentry->d_lock);
- } else {
- spin_unlock(&proc_dentry->d_lock);
- proc_dentry = NULL;
- }
- spin_unlock(&dcache_lock);
- }
- return proc_dentry;
-}
+ name.name = buf;
+ name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
+ leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
+ if (!leader)
+ goto out;
-/**
- * proc_pid_flush - recover memory used by stale /proc/@pid/x entries
- * @proc_dentry: directoy to prune.
- *
- * Shrink the /proc directory that was used by the just killed thread.
- */
-
-void proc_pid_flush(struct dentry *proc_dentry)
-{
- might_sleep();
- if(proc_dentry != NULL) {
- shrink_dcache_parent(proc_dentry);
- dput(proc_dentry);
+ name.name = "task";
+ name.len = strlen(name.name);
+ dir = d_hash_and_lookup(leader, &name);
+ if (!dir)
+ goto out_put_leader;
+
+ name.name = buf;
+ name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
+ dentry = d_hash_and_lookup(dir, &name);
+ if (dentry) {
+ shrink_dcache_parent(dentry);
+ d_drop(dentry);
+ dput(dentry);
}
+
+ dput(dir);
+out_put_leader:
+ dput(leader);
+out:
+ return;
}
/* SMP-safe */
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
+ struct dentry *result = ERR_PTR(-ENOENT);
struct task_struct *task;
struct inode *inode;
struct proc_inode *ei;
unsigned tgid;
- int died;
if (dentry->d_name.len == 4 && !memcmp(dentry->d_name.name,"self",4)) {
inode = new_inode(dir->i_sb);
@@ -2029,21 +2019,18 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
if (tgid == ~0U)
goto out;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
task = find_task_by_pid(tgid);
if (task)
get_task_struct(task);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!task)
goto out;
inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO);
+ if (!inode)
+ goto out_put_task;
-
- if (!inode) {
- put_task_struct(task);
- goto out;
- }
inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
inode->i_op = &proc_tgid_base_inode_operations;
inode->i_fop = &proc_tgid_base_operations;
@@ -2054,45 +2041,40 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
inode->i_nlink = 4;
#endif
- dentry->d_op = &pid_base_dentry_operations;
+ dentry->d_op = &pid_dentry_operations;
- died = 0;
d_add(dentry, inode);
- spin_lock(&task->proc_lock);
- task->proc_dentry = dentry;
- if (!pid_alive(task)) {
- dentry = proc_pid_unhash(task);
- died = 1;
- }
- spin_unlock(&task->proc_lock);
+ /* Close the race of the process dying before we return the dentry */
+ if (pid_revalidate(dentry, NULL))
+ result = NULL;
+out_put_task:
put_task_struct(task);
- if (died) {
- proc_pid_flush(dentry);
- goto out;
- }
- return NULL;
out:
- return ERR_PTR(-ENOENT);
+ return result;
}
/* SMP-safe */
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
+ struct dentry *result = ERR_PTR(-ENOENT);
struct task_struct *task;
- struct task_struct *leader = proc_task(dir);
+ struct task_struct *leader = get_proc_task(dir);
struct inode *inode;
unsigned tid;
+ if (!leader)
+ goto out_no_task;
+
tid = name_to_int(dentry);
if (tid == ~0U)
goto out;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
task = find_task_by_pid(tid);
if (task)
get_task_struct(task);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!task)
goto out;
if (leader->tgid != task->tgid)
@@ -2113,101 +2095,95 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
inode->i_nlink = 3;
#endif
- dentry->d_op = &pid_base_dentry_operations;
+ dentry->d_op = &pid_dentry_operations;
d_add(dentry, inode);
+ /* Close the race of the process dying before we return the dentry */
+ if (pid_revalidate(dentry, NULL))
+ result = NULL;
- put_task_struct(task);
- return NULL;
out_drop_task:
put_task_struct(task);
out:
- return ERR_PTR(-ENOENT);
+ put_task_struct(leader);
+out_no_task:
+ return result;
}
-#define PROC_NUMBUF 10
-#define PROC_MAXPIDS 20
-
/*
- * Get a few tgid's to return for filldir - we need to hold the
- * tasklist lock while doing this, and we must release it before
- * we actually do the filldir itself, so we use a temp buffer..
+ * Find the first tgid to return to user space.
+ *
+ * Usually this is just whatever follows &init_task, but if the user's
+ * buffer was too small to hold the full list or there was a seek into
+ * the middle of the directory we have more work to do.
+ *
+ * In the case of a short read we start with find_task_by_pid.
+ *
+ * In the case of a seek we start with &init_task and walk nr
+ * tasks past it.
*/
-static int get_tgid_list(int index, unsigned long version, unsigned int *tgids)
-{
- struct task_struct *p;
- int nr_tgids = 0;
-
- index--;
- read_lock(&tasklist_lock);
- p = NULL;
- if (version) {
- p = find_task_by_pid(version);
- if (p && !thread_group_leader(p))
- p = NULL;
+static struct task_struct *first_tgid(int tgid, unsigned int nr)
+{
+ struct task_struct *pos;
+ rcu_read_lock();
+ if (tgid && nr) {
+ pos = find_task_by_pid(tgid);
+ if (pos && thread_group_leader(pos))
+ goto found;
}
+ /* If nr exceeds the number of processes get out quickly */
+ pos = NULL;
+ if (nr && nr >= nr_processes())
+ goto done;
- if (p)
- index = 0;
- else
- p = next_task(&init_task);
-
- for ( ; p != &init_task; p = next_task(p)) {
- int tgid = p->pid;
- if (!pid_alive(p))
- continue;
- if (--index >= 0)
- continue;
- tgids[nr_tgids] = tgid;
- nr_tgids++;
- if (nr_tgids >= PROC_MAXPIDS)
- break;
+ /* If we haven't found our starting place yet, start with
+ * the init_task and walk nr tasks forward.
+ */
+ for (pos = next_task(&init_task); nr > 0; --nr) {
+ pos = next_task(pos);
+ if (pos == &init_task) {
+ pos = NULL;
+ goto done;
+ }
}
- read_unlock(&tasklist_lock);
- return nr_tgids;
+found:
+ get_task_struct(pos);
+done:
+ rcu_read_unlock();
+ return pos;
}
/*
- * Get a few tid's to return for filldir - we need to hold the
- * tasklist lock while doing this, and we must release it before
- * we actually do the filldir itself, so we use a temp buffer..
+ * Find the next task in the task list.
+ * Return NULL if we loop or there is any error.
+ *
+ * The reference to the input task_struct is released.
*/
-static int get_tid_list(int index, unsigned int *tids, struct inode *dir)
-{
- struct task_struct *leader_task = proc_task(dir);
- struct task_struct *task = leader_task;
- int nr_tids = 0;
-
- index -= 2;
- read_lock(&tasklist_lock);
- /*
- * The starting point task (leader_task) might be an already
- * unlinked task, which cannot be used to access the task-list
- * via next_thread().
- */
- if (pid_alive(task)) do {
- int tid = task->pid;
-
- if (--index >= 0)
- continue;
- if (tids != NULL)
- tids[nr_tids] = tid;
- nr_tids++;
- if (nr_tids >= PROC_MAXPIDS)
- break;
- } while ((task = next_thread(task)) != leader_task);
- read_unlock(&tasklist_lock);
- return nr_tids;
+static struct task_struct *next_tgid(struct task_struct *start)
+{
+ struct task_struct *pos;
+ rcu_read_lock();
+ pos = start;
+ if (pid_alive(start))
+ pos = next_task(start);
+ if (pid_alive(pos) && (pos != &init_task)) {
+ get_task_struct(pos);
+ goto done;
+ }
+ pos = NULL;
+done:
+ rcu_read_unlock();
+ put_task_struct(start);
+ return pos;
}
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int tgid_array[PROC_MAXPIDS];
char buf[PROC_NUMBUF];
unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- unsigned int nr_tgids, i;
- int next_tgid;
+ struct task_struct *task;
+ int tgid;
if (!nr) {
ino_t ino = fake_ino(0,PROC_TGID_INO);
@@ -2216,63 +2192,116 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
filp->f_pos++;
nr++;
}
+ nr -= 1;
/* f_version caches the tgid value that the last readdir call couldn't
* return. lseek aka telldir automagically resets f_version to 0.
*/
- next_tgid = filp->f_version;
+ tgid = filp->f_version;
filp->f_version = 0;
- for (;;) {
- nr_tgids = get_tgid_list(nr, next_tgid, tgid_array);
- if (!nr_tgids) {
- /* no more entries ! */
+ for (task = first_tgid(tgid, nr);
+ task;
+ task = next_tgid(task), filp->f_pos++) {
+ int len;
+ ino_t ino;
+ tgid = task->pid;
+ len = snprintf(buf, sizeof(buf), "%d", tgid);
+ ino = fake_ino(tgid, PROC_TGID_INO);
+ if (filldir(dirent, buf, len, filp->f_pos, ino, DT_DIR) < 0) {
+ /* returning this tgid failed, save it as the first
+ * pid for the next readdir call */
+ filp->f_version = tgid;
+ put_task_struct(task);
break;
}
- next_tgid = 0;
+ }
+ return 0;
+}
- /* do not use the last found pid, reserve it for next_tgid */
- if (nr_tgids == PROC_MAXPIDS) {
- nr_tgids--;
- next_tgid = tgid_array[nr_tgids];
- }
+/*
+ * Find the first tid of a thread group to return to user space.
+ *
+ * Usually this is just the thread group leader, but if the user's
+ * buffer was too small or there was a seek into the middle of the
+ * directory we have more work to do.
+ *
+ * In the case of a short read we start with find_task_by_pid.
+ *
+ * In the case of a seek we start with the leader and walk nr
+ * threads past it.
+ */
+static struct task_struct *first_tid(struct task_struct *leader,
+ int tid, int nr)
+{
+ struct task_struct *pos;
- for (i=0;i<nr_tgids;i++) {
- int tgid = tgid_array[i];
- ino_t ino = fake_ino(tgid,PROC_TGID_INO);
- unsigned long j = PROC_NUMBUF;
+ rcu_read_lock();
+ /* Attempt to start with the pid of a thread */
+ if (tid && (nr > 0)) {
+ pos = find_task_by_pid(tid);
+ if (pos && (pos->group_leader == leader))
+ goto found;
+ }
- do
- buf[--j] = '0' + (tgid % 10);
- while ((tgid /= 10) != 0);
+ /* If nr exceeds the number of threads there is nothing to do */
+ pos = NULL;
+ if (nr && nr >= get_nr_threads(leader))
+ goto out;
- if (filldir(dirent, buf+j, PROC_NUMBUF-j, filp->f_pos, ino, DT_DIR) < 0) {
- /* returning this tgid failed, save it as the first
- * pid for the next readir call */
- filp->f_version = tgid_array[i];
- goto out;
- }
- filp->f_pos++;
- nr++;
+ /* If we haven't found our starting place yet, start
+ * with the leader and walk nr threads forward.
+ */
+ for (pos = leader; nr > 0; --nr) {
+ pos = next_thread(pos);
+ if (pos == leader) {
+ pos = NULL;
+ goto out;
}
}
+found:
+ get_task_struct(pos);
out:
- return 0;
+ rcu_read_unlock();
+ return pos;
+}
+
+/*
+ * Find the next thread in the thread list.
+ * Return NULL if there is an error or no next thread.
+ *
+ * The reference to the input task_struct is released.
+ */
+static struct task_struct *next_tid(struct task_struct *start)
+{
+ struct task_struct *pos = NULL;
+ rcu_read_lock();
+ if (pid_alive(start)) {
+ pos = next_thread(start);
+ if (thread_group_leader(pos))
+ pos = NULL;
+ else
+ get_task_struct(pos);
+ }
+ rcu_read_unlock();
+ put_task_struct(start);
+ return pos;
}
/* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int tid_array[PROC_MAXPIDS];
char buf[PROC_NUMBUF];
- unsigned int nr_tids, i;
struct dentry *dentry = filp->f_dentry;
struct inode *inode = dentry->d_inode;
+ struct task_struct *leader = get_proc_task(inode);
+ struct task_struct *task;
int retval = -ENOENT;
ino_t ino;
+ int tid;
unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
- if (!pid_alive(proc_task(inode)))
- goto out;
+ if (!leader)
+ goto out_no_task;
retval = 0;
switch (pos) {
@@ -2290,24 +2319,45 @@ static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldi
/* fall through */
}
- nr_tids = get_tid_list(pos, tid_array, inode);
- inode->i_nlink = pos + nr_tids;
-
- for (i = 0; i < nr_tids; i++) {
- unsigned long j = PROC_NUMBUF;
- int tid = tid_array[i];
-
- ino = fake_ino(tid,PROC_TID_INO);
-
- do
- buf[--j] = '0' + (tid % 10);
- while ((tid /= 10) != 0);
-
- if (filldir(dirent, buf+j, PROC_NUMBUF-j, pos, ino, DT_DIR) < 0)
+ /* f_version caches the tid value that the last readdir call couldn't
+ * return. lseek aka telldir automagically resets f_version to 0.
+ */
+ tid = filp->f_version;
+ filp->f_version = 0;
+ for (task = first_tid(leader, tid, pos - 2);
+ task;
+ task = next_tid(task), pos++) {
+ int len;
+ tid = task->pid;
+ len = snprintf(buf, sizeof(buf), "%d", tid);
+ ino = fake_ino(tid, PROC_TID_INO);
+ if (filldir(dirent, buf, len, pos, ino, DT_DIR) < 0) {
+ /* returning this tid failed, save it as the first
+ * tid for the next readdir call */
+ filp->f_version = tid;
+ put_task_struct(task);
break;
- pos++;
+ }
}
out:
filp->f_pos = pos;
+ put_task_struct(leader);
+out_no_task:
return retval;
}
+
+static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *p = get_proc_task(inode);
+ generic_fillattr(inode, stat);
+
+ if (p) {
+ rcu_read_lock();
+ stat->nlink += get_nr_threads(p);
+ rcu_read_unlock();
+ put_task_struct(p);
+ }
+
+ return 0;
+}
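The recurring shape throughout the base.c conversion is that the proc inode now pins only a struct pid; each handler resolves it to a task_struct on entry with get_proc_task(), bails out with -ESRCH if the task is already gone, and drops the reference on every exit path. A minimal sketch of that calling pattern (a hypothetical handler, not taken from the patch):

    static ssize_t example_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
    {
        struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
        ssize_t length = -ESRCH;

        if (!task)              /* task exited and its pid was detached */
            goto out_no_task;

        /* ... safe to dereference "task" while we hold the reference ... */
        length = 0;

        put_task_struct(task);  /* always pair get_proc_task() with a put */
    out_no_task:
        return length;
    }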
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 722b9c463111..6dcef089e18e 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -58,14 +58,11 @@ static void de_put(struct proc_dir_entry *de)
static void proc_delete_inode(struct inode *inode)
{
struct proc_dir_entry *de;
- struct task_struct *tsk;
truncate_inode_pages(&inode->i_data, 0);
- /* Let go of any associated process */
- tsk = PROC_I(inode)->task;
- if (tsk)
- put_task_struct(tsk);
+ /* Stop tracking associated processes */
+ put_pid(PROC_I(inode)->pid);
/* Let go of any associated proc directory entry */
de = PROC_I(inode)->pde;
@@ -94,8 +91,8 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
if (!ei)
return NULL;
- ei->task = NULL;
- ei->type = 0;
+ ei->pid = NULL;
+ ei->fd = 0;
ei->op.proc_get_link = NULL;
ei->pde = NULL;
inode = &ei->vfs_inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 0502f17b860d..146a434ba944 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -37,16 +37,30 @@ extern int proc_tgid_stat(struct task_struct *, char *);
extern int proc_pid_status(struct task_struct *, char *);
extern int proc_pid_statm(struct task_struct *, char *);
+extern struct file_operations proc_maps_operations;
+extern struct file_operations proc_numa_maps_operations;
+extern struct file_operations proc_smaps_operations;
+
void free_proc_entry(struct proc_dir_entry *de);
int proc_init_inodecache(void);
-static inline struct task_struct *proc_task(struct inode *inode)
+static inline struct pid *proc_pid(struct inode *inode)
+{
+ return PROC_I(inode)->pid;
+}
+
+static inline struct task_struct *get_proc_task(struct inode *inode)
{
- return PROC_I(inode)->task;
+ return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}
-static inline int proc_type(struct inode *inode)
+static inline int proc_fd(struct inode *inode)
{
- return PROC_I(inode)->type;
+ return PROC_I(inode)->fd;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 91b7c15ab373..0a163a4f7764 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -75,9 +75,13 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
{
struct vm_area_struct * vma;
int result = -ENOENT;
- struct task_struct *task = proc_task(inode);
- struct mm_struct * mm = get_task_mm(task);
+ struct task_struct *task = get_proc_task(inode);
+ struct mm_struct * mm = NULL;
+ if (task) {
+ mm = get_task_mm(task);
+ put_task_struct(task);
+ }
if (!mm)
goto out;
down_read(&mm->mmap_sem);
@@ -118,9 +122,15 @@ struct mem_size_stats
unsigned long private_dirty;
};
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
- struct task_struct *task = m->private;
+ struct proc_maps_private *priv = m->private;
+ struct task_struct *task = priv->task;
struct vm_area_struct *vma = v;
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
@@ -153,22 +163,23 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
pad_len_spaces(m, len);
seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
} else {
- if (mm) {
- if (vma->vm_start <= mm->start_brk &&
+ const char *name = arch_vma_name(vma);
+ if (!name) {
+ if (mm) {
+ if (vma->vm_start <= mm->start_brk &&
vma->vm_end >= mm->brk) {
- pad_len_spaces(m, len);
- seq_puts(m, "[heap]");
- } else {
- if (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack) {
-
- pad_len_spaces(m, len);
- seq_puts(m, "[stack]");
+ name = "[heap]";
+ } else if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
+ name = "[stack]";
}
+ } else {
+ name = "[vdso]";
}
- } else {
+ }
+ if (name) {
pad_len_spaces(m, len);
- seq_puts(m, "[vdso]");
+ seq_puts(m, name);
}
}
seq_putc(m, '\n');
@@ -295,12 +306,16 @@ static int show_smap(struct seq_file *m, void *v)
static void *m_start(struct seq_file *m, loff_t *pos)
{
- struct task_struct *task = m->private;
+ struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
- struct vm_area_struct *vma, *tail_vma;
+ struct vm_area_struct *vma, *tail_vma = NULL;
loff_t l = *pos;
+ /* Clear the per syscall fields in priv */
+ priv->task = NULL;
+ priv->tail_vma = NULL;
+
/*
* We remember last_addr rather than next_addr to hit with
* mmap_cache most of the time. We have zero last_addr at
@@ -311,11 +326,15 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (last_addr == -1UL)
return NULL;
- mm = get_task_mm(task);
+ priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+ if (!priv->task)
+ return NULL;
+
+ mm = get_task_mm(priv->task);
if (!mm)
return NULL;
- tail_vma = get_gate_vma(task);
+ priv->tail_vma = tail_vma = get_gate_vma(priv->task);
down_read(&mm->mmap_sem);
/* Start with last addr hint */
@@ -350,11 +369,9 @@ out:
return tail_vma;
}
-static void m_stop(struct seq_file *m, void *v)
+static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
- struct task_struct *task = m->private;
- struct vm_area_struct *vma = v;
- if (vma && vma != get_gate_vma(task)) {
+ if (vma && vma != priv->tail_vma) {
struct mm_struct *mm = vma->vm_mm;
up_read(&mm->mmap_sem);
mmput(mm);
@@ -363,38 +380,103 @@ static void m_stop(struct seq_file *m, void *v)
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct task_struct *task = m->private;
+ struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
- struct vm_area_struct *tail_vma = get_gate_vma(task);
+ struct vm_area_struct *tail_vma = priv->tail_vma;
(*pos)++;
if (vma && (vma != tail_vma) && vma->vm_next)
return vma->vm_next;
- m_stop(m, v);
+ vma_stop(priv, vma);
return (vma != tail_vma)? tail_vma: NULL;
}
-struct seq_operations proc_pid_maps_op = {
+static void m_stop(struct seq_file *m, void *v)
+{
+ struct proc_maps_private *priv = m->private;
+ struct vm_area_struct *vma = v;
+
+ vma_stop(priv, vma);
+ if (priv->task)
+ put_task_struct(priv->task);
+}
+
+static struct seq_operations proc_pid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_map
};
-struct seq_operations proc_pid_smaps_op = {
+static struct seq_operations proc_pid_smaps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_smap
};
+static int do_maps_open(struct inode *inode, struct file *file,
+ struct seq_operations *ops)
+{
+ struct proc_maps_private *priv;
+ int ret = -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv) {
+ priv->pid = proc_pid(inode);
+ ret = seq_open(file, ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = priv;
+ } else {
+ kfree(priv);
+ }
+ }
+ return ret;
+}
+
+static int maps_open(struct inode *inode, struct file *file)
+{
+ return do_maps_open(inode, file, &proc_pid_maps_op);
+}
+
+struct file_operations proc_maps_operations = {
+ .open = maps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);
-struct seq_operations proc_pid_numa_maps_op = {
+static struct seq_operations proc_pid_numa_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_numa_map
};
+
+static int numa_maps_open(struct inode *inode, struct file *file)
+{
+ return do_maps_open(inode, file, &proc_pid_numa_maps_op);
+}
+
+struct file_operations proc_numa_maps_operations = {
+ .open = numa_maps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
#endif
+
+static int smaps_open(struct inode *inode, struct file *file)
+{
+ return do_maps_open(inode, file, &proc_pid_smaps_op);
+}
+
+struct file_operations proc_smaps_operations = {
+ .open = smaps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
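
The hunks above follow the usual seq_file lifecycle: do_maps_open() allocates the per-open proc_maps_private and stores the struct pid, m_start() turns that pid into a task reference for one read cycle, m_stop() drops it, and seq_release_private() frees the private data on close. A hedged sketch of that start/stop pairing with illustrative names (example_private and example_first_entry are hypothetical, not the patch's code):

	static void *example_start(struct seq_file *m, loff_t *pos)
	{
		struct example_private *priv = m->private;

		/* Take per-read-cycle references here ... */
		priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
		if (!priv->task)
			return NULL;
		return example_first_entry(priv, *pos);	/* hypothetical iterator */
	}

	static void example_stop(struct seq_file *m, void *v)
	{
		struct example_private *priv = m->private;

		/* ... and drop them here, on every ->stop. */
		if (priv->task) {
			put_task_struct(priv->task);
			priv->task = NULL;
		}
	}

Holding only the struct pid across opens, rather than a task_struct, is what keeps an open /proc file from pinning a dead task.
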
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8f68827ed10e..af69f28277b6 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -156,9 +156,28 @@ static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
return NULL;
}
-struct seq_operations proc_pid_maps_op = {
+static struct seq_operations proc_pid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_map
};
+
+static int maps_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ ret = seq_open(file, &proc_pid_maps_op);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = NULL;
+ }
+ return ret;
+}
+
+struct file_operations proc_maps_operations = {
+ .open = maps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 2f24c46f72a1..8bc182a88748 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -450,7 +450,7 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,qnx4_get_block);
}
-static struct address_space_operations qnx4_aops = {
+static const struct address_space_operations qnx4_aops = {
.readpage = qnx4_readpage,
.writepage = qnx4_writepage,
.sync_page = block_sync_page,
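
This is the first of many hunks in the patch constifying address_space_operations tables: the methods are only read after initialisation, so marking the table const lets it live in read-only data and makes accidental writes a compile error. A minimal sketch of the pattern, using the libfs helper as the only method (illustrative, not from the patch):

	#include <linux/fs.h>

	/* Read-only ops table; any later assignment to it is rejected. */
	static const struct address_space_operations examplefs_aops = {
		.readpage	= simple_readpage,
	};
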
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 00a933eb820c..86f14cacf641 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -26,7 +26,7 @@
#include <linux/fs.h>
-struct address_space_operations ramfs_aops = {
+const struct address_space_operations ramfs_aops = {
.readpage = simple_readpage,
.prepare_write = simple_prepare_write,
.commit_write = simple_commit_write
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index f443a84b98a5..99fffc9e1bfd 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -27,7 +27,7 @@
static int ramfs_nommu_setattr(struct dentry *, struct iattr *);
-struct address_space_operations ramfs_aops = {
+const struct address_space_operations ramfs_aops = {
.readpage = simple_readpage,
.prepare_write = simple_prepare_write,
.commit_write = simple_commit_write
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h
index 313237631b49..c2bb58e74653 100644
--- a/fs/ramfs/internal.h
+++ b/fs/ramfs/internal.h
@@ -10,6 +10,6 @@
*/
-extern struct address_space_operations ramfs_aops;
+extern const struct address_space_operations ramfs_aops;
extern const struct file_operations ramfs_file_operations;
extern struct inode_operations ramfs_file_inode_operations;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index cf6e1cf40351..752cea12e30f 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -1560,12 +1560,6 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
return res;
}
-static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user * buf,
- size_t count, loff_t pos)
-{
- return generic_file_aio_write(iocb, buf, count, pos);
-}
-
const struct file_operations reiserfs_file_operations = {
.read = generic_file_read,
.write = reiserfs_file_write,
@@ -1575,7 +1569,7 @@ const struct file_operations reiserfs_file_operations = {
.fsync = reiserfs_sync_file,
.sendfile = generic_file_sendfile,
.aio_read = generic_file_aio_read,
- .aio_write = reiserfs_aio_write,
+ .aio_write = generic_file_aio_write,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
};
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9857e50f85e7..a24858a632fa 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2996,7 +2996,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
return error;
}
-struct address_space_operations reiserfs_address_space_operations = {
+const struct address_space_operations reiserfs_address_space_operations = {
.writepage = reiserfs_writepage,
.readpage = reiserfs_readpage,
.readpages = reiserfs_readpages,
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 1b73529b8099..49d1a53dbef0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -834,8 +834,7 @@ static int write_ordered_buffers(spinlock_t * lock,
get_bh(bh);
if (test_set_buffer_locked(bh)) {
if (!buffer_dirty(bh)) {
- list_del_init(&jh->list);
- list_add(&jh->list, &tmp);
+ list_move(&jh->list, &tmp);
goto loop_next;
}
spin_unlock(lock);
@@ -855,8 +854,7 @@ static int write_ordered_buffers(spinlock_t * lock,
ret = -EIO;
}
if (buffer_dirty(bh)) {
- list_del_init(&jh->list);
- list_add(&jh->list, &tmp);
+ list_move(&jh->list, &tmp);
add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
} else {
reiserfs_free_jh(bh);
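
list_move() from <linux/list.h> unlinks an entry and re-adds it to another list head in one call, which is why the list_del_init()+list_add() pairs above collapse into a single line (list_move_tail() is the tail-insert variant). A two-line sketch of the equivalence with illustrative names:

	#include <linux/list.h>

	static void example_requeue(struct list_head *entry, struct list_head *new_head)
	{
		/* Same effect as: list_del(entry); list_add(entry, new_head); */
		list_move(entry, new_head);
	}
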
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 283fbc6b8eea..22eed61ebf69 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -459,7 +459,7 @@ err_out:
/* Mapping from our types to the kernel */
-static struct address_space_operations romfs_aops = {
+static const struct address_space_operations romfs_aops = {
.readpage = romfs_readpage
};
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index ed9a24d19d7d..dae67048baba 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -306,7 +306,7 @@ static int smb_commit_write(struct file *file, struct page *page,
return status;
}
-struct address_space_operations smb_file_aops = {
+const struct address_space_operations smb_file_aops = {
.readpage = smb_readpage,
.writepage = smb_writepage,
.prepare_write = smb_prepare_write,
diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h
index 972ed7dad388..34fb462b2379 100644
--- a/fs/smbfs/proto.h
+++ b/fs/smbfs/proto.h
@@ -63,7 +63,7 @@ extern int smb_revalidate_inode(struct dentry *dentry);
extern int smb_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
extern int smb_notify_change(struct dentry *dentry, struct iattr *attr);
/* file.c */
-extern struct address_space_operations smb_file_aops;
+extern const struct address_space_operations smb_file_aops;
extern const struct file_operations smb_file_operations;
extern struct inode_operations smb_file_inode_operations;
/* ioctl.c */
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c71dd2760d32..c8e96195b96e 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -400,8 +400,7 @@ static int smb_request_send_req(struct smb_request *req)
if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
goto out;
- list_del_init(&req->rq_queue);
- list_add_tail(&req->rq_queue, &server->recvq);
+ list_move_tail(&req->rq_queue, &server->recvq);
result = 1;
out:
return result;
@@ -435,8 +434,7 @@ int smb_request_send_server(struct smb_sb_info *server)
result = smb_request_send_req(req);
if (result < 0) {
server->conn_error = result;
- list_del_init(&req->rq_queue);
- list_add(&req->rq_queue, &server->xmitq);
+ list_move(&req->rq_queue, &server->xmitq);
result = -EIO;
goto out;
}
diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c
index 3f71384020cb..24577e2c489b 100644
--- a/fs/smbfs/smbiod.c
+++ b/fs/smbfs/smbiod.c
@@ -193,8 +193,7 @@ int smbiod_retry(struct smb_sb_info *server)
if (req->rq_flags & SMB_REQ_RETRY) {
/* must move the request to the xmitq */
VERBOSE("retrying request %p on recvq\n", req);
- list_del(&req->rq_queue);
- list_add(&req->rq_queue, &server->xmitq);
+ list_move(&req->rq_queue, &server->xmitq);
continue;
}
#endif
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 610b5bdbe75b..61c42430cba3 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -430,10 +430,9 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
i++;
/* fallthrough */
default:
- if (filp->f_pos == 2) {
- list_del(q);
- list_add(q, &parent_sd->s_children);
- }
+ if (filp->f_pos == 2)
+ list_move(q, &parent_sd->s_children);
+
for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
struct sysfs_dirent *next;
const char * name;
@@ -455,8 +454,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
dt_type(next)) < 0)
return 0;
- list_del(q);
- list_add(q, p);
+ list_move(q, p);
p = q;
filp->f_pos++;
}
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index f0b347bd12ca..5e0e31cc46f5 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -16,7 +16,7 @@
extern struct super_block * sysfs_sb;
-static struct address_space_operations sysfs_aops = {
+static const struct address_space_operations sysfs_aops = {
.readpage = simple_readpage,
.prepare_write = simple_prepare_write,
.commit_write = simple_commit_write
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 86f5f8d43d0f..f2bcccd1d6fc 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -465,7 +465,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,get_block);
}
-struct address_space_operations sysv_aops = {
+const struct address_space_operations sysv_aops = {
.readpage = sysv_readpage,
.writepage = sysv_writepage,
.sync_page = block_sync_page,
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 393a480e4deb..9dcc82120935 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -161,7 +161,7 @@ extern struct inode_operations sysv_dir_inode_operations;
extern struct inode_operations sysv_fast_symlink_inode_operations;
extern const struct file_operations sysv_file_operations;
extern const struct file_operations sysv_dir_operations;
-extern struct address_space_operations sysv_aops;
+extern const struct address_space_operations sysv_aops;
extern struct super_operations sysv_sops;
extern struct dentry_operations sysv_dentry_operations;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index e34b00e303f1..a59e5f33daf6 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -95,7 +95,7 @@ static int udf_adinicb_commit_write(struct file *file, struct page *page, unsign
return 0;
}
-struct address_space_operations udf_adinicb_aops = {
+const struct address_space_operations udf_adinicb_aops = {
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
.sync_page = block_sync_page,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 2983afd5e7fd..605f5111b6d8 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -132,7 +132,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping,block,udf_get_block);
}
-struct address_space_operations udf_aops = {
+const struct address_space_operations udf_aops = {
.readpage = udf_readpage,
.writepage = udf_writepage,
.sync_page = block_sync_page,
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 674bb40edc83..ba068a786563 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -113,6 +113,6 @@ out:
/*
* symlinks can't do much...
*/
-struct address_space_operations udf_symlink_aops = {
+const struct address_space_operations udf_symlink_aops = {
.readpage = udf_symlink_filler,
};
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 023e19ba5a2e..2f992387cc9e 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -47,9 +47,9 @@ extern struct inode_operations udf_dir_inode_operations;
extern const struct file_operations udf_dir_operations;
extern struct inode_operations udf_file_inode_operations;
extern const struct file_operations udf_file_operations;
-extern struct address_space_operations udf_aops;
-extern struct address_space_operations udf_adinicb_aops;
-extern struct address_space_operations udf_symlink_aops;
+extern const struct address_space_operations udf_aops;
+extern const struct address_space_operations udf_adinicb_aops;
+extern const struct address_space_operations udf_symlink_aops;
struct udf_fileident_bh
{
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index f2dbdf5a8769..488b5ff48afb 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -98,7 +98,9 @@ static u64 ufs_frag_map(struct inode *inode, sector_t frag)
u64 temp = 0L;
UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
- UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask);
+ UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
+ uspi->s_fpbshift, uspi->s_apbmask,
+ (unsigned long long)mask);
if (depth == 0)
return 0;
@@ -429,7 +431,7 @@ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head
if (!create) {
phys64 = ufs_frag_map(inode, fragment);
- UFSD("phys64 = %llu \n",phys64);
+ UFSD("phys64 = %llu\n", (unsigned long long)phys64);
if (phys64)
map_bh(bh_result, sb, phys64);
return 0;
@@ -574,7 +576,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,ufs_getfrag_block);
}
-struct address_space_operations ufs_aops = {
+const struct address_space_operations ufs_aops = {
.readpage = ufs_readpage,
.writepage = ufs_writepage,
.sync_page = block_sync_page,
@@ -605,39 +607,12 @@ static void ufs_set_inode_ops(struct inode *inode)
ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
-void ufs_read_inode (struct inode * inode)
+static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
- struct super_block * sb;
- struct ufs_sb_private_info * uspi;
- struct ufs_inode * ufs_inode;
- struct ufs2_inode *ufs2_inode;
- struct buffer_head * bh;
+ struct super_block *sb = inode->i_sb;
mode_t mode;
unsigned i;
- unsigned flags;
-
- UFSD("ENTER, ino %lu\n", inode->i_ino);
-
- sb = inode->i_sb;
- uspi = UFS_SB(sb)->s_uspi;
- flags = UFS_SB(sb)->s_flags;
-
- if (inode->i_ino < UFS_ROOTINO ||
- inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
- ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
- goto bad_inode;
- }
-
- bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
- if (!bh) {
- ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
- goto bad_inode;
- }
- if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
- goto ufs2_inode;
-
- ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));
/*
* Copy data to the in-core inode.
@@ -661,14 +636,11 @@ void ufs_read_inode (struct inode * inode)
inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
- inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */
- inode->i_version++;
ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
- ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
- ufsi->i_dir_start_lookup = 0;
+
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
@@ -677,24 +649,16 @@ void ufs_read_inode (struct inode * inode)
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
}
- ufsi->i_osync = 0;
-
- ufs_set_inode_ops(inode);
-
- brelse (bh);
-
- UFSD("EXIT\n");
- return;
+}
-bad_inode:
- make_bad_inode(inode);
- return;
+static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
+{
+ struct ufs_inode_info *ufsi = UFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ mode_t mode;
+ unsigned i;
-ufs2_inode :
UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
-
- ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
-
/*
* Copy data to the in-core inode.
*/
@@ -717,26 +681,64 @@ ufs2_inode :
inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
- inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/
-
- inode->i_version++;
ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
/*
ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
*/
- ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
ufsi->i_u1.u2_i_data[i] =
ufs2_inode->ui_u2.ui_addr.ui_db[i];
- }
- else {
+ } else {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
}
+}
+
+void ufs_read_inode(struct inode * inode)
+{
+ struct ufs_inode_info *ufsi = UFS_I(inode);
+ struct super_block * sb;
+ struct ufs_sb_private_info * uspi;
+ struct buffer_head * bh;
+
+ UFSD("ENTER, ino %lu\n", inode->i_ino);
+
+ sb = inode->i_sb;
+ uspi = UFS_SB(sb)->s_uspi;
+
+ if (inode->i_ino < UFS_ROOTINO ||
+ inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+ ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
+ inode->i_ino);
+ goto bad_inode;
+ }
+
+ bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
+ if (!bh) {
+ ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
+ inode->i_ino);
+ goto bad_inode;
+ }
+ if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+ struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
+
+ ufs2_read_inode(inode,
+ ufs2_inode + ufs_inotofsbo(inode->i_ino));
+ } else {
+ struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
+
+ ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
+ }
+
+ inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/
+ inode->i_version++;
+ ufsi->i_lastfrag =
+ (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
+ ufsi->i_dir_start_lookup = 0;
ufsi->i_osync = 0;
ufs_set_inode_ops(inode);
@@ -745,6 +747,9 @@ ufs2_inode :
UFSD("EXIT\n");
return;
+
+bad_inode:
+ make_bad_inode(inode);
}
static int ufs_update_inode(struct inode * inode, int do_sync)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 3e807b828e22..c40f81ba9b13 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1454,7 +1454,7 @@ xfs_vm_invalidatepage(
block_invalidatepage(page, offset);
}
-struct address_space_operations xfs_address_space_operations = {
+const struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 706d8c781b8a..2244e516b66a 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -40,7 +40,7 @@ typedef struct xfs_ioend {
struct work_struct io_work; /* xfsdatad work queue */
} xfs_ioend_t;
-extern struct address_space_operations xfs_address_space_operations;
+extern const struct address_space_operations xfs_address_space_operations;
extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 26fed0756f01..2af528dcfb04 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1520,7 +1520,7 @@ xfs_mapping_buftarg(
struct backing_dev_info *bdi;
struct inode *inode;
struct address_space *mapping;
- static struct address_space_operations mapping_aops = {
+ static const struct address_space_operations mapping_aops = {
.sync_page = block_sync_page,
.migratepage = fail_migrate_page,
};
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 70662371bb11..3d4f6dff2113 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -299,7 +299,8 @@ xfs_file_open(
STATIC int
xfs_file_close(
- struct file *filp)
+ struct file *filp,
+ fl_owner_t id)
{
return -bhv_vop_close(vn_from_inode(filp->f_dentry->d_inode), 0,
file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 12810baeb5d4..d9180020de63 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -419,16 +419,15 @@ xfs_vn_link(
int error;
ip = old_dentry->d_inode; /* inode being linked to */
- if (S_ISDIR(ip->i_mode))
- return -EPERM;
-
tdvp = vn_from_inode(dir);
vp = vn_from_inode(ip);
+ VN_HOLD(vp);
error = bhv_vop_link(tdvp, vp, dentry, NULL);
- if (likely(!error)) {
+ if (unlikely(error)) {
+ VN_RELE(vp);
+ } else {
VMODIFY(tdvp);
- VN_HOLD(vp);
xfs_validate_fields(ip, &vattr);
d_instantiate(dentry, ip);
}
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index aa26ab906c88..028eb17ec2ed 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -140,9 +140,7 @@ BUFFER_FNS(PrivateStart, unwritten);
#define current_pid() (current->pid)
#define current_fsuid(cred) (current->fsuid)
#define current_fsgid(cred) (current->fsgid)
-#define current_set_flags(f) (current->flags |= (f))
#define current_test_flags(f) (current->flags & (f))
-#define current_clear_flags(f) (current->flags & ~(f))
#define current_set_flags_nested(sp, f) \
(*(sp) = current->flags, current->flags |= (f))
#define current_clear_flags_nested(sp, f) \
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 35c6a01963a7..c42b3221b20c 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -93,7 +93,7 @@ typedef enum {
*/
static inline struct bhv_vnode *vn_from_inode(struct inode *inode)
{
- return (bhv_vnode_t *)list_entry(inode, bhv_vnode_t, v_inode);
+ return container_of(inode, bhv_vnode_t, v_inode);
}
static inline struct inode *vn_to_inode(struct bhv_vnode *vnode)
{
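
container_of() computes the address of the enclosing structure from the address of an embedded member and, unlike the open-coded list_entry() cast it replaces here, type-checks that the member really belongs to that structure. A hedged sketch of the idiom with an illustrative wrapper type:

	#include <linux/kernel.h>
	#include <linux/fs.h>

	struct example_node {
		int			cookie;
		struct inode		vfs_inode;	/* embedded member */
	};

	static inline struct example_node *EXAMPLE_NODE(struct inode *inode)
	{
		/* Maps &node->vfs_inode back to node. */
		return container_of(inode, struct example_node, vfs_inode);
	}
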
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h
index 1d8ff103201c..6e6e56fb352d 100644
--- a/fs/xfs/xfs_behavior.h
+++ b/fs/xfs/xfs_behavior.h
@@ -78,15 +78,12 @@
*
*/
-struct bhv_head_lock;
-
/*
* Behavior head. Head of the chain of behaviors.
* Contained within each virtualized object data structure.
*/
typedef struct bhv_head {
struct bhv_desc *bh_first; /* first behavior in chain */
- struct bhv_head_lock *bh_lockp; /* pointer to lock info struct */
} bhv_head_t;
/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5fa0adb7e173..86c1bf0bba9e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1961,9 +1961,9 @@ xfs_iunlink_remove(
xfs_agino_t agino;
xfs_agino_t next_agino;
xfs_buf_t *last_ibp;
- xfs_dinode_t *last_dip;
+ xfs_dinode_t *last_dip = NULL;
short bucket_index;
- int offset, last_offset;
+ int offset, last_offset = 0;
int error;
int agi_ok;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index d8f5d4cbe8b7..e730328636c3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1740,10 +1740,10 @@ xlog_write(xfs_mount_t * mp,
xlog_in_core_t **commit_iclog,
uint flags)
{
- xlog_t *log = mp->m_log;
+ xlog_t *log = mp->m_log;
xlog_ticket_t *ticket = (xlog_ticket_t *)tic;
+ xlog_in_core_t *iclog = NULL; /* ptr to current in-core log */
xlog_op_header_t *logop_head; /* ptr to log operation header */
- xlog_in_core_t *iclog; /* ptr to current in-core log */
__psint_t ptr; /* copy address into data region */
int len; /* # xlog_write() bytes 2 still copy */
int index; /* region index currently copying */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 55b4237c2153..3cb678e3a132 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -990,6 +990,8 @@ xlog_find_zeroed(
xfs_daddr_t num_scan_bblks;
int error, log_bbnum = log->l_logBBsize;
+ *blk_no = 0;
+
/* check totally zeroed log */
bp = xlog_get_bp(log, 1);
if (!bp)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 10dbf203c62f..4be5c0b2d296 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1721,15 +1721,14 @@ xfs_mount_log_sbunit(
* is present to prevent thrashing).
*/
+#ifdef CONFIG_HOTPLUG_CPU
/*
* hot-plug CPU notifier support.
*
- * We cannot use the hotcpu_register() function because it does
- * not allow notifier instances. We need a notifier per filesystem
- * as we need to be able to identify the filesystem to balance
- * the counters out. This is achieved by having a notifier block
- * embedded in the xfs_mount_t and doing pointer magic to get the
- * mount pointer from the notifier block address.
+ * We need a notifier per filesystem as we need to be able to identify
+ * the filesystem to balance the counters out. This is achieved by
+ * having a notifier block embedded in the xfs_mount_t and doing pointer
+ * magic to get the mount pointer from the notifier block address.
*/
STATIC int
xfs_icsb_cpu_notify(
@@ -1779,6 +1778,7 @@ xfs_icsb_cpu_notify(
return NOTIFY_OK;
}
+#endif /* CONFIG_HOTPLUG_CPU */
int
xfs_icsb_init_counters(
@@ -1791,9 +1791,11 @@ xfs_icsb_init_counters(
if (mp->m_sb_cnts == NULL)
return -ENOMEM;
+#ifdef CONFIG_HOTPLUG_CPU
mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
mp->m_icsb_notifier.priority = 0;
- register_cpu_notifier(&mp->m_icsb_notifier);
+ register_hotcpu_notifier(&mp->m_icsb_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
for_each_online_cpu(i) {
cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
@@ -1812,7 +1814,7 @@ xfs_icsb_destroy_counters(
xfs_mount_t *mp)
{
if (mp->m_sb_cnts) {
- unregister_cpu_notifier(&mp->m_icsb_notifier);
+ unregister_hotcpu_notifier(&mp->m_icsb_notifier);
free_percpu(mp->m_sb_cnts);
}
}
@@ -2026,7 +2028,7 @@ xfs_icsb_balance_counter(
xfs_sb_field_t field,
int flags)
{
- uint64_t count, resid = 0;
+ uint64_t count, resid;
int weight = num_online_cpus();
int s;
@@ -2058,6 +2060,7 @@ xfs_icsb_balance_counter(
break;
default:
BUG();
+ count = resid = 0; /* quiet, gcc */
break;
}
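
The rewritten comment above describes the standard per-object hotplug idiom: embed a notifier_block in the object, recover the object with container_of() in the callback, and register it with register_hotcpu_notifier()/unregister_hotcpu_notifier(), which compile away when CONFIG_HOTPLUG_CPU is disabled. A minimal sketch under those assumptions (illustrative names, not the XFS code):

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	struct example_counters {
		struct notifier_block	cpu_notifier;
		/* ... per-CPU state ... */
	};

	#ifdef CONFIG_HOTPLUG_CPU
	static int example_cpu_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
	{
		struct example_counters *ec =
			container_of(nb, struct example_counters, cpu_notifier);

		/* rebalance ec's per-CPU state on CPU_ONLINE/CPU_DEAD */
		return NOTIFY_OK;
	}
	#endif

	static void example_counters_init(struct example_counters *ec)
	{
	#ifdef CONFIG_HOTPLUG_CPU
		ec->cpu_notifier.notifier_call = example_cpu_notify;
		ec->cpu_notifier.priority = 0;
		register_hotcpu_notifier(&ec->cpu_notifier);
	#endif
	}
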
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 0c1e42b037ef..5a0b678956e0 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1929,7 +1929,7 @@ xfs_growfs_rt(
/*
* Initial error checking.
*/
- if (mp->m_rtdev_targp || mp->m_rbmip == NULL ||
+ if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
(nrblocks = in->newblocks) <= sbp->sb_rblocks ||
(sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
return XFS_ERROR(EINVAL);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index cb65c3a603f5..9dc88b380608 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -338,8 +338,6 @@ typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
typedef struct xfs_trans {
unsigned int t_magic; /* magic number */
xfs_log_callback_t t_logcb; /* log callback struct */
- struct xfs_trans *t_forw; /* async list pointers */
- struct xfs_trans *t_back; /* async list pointers */
unsigned int t_type; /* transaction type */
unsigned int t_log_res; /* amt of log space resvd */
unsigned int t_log_count; /* count for perm log res */
@@ -364,9 +362,11 @@ typedef struct xfs_trans {
long t_res_fdblocks_delta; /* on-disk only chg */
long t_frextents_delta;/* superblock freextents chg*/
long t_res_frextents_delta; /* on-disk only chg */
+#ifdef DEBUG
long t_ag_freeblks_delta; /* debugging counter */
long t_ag_flist_delta; /* debugging counter */
long t_ag_btree_delta; /* debugging counter */
+#endif
long t_dblocks_delta;/* superblock dblocks change */
long t_agcount_delta;/* superblock agcount change */
long t_imaxpct_delta;/* superblock imaxpct change */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 00a6b7dc24a0..23cfa5837728 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2603,8 +2603,7 @@ xfs_link(
vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
target_namelen = VNAMELEN(dentry);
- if (VN_ISDIR(src_vp))
- return XFS_ERROR(EPERM);
+ ASSERT(!VN_ISDIR(src_vp));
sip = xfs_vtoi(src_vp);
tdp = XFS_BHVTOI(target_dir_bdp);
@@ -2699,9 +2698,8 @@ xfs_link(
xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
error = xfs_bumplink(tp, sip);
- if (error) {
+ if (error)
goto abort_return;
- }
/*
* If this is a synchronous mount, make sure that the
@@ -2719,9 +2717,8 @@ xfs_link(
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
- if (error) {
+ if (error)
goto std_return;
- }
/* Fall through to std_return with error = 0. */
std_return:
@@ -2742,6 +2739,8 @@ std_return:
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
}
+
+
/*
* xfs_mkdir
*
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index dba70c62a16c..457c34b6eb09 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
-static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(t2_hae_lock);
__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
{
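
DEFINE_SPINLOCK() replaces the SPIN_LOCK_UNLOCKED static initialiser, which is being phased out because one shared initialiser cannot carry per-lock debugging state. The conversion is mechanical:

	#include <linux/spinlock.h>

	/* Old form:  static spinlock_t example_lock = SPIN_LOCK_UNLOCKED; */
	static DEFINE_SPINLOCK(example_lock);	/* declares and initialises in one step */
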
diff --git a/include/asm-alpha/hw_irq.h b/include/asm-alpha/hw_irq.h
index ca9d43b63502..a37db0f95092 100644
--- a/include/asm-alpha/hw_irq.h
+++ b/include/asm-alpha/hw_irq.h
@@ -2,8 +2,6 @@
#define _ALPHA_HW_IRQ_H
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-
extern volatile unsigned long irq_err_count;
#ifdef CONFIG_ALPHA_GENERIC
diff --git a/include/asm-arm/arch-at91rm9200/memory.h b/include/asm-arm/arch-at91rm9200/memory.h
index 3c327c404373..f985069e6d01 100644
--- a/include/asm-arm/arch-at91rm9200/memory.h
+++ b/include/asm-arm/arch-at91rm9200/memory.h
@@ -33,9 +33,7 @@
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
-#define __virt_to_bus__is_a_macro
#define __virt_to_bus(x) __virt_to_phys(x)
-#define __bus_to_virt__is_a_macro
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
diff --git a/include/asm-arm/arch-h720x/memory.h b/include/asm-arm/arch-h720x/memory.h
index 4a1bfd78a0fe..53e923dba76e 100644
--- a/include/asm-arm/arch-h720x/memory.h
+++ b/include/asm-arm/arch-h720x/memory.h
@@ -23,9 +23,7 @@
* There is something to do here later !, Mar 2000, Jungjun Kim
*/
-#define __virt_to_bus__is_a_macro
#define __virt_to_bus(x) __virt_to_phys(x)
-#define __bus_to_virt__is_a_macro
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
diff --git a/include/asm-arm/arch-imx/memory.h b/include/asm-arm/arch-imx/memory.h
index d09ae32cd2f4..5ad90127915f 100644
--- a/include/asm-arm/arch-imx/memory.h
+++ b/include/asm-arm/arch-imx/memory.h
@@ -30,9 +30,7 @@
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
-#define __virt_to_bus__is_a_macro
-#define __virt_to_bus(x) (x - PAGE_OFFSET + PHYS_OFFSET)
-#define __bus_to_virt__is_a_macro
-#define __bus_to_virt(x) (x - PHYS_OFFSET + PAGE_OFFSET)
+#define __virt_to_bus(x) (x - PAGE_OFFSET + PHYS_OFFSET)
+#define __bus_to_virt(x) (x - PHYS_OFFSET + PAGE_OFFSET)
#endif
diff --git a/include/asm-arm/arch-ixp23xx/ixp23xx.h b/include/asm-arm/arch-ixp23xx/ixp23xx.h
index d0a72201ee96..3927b1d61b17 100644
--- a/include/asm-arm/arch-ixp23xx/ixp23xx.h
+++ b/include/asm-arm/arch-ixp23xx/ixp23xx.h
@@ -295,15 +295,4 @@
#define IXP23XX_PCI_CPP_ADDR_BITS IXP23XX_PCI_CSR(0x0160)
-#ifndef __ASSEMBLY__
-/*
- * Is system memory on the XSI or CPP bus?
- */
-static inline unsigned ixp23xx_cpp_boot(void)
-{
- return (*IXP23XX_EXP_CFG0 & IXP23XX_EXP_CFG0_XSI_NOT_PRES);
-}
-#endif
-
-
#endif
diff --git a/include/asm-arm/arch-ixp23xx/platform.h b/include/asm-arm/arch-ixp23xx/platform.h
index 19a73b39c864..56e16d66645a 100644
--- a/include/asm-arm/arch-ixp23xx/platform.h
+++ b/include/asm-arm/arch-ixp23xx/platform.h
@@ -43,5 +43,15 @@ extern struct sys_timer ixp23xx_timer;
#define IXP23XX_UART_XTAL 14745600
+#ifndef __ASSEMBLY__
+/*
+ * Is system memory on the XSI or CPP bus?
+ */
+static inline unsigned ixp23xx_cpp_boot(void)
+{
+ return (*IXP23XX_EXP_CFG0 & IXP23XX_EXP_CFG0_XSI_NOT_PRES);
+}
+#endif
+
#endif
diff --git a/include/asm-arm/arch-ixp23xx/uncompress.h b/include/asm-arm/arch-ixp23xx/uncompress.h
index 013575e6a9a1..16c1110f2304 100644
--- a/include/asm-arm/arch-ixp23xx/uncompress.h
+++ b/include/asm-arm/arch-ixp23xx/uncompress.h
@@ -11,7 +11,7 @@
#ifndef __ASM_ARCH_UNCOMPRESS_H
#define __ASM_ARCH_UNCOMPRESS_H
-#include <asm/hardware.h>
+#include <asm/arch/ixp23xx.h>
#include <linux/serial_reg.h>
#define UART_BASE ((volatile u32 *)IXP23XX_UART1_PHYS)
diff --git a/include/asm-arm/arch-s3c2410/regs-dsc.h b/include/asm-arm/arch-s3c2410/regs-dsc.h
index 84aca61cbaa3..a0a124875164 100644
--- a/include/asm-arm/arch-s3c2410/regs-dsc.h
+++ b/include/asm-arm/arch-s3c2410/regs-dsc.h
@@ -7,25 +7,23 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * S3C2440 Signal Drive Strength Control
- *
- * Changelog:
- * 11-Aug-2004 BJD Created file
- * 25-Aug-2004 BJD Added the _SELECT_* defs for using with functions
+ * S3C2440/S3C2412 Signal Drive Strength Control
*/
#ifndef __ASM_ARCH_REGS_DSC_H
#define __ASM_ARCH_REGS_DSC_H "2440-dsc"
-#ifdef CONFIG_CPU_S3C2440
+#if defined(CONFIG_CPU_S3C2412)
+#define S3C2412_DSC0 S3C2410_GPIOREG(0xdc)
+#define S3C2412_DSC1 S3C2410_GPIOREG(0xe0)
+#endif
+
+#if defined(CONFIG_CPU_S3C2440)
#define S3C2440_DSC0 S3C2410_GPIOREG(0xc4)
#define S3C2440_DSC1 S3C2410_GPIOREG(0xc8)
-#define S3C2412_DSC0 S3C2410_GPIOREG(0xdc)
-#define S3C2412_DSC1 S3C2410_GPIOREG(0xe0)
-
#define S3C2440_SELECT_DSC0 (0)
#define S3C2440_SELECT_DSC1 (1<<31)
diff --git a/include/asm-arm/arch-s3c2410/regs-nand.h b/include/asm-arm/arch-s3c2410/regs-nand.h
index 7cff235e667a..c1470c695c33 100644
--- a/include/asm-arm/arch-s3c2410/regs-nand.h
+++ b/include/asm-arm/arch-s3c2410/regs-nand.h
@@ -39,10 +39,19 @@
#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
#define S3C2440_NFMECC1 S3C2410_NFREG(0x30)
-#define S3C2440_NFSECC S3C2410_NFREG(0x34)
+#define S3C2440_NFSECC S3C2410_NFREG(0x34)
#define S3C2440_NFSBLK S3C2410_NFREG(0x38)
#define S3C2440_NFEBLK S3C2410_NFREG(0x3C)
+#define S3C2412_NFSBLK S3C2410_NFREG(0x20)
+#define S3C2412_NFEBLK S3C2410_NFREG(0x24)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC_ERR0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFMECC_ERR1 S3C2410_NFREG(0x30)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2412_NFMECC1 S3C2410_NFREG(0x38)
+#define S3C2412_NFSECC S3C2410_NFREG(0x3C)
+
#define S3C2410_NFCONF_EN (1<<15)
#define S3C2410_NFCONF_512BYTE (1<<14)
#define S3C2410_NFCONF_4STEP (1<<13)
@@ -77,5 +86,42 @@
#define S3C2440_NFSTAT_RnB_CHANGE (1<<2)
#define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONF_ECCCLKCON (1<<30)
+#define S3C2412_NFCONF_ECC_MLC (1<<24)
+#define S3C2412_NFCONF_TACLS_MASK (7<<12) /* 1 extra bit of Tacls */
+
+#define S3C2412_NFCONT_ECC4_DIRWR (1<<18)
+#define S3C2412_NFCONT_LOCKTIGHT (1<<17)
+#define S3C2412_NFCONT_SOFTLOCK (1<<16)
+#define S3C2412_NFCONT_ECC4_ENCINT (1<<13)
+#define S3C2412_NFCONT_ECC4_DECINT (1<<12)
+#define S3C2412_NFCONT_MAIN_ECC_LOCK (1<<7)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE1 (1<<2)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+
+#define S3C2412_NFSTAT_ECC_ENCDONE (1<<7)
+#define S3C2412_NFSTAT_ECC_DECDONE (1<<6)
+#define S3C2412_NFSTAT_ILLEGAL_ACCESS (1<<5)
+#define S3C2412_NFSTAT_RnB_CHANGE (1<<4)
+#define S3C2412_NFSTAT_nFCE1 (1<<3)
+#define S3C2412_NFSTAT_nFCE0 (1<<2)
+#define S3C2412_NFSTAT_Res1 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
+#define S3C2412_NFECCERR_SERRDATA(x) (((x) >> 21) & 0xf)
+#define S3C2412_NFECCERR_SERRBIT(x) (((x) >> 18) & 0x7)
+#define S3C2412_NFECCERR_MERRDATA(x) (((x) >> 7) & 0x3ff)
+#define S3C2412_NFECCERR_MERRBIT(x) (((x) >> 4) & 0x7)
+#define S3C2412_NFECCERR_SPARE_ERR(x) (((x) >> 2) & 0x3)
+#define S3C2412_NFECCERR_MAIN_ERR(x) (((x) >> 2) & 0x3)
+#define S3C2412_NFECCERR_NONE (0)
+#define S3C2412_NFECCERR_1BIT (1)
+#define S3C2412_NFECCERR_MULTIBIT (2)
+#define S3C2412_NFECCERR_ECCAREA (3)
+
+
+
#endif /* __ASM_ARM_REGS_NAND */
diff --git a/include/asm-arm/assembler.h b/include/asm-arm/assembler.h
index d53bafa9bf1c..fce832820825 100644
--- a/include/asm-arm/assembler.h
+++ b/include/asm-arm/assembler.h
@@ -55,30 +55,6 @@
#define PLD(code...)
#endif
-#define MODE_USR USR_MODE
-#define MODE_FIQ FIQ_MODE
-#define MODE_IRQ IRQ_MODE
-#define MODE_SVC SVC_MODE
-
-#define DEFAULT_FIQ MODE_FIQ
-
-/*
- * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
- */
-#ifdef __STDC__
-#define LOADREGS(cond, base, reglist...)\
- ldm##cond base,reglist
-#else
-#define LOADREGS(cond, base, reglist...)\
- ldm/**/cond base,reglist
-#endif
-
-/*
- * Build a return instruction for this processor type.
- */
-#define RETINSTR(instr, regs...)\
- instr regs
-
/*
* Enable and disable interrupts
*/
@@ -117,18 +93,6 @@
msr cpsr_c, \oldcpsr
.endm
-/*
- * These two are used to save LR/restore PC over a user-based access.
- * The old 26-bit architecture requires that we do. On 32-bit
- * architecture, we can safely ignore this requirement.
- */
- .macro save_lr
- .endm
-
- .macro restore_pc
- mov pc, lr
- .endm
-
#define USER(x...) \
9999: x; \
.section __ex_table,"a"; \
diff --git a/include/asm-arm/bugs.h b/include/asm-arm/bugs.h
index 4c80ec519d45..ca54eb0f12d7 100644
--- a/include/asm-arm/bugs.h
+++ b/include/asm-arm/bugs.h
@@ -10,8 +10,12 @@
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
+#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
#define check_bugs() check_writebuffer_bugs()
+#else
+#define check_bugs() do { } while (0)
+#endif
#endif
diff --git a/include/asm-arm/domain.h b/include/asm-arm/domain.h
index f8ea2de4848e..4c2885abbe6c 100644
--- a/include/asm-arm/domain.h
+++ b/include/asm-arm/domain.h
@@ -50,6 +50,8 @@
#define domain_val(dom,type) ((type) << (2*(dom)))
#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_MMU
#define set_domain(x) \
do { \
__asm__ __volatile__( \
@@ -66,5 +68,10 @@
set_domain(thread->cpu_domain); \
} while (0)
+#else
+#define set_domain(x) do { } while (0)
+#define modify_domain(dom,type) do { } while (0)
+#endif
+
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/fpstate.h b/include/asm-arm/fpstate.h
index 132c3c5628b2..6af4e6bd1290 100644
--- a/include/asm-arm/fpstate.h
+++ b/include/asm-arm/fpstate.h
@@ -72,6 +72,14 @@ union fp_state {
#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
+struct crunch_state {
+ unsigned int mvdx[16][2];
+ unsigned int mvax[4][3];
+ unsigned int dspsc[2];
+};
+
+#define CRUNCH_SIZE sizeof(struct crunch_state)
+
#endif
#endif
diff --git a/include/asm-arm/hardware/locomo.h b/include/asm-arm/hardware/locomo.h
index 5f10048ec54e..22dfb1737768 100644
--- a/include/asm-arm/hardware/locomo.h
+++ b/include/asm-arm/hardware/locomo.h
@@ -111,6 +111,8 @@
#define LOCOMO_ALS 0x00 /* Adjust light cycle */
#define LOCOMO_ALD 0x04 /* Adjust light duty */
+#define LOCOMO_ALC_EN 0x8000
+
/* Backlight controller: TFT signal */
#define LOCOMO_BACKLIGHT 0x38
#define LOCOMO_TC 0x00 /* TFT control signal */
@@ -203,4 +205,7 @@ void locomo_gpio_write(struct locomo_dev *ldev, unsigned int bits, unsigned int
/* M62332 control function */
void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel);
+/* Frontlight control */
+void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
+
#endif
diff --git a/include/asm-arm/mach/map.h b/include/asm-arm/mach/map.h
index e8ea67c97c73..cef5364ed5fe 100644
--- a/include/asm-arm/mach/map.h
+++ b/include/asm-arm/mach/map.h
@@ -16,8 +16,6 @@ struct map_desc {
unsigned int type;
};
-struct meminfo;
-
#define MT_DEVICE 0
#define MT_CACHECLEAN 1
#define MT_MINICLEAN 2
@@ -28,7 +26,8 @@ struct meminfo;
#define MT_IXP2000_DEVICE 7
#define MT_NONSHARED_DEVICE 8
-extern void create_memmap_holes(struct meminfo *);
-extern void memtable_init(struct meminfo *);
+#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
-extern void setup_io_desc(void);
+#else
+#define iotable_init(map,num) do { } while (0)
+#endif
diff --git a/include/asm-arm/mach/pci.h b/include/asm-arm/mach/pci.h
index 25d540ed0079..923e0ca66200 100644
--- a/include/asm-arm/mach/pci.h
+++ b/include/asm-arm/mach/pci.h
@@ -28,7 +28,7 @@ struct hw_pci {
struct pci_sys_data {
struct list_head node;
int busnr; /* primary bus number */
- unsigned long mem_offset; /* bus->cpu memory mapping offset */
+ u64 mem_offset; /* bus->cpu memory mapping offset */
unsigned long io_offset; /* bus->cpu IO mapping offset */
struct pci_bus *bus; /* PCI bus */
struct resource *resource[3]; /* Primary PCI bus resources */
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 731e321a57d1..94f973b704f1 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -2,6 +2,7 @@
* linux/include/asm-arm/memory.h
*
* Copyright (C) 2000-2002 Russell King
+ * modification for nommu, Hyok S. Choi, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,8 @@
#include <asm/arch/memory.h>
#include <asm/sizes.h>
+#ifdef CONFIG_MMU
+
#ifndef TASK_SIZE
/*
* TASK_SIZE - the maximum size of a user space task.
@@ -48,6 +51,60 @@
#endif
/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULE_END (PAGE_OFFSET)
+#define MODULE_START (MODULE_END - 16*1048576)
+
+#if TASK_SIZE > MODULE_START
+#error Top of user space clashes with start of module space
+#endif
+
+/*
+ * The XIP kernel gets mapped at the bottom of the module vm area.
+ * Since we use sections to map it, this macro replaces the physical address
+ * with its virtual address while keeping offset from the base section.
+ */
+#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))
+
+#else /* CONFIG_MMU */
+
+/*
+ * The limitation of user task size can grow up to the end of free ram region.
+ * It is difficult to define and perhaps will never meet the original meaning
+ * of this define that was meant to.
+ * Fortunately, there is no reference for this in noMMU mode, for now.
+ */
+#ifndef TASK_SIZE
+#define TASK_SIZE (CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef TASK_UNMAPPED_BASE
+#define TASK_UNMAPPED_BASE UL(0x00000000)
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET (CONFIG_DRAM_BASE)
+#endif
+
+#ifndef END_MEM
+#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET (PHYS_OFFSET)
+#endif
+
+/*
+ * The module can be at any place in ram in nommu mode.
+ */
+#define MODULE_END (END_MEM)
+#define MODULE_START (PHYS_OFFSET)
+
+#endif /* !CONFIG_MMU */
+
+/*
* Size of DMA-consistent memory region. Must be multiple of 2M,
* between 2MB and 14MB inclusive.
*/
@@ -71,24 +128,6 @@
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
-/*
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 32MB of the kernel text.
- */
-#define MODULE_END (PAGE_OFFSET)
-#define MODULE_START (MODULE_END - 16*1048576)
-
-#if TASK_SIZE > MODULE_START
-#error Top of user space clashes with start of module space
-#endif
-
-/*
- * The XIP kernel gets mapped at the bottom of the module vm area.
- * Since we use sections to map it, this macro replaces the physical address
- * with its virtual address while keeping offset from the base section.
- */
-#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))
-
#ifndef __ASSEMBLY__
/*
diff --git a/include/asm-arm/mmu.h b/include/asm-arm/mmu.h
index a457cb71984f..23dde52e0945 100644
--- a/include/asm-arm/mmu.h
+++ b/include/asm-arm/mmu.h
@@ -1,6 +1,8 @@
#ifndef __ARM_MMU_H
#define __ARM_MMU_H
+#ifdef CONFIG_MMU
+
typedef struct {
#if __LINUX_ARM_ARCH__ >= 6
unsigned int id;
@@ -13,4 +15,18 @@ typedef struct {
#define ASID(mm) (0)
#endif
+#else
+
+/*
+ * From nommu.h:
+ * Copyright (C) 2002, David McCullough <davidm@snapgear.com>
+ * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
+ */
+typedef struct {
+ struct vm_list_struct *vmlist;
+ unsigned long end_brk;
+} mm_context_t;
+
+#endif
+
#endif
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 81c59facea3b..9fadb01e030d 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -82,6 +82,7 @@ static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
if (prev != next) {
@@ -91,6 +92,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (cache_is_vivt())
cpu_clear(cpu, prev->cpu_vm_mask);
}
+#endif
}
#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/include/asm-arm/page-nommu.h b/include/asm-arm/page-nommu.h
new file mode 100644
index 000000000000..a1bcad060480
--- /dev/null
+++ b/include/asm-arm/page-nommu.h
@@ -0,0 +1,51 @@
+/*
+ * linux/include/asm-arm/page-nommu.h
+ *
+ * Copyright (C) 2004 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PAGE_NOMMU_H
+#define _ASMARM_PAGE_NOMMU_H
+
+#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
+#define KTHREAD_SIZE (8192)
+#else
+#define KTHREAD_SIZE PAGE_SIZE
+#endif
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) ((x)[0])
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgprot(x) (x)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 66cfeb5290ea..63d12f0244c5 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -23,6 +23,12 @@
#ifndef __ASSEMBLY__
+#ifndef CONFIG_MMU
+
+#include "page-nommu.h"
+
+#else
+
#include <asm/glue.h>
/*
@@ -171,6 +177,8 @@ typedef unsigned long pgprot_t;
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
+#endif /* CONFIG_MMU */
+
#include <asm/memory.h>
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index c4ac2e67768d..4d4394552911 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -16,6 +16,10 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#define check_pgt_cache() do { } while (0)
+
+#ifdef CONFIG_MMU
+
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
@@ -32,8 +36,6 @@ extern void free_pgd_slow(pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(pgd) free_pgd_slow(pgd)
-#define check_pgt_cache() do { } while (0)
-
/*
* Allocate one PTE table.
*
@@ -126,4 +128,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
+#endif /* CONFIG_MMU */
+
#endif
diff --git a/include/asm-arm/pgtable-nommu.h b/include/asm-arm/pgtable-nommu.h
new file mode 100644
index 000000000000..b13322dccf41
--- /dev/null
+++ b/include/asm-arm/pgtable-nommu.h
@@ -0,0 +1,123 @@
+/*
+ * linux/include/asm-arm/pgtable-nommu.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ * Copyright (C) 2004 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGTABLE_NOMMU_H
+#define _ASMARM_PGTABLE_NOMMU_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/io.h>
+
+/*
+ * Trivial page table functions.
+ */
+#define pgd_present(pgd) (1)
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr) (1)
+#define pmd_offset(a, b) ((void *)0)
+/* FIXME */
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PGDIR_SHIFT 21
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+/* FIXME */
+
+#define PAGE_NONE __pgprot(0)
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_COPY __pgprot(0)
+#define PAGE_READONLY __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+
+//extern void paging_init(struct meminfo *, struct machine_desc *);
+#define swapper_pg_dir ((pgd_t *) 0)
+
+#define __swp_type(x) (0)
+#define __swp_offset(x) (0)
+#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+
+typedef pte_t *pte_addr_t;
+
+static inline int pte_file(pte_t pte) { return 0; }
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(0))
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot) __pgprot(0)
+#define pgprot_writecombine(prot) __pgprot(0)
+
+
+/*
+ * These would be in other places but having them here reduces the diffs.
+ */
+extern unsigned int kobjsize(const void *objp);
+extern int is_in_rom(unsigned long);
+
+/*
+ * No page table caches to initialise.
+ */
+#define pgtable_cache_init() do { } while (0)
+#define io_remap_page_range remap_page_range
+#define io_remap_pfn_range remap_pfn_range
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
+
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define VMALLOC_START 0
+#define VMALLOC_END 0xffffffff
+
+#define FIRST_USER_ADDRESS (0)
+
+#else
+
+/*
+ * dummy tlb and user structures.
+ */
+#define v3_tlb_fns (0)
+#define v4_tlb_fns (0)
+#define v4wb_tlb_fns (0)
+#define v4wbi_tlb_fns (0)
+#define v6_tlb_fns (0)
+
+#define v3_user_fns (0)
+#define v4_user_fns (0)
+#define v4_mc_user_fns (0)
+#define v4wb_user_fns (0)
+#define v4wt_user_fns (0)
+#define v6_user_fns (0)
+#define xscale_mc_user_fns (0)
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index e85c08d78dda..8d3919c6458c 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -11,9 +11,15 @@
#define _ASMARM_PGTABLE_H
#include <asm-generic/4level-fixup.h>
+#include <asm/proc-fns.h>
+
+#ifndef CONFIG_MMU
+
+#include "pgtable-nommu.h"
+
+#else
#include <asm/memory.h>
-#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>
/*
@@ -378,4 +384,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_MMU */
+
#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h
index e9310895e79d..1bde92cdaebd 100644
--- a/include/asm-arm/proc-fns.h
+++ b/include/asm-arm/proc-fns.h
@@ -165,6 +165,8 @@
#include <asm/memory.h>
+#ifdef CONFIG_MMU
+
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#define cpu_get_pgd() \
@@ -176,6 +178,8 @@
(pgd_t *)phys_to_virt(pg); \
})
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/include/asm-arm/ptrace.h b/include/asm-arm/ptrace.h
index 2bebe3dc0a30..5a8ef787dbf8 100644
--- a/include/asm-arm/ptrace.h
+++ b/include/asm-arm/ptrace.h
@@ -25,6 +25,11 @@
#define PTRACE_SET_SYSCALL 23
+/* PTRACE_SYSCALL is 24 */
+
+#define PTRACE_GETCRUNCHREGS 25
+#define PTRACE_SETCRUNCHREGS 26
+
/*
* PSR bits
*/
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index cfbccb63c67b..c46b5c84275f 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -59,6 +59,7 @@ struct thread_info {
struct cpu_context_save cpu_context; /* cpu context */
__u8 used_cp[16]; /* thread used copro */
unsigned long tp_value;
+ struct crunch_state crunchstate;
union fp_state fpstate __attribute__((aligned(8)));
union vfp_state vfpstate;
struct restart_block restart_block;
@@ -101,6 +102,11 @@ extern void free_thread_info(struct thread_info *);
#define thread_saved_fp(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+extern void crunch_task_disable(struct thread_info *);
+extern void crunch_task_copy(struct thread_info *, void *);
+extern void crunch_task_restore(struct thread_info *, void *);
+extern void crunch_task_release(struct thread_info *);
+
extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index 064f0f5e8e2b..87aba57a66c4 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -41,15 +41,24 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+/*
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
-#define USER_DS TASK_SIZE
-
#define get_ds() (KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS TASK_SIZE
#define get_fs() (current_thread_info()->addr_limit)
-static inline void set_fs (mm_segment_t fs)
+static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
: "cc"); \
flag; })
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
* fixup code, but there are a few places where it intrudes on the
* main code path. When we only write to user space, there is no
* problem.
- *
- * The "__xxx" versions of the user access functions do not verify the
- * address space - it must have been done previously with a separate
- * "access_ok()" call.
- *
- * The "xxx_error" versions set the third argument to EFAULT if an
- * error occurs, and leave it unchanged on success. Note that these
- * versions are void (ie, don't return a value as such).
*/
-
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-extern int __get_user_bad(void);
#define __get_user_x(__r2,__p,__e,__s,__i...) \
__asm__ __volatile__ ( \
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
__e; \
})
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2) \
+ : "ip", "lr", "cc")
+
+#define put_user(x,p) \
+ ({ \
+ const register typeof(*(p)) __r2 asm("r2") = (x); \
+ const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+ __put_user_x(__r2, __p, __e, 1); \
+ break; \
+ case 2: \
+ __put_user_x(__r2, __p, __e, 2); \
+ break; \
+ case 4: \
+ __put_user_x(__r2, __p, __e, 4); \
+ break; \
+ case 8: \
+ __put_user_x(__r2, __p, __e, 8); \
+ break; \
+ default: __e = __put_user_bad(); break; \
+ } \
+ __e; \
+ })
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS KERNEL_DS
+
+#define segment_eq(a,b) (1)
+#define __addr_ok(addr) (1)
+#define __range_ok(addr,size) (0)
+#define get_fs() (KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p) __get_user(x,p)
+#define put_user(x,p) __put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success. Note that these
+ * versions are void (ie, don't return a value as such).
+ */
#define __get_user(x,ptr) \
({ \
long __gu_err = 0; \
@@ -212,43 +277,6 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-extern int __put_user_1(void *, unsigned int);
-extern int __put_user_2(void *, unsigned int);
-extern int __put_user_4(void *, unsigned int);
-extern int __put_user_8(void *, unsigned long long);
-extern int __put_user_bad(void);
-
-#define __put_user_x(__r2,__p,__e,__s) \
- __asm__ __volatile__ ( \
- __asmeq("%0", "r0") __asmeq("%2", "r2") \
- "bl __put_user_" #__s \
- : "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
- : "ip", "lr", "cc")
-
-#define put_user(x,p) \
- ({ \
- const register typeof(*(p)) __r2 asm("r2") = (x); \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
- register int __e asm("r0"); \
- switch (sizeof(*(__p))) { \
- case 1: \
- __put_user_x(__r2, __p, __e, 1); \
- break; \
- case 2: \
- __put_user_x(__r2, __p, __e, 2); \
- break; \
- case 4: \
- __put_user_x(__r2, __p, __e, 4); \
- break; \
- case 8: \
- __put_user_x(__r2, __p, __e, 8); \
- break; \
- default: __e = __put_user_bad(); break; \
- } \
- __e; \
- })
-
#define __put_user(x,ptr) \
({ \
long __pu_err = 0; \
@@ -353,66 +381,54 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
-extern unsigned long __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
-extern unsigned long __arch_strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __arch_strnlen_user(const char __user *s, long n);
+
+#ifdef CONFIG_MMU
+extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
+#endif
+
+extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
+extern unsigned long __strnlen_user(const char __user *s, long n);
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
- n = __arch_copy_from_user(to, from, n);
+ n = __copy_from_user(to, from, n);
else /* security hole - plug it */
memzero(to, n);
return n;
}
-static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- return __arch_copy_from_user(to, from, n);
-}
-
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __arch_copy_to_user(to, from, n);
+ n = __copy_to_user(to, from, n);
return n;
}
-static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- return __arch_copy_to_user(to, from, n);
-}
-
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
-static inline unsigned long clear_user (void __user *to, unsigned long n)
+static inline unsigned long clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __arch_clear_user(to, n);
+ n = __clear_user(to, n);
return n;
}
-static inline unsigned long __clear_user (void __user *to, unsigned long n)
-{
- return __arch_clear_user(to, n);
-}
-
-static inline long strncpy_from_user (char *dst, const char __user *src, long count)
+static inline long strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
- res = __arch_strncpy_from_user(dst, src, count);
+ res = __strncpy_from_user(dst, src, count);
return res;
}
-static inline long __strncpy_from_user (char *dst, const char __user *src, long count)
-{
- return __arch_strncpy_from_user(dst, src, count);
-}
-
#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
static inline long strnlen_user(const char __user *s, long n)
@@ -420,7 +436,7 @@ static inline long strnlen_user(const char __user *s, long n)
unsigned long res = 0;
if (__addr_ok(s))
- res = __arch_strnlen_user(s, n);
+ res = __strnlen_user(s, n);
return res;
}
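
The nommu branch above collapses the user-access primitives to plain memory
operations: access_ok() always succeeds and __copy_from_user() degrades to a
memcpy(). A minimal user-space sketch of that pattern (illustrative only, not
kernel code; names and values are made up for the example):

#include <stdio.h>
#include <string.h>

/* single address space: every range is "ok" */
#define __range_ok(addr, size)  (0)
#define access_ok(addr, size)   (__range_ok(addr, size) == 0)

static unsigned long copy_from(void *to, const void *from, unsigned long n)
{
	if (access_ok(from, n))
		memcpy(to, from, n);	/* no fault handling needed without an MMU */
	else
		memset(to, 0, n);	/* plug the hole, as the MMU path does */
	return 0;			/* bytes NOT copied */
}

int main(void)
{
	char src[] = "hello", dst[8];
	unsigned long left = copy_from(dst, src, sizeof(src));
	printf("%s (%lu bytes left)\n", dst, left);
	return 0;
}
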
diff --git a/include/asm-arm/ucontext.h b/include/asm-arm/ucontext.h
index 9e6f7ca9f5ae..bf65e9f4525d 100644
--- a/include/asm-arm/ucontext.h
+++ b/include/asm-arm/ucontext.h
@@ -35,6 +35,17 @@ struct ucontext {
* bytes, to prevent unpredictable padding in the signal frame.
*/
+#ifdef CONFIG_CRUNCH
+#define CRUNCH_MAGIC 0x5065cf03
+#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
+
+struct crunch_sigframe {
+ unsigned long magic;
+ unsigned long size;
+ struct crunch_state storage;
+} __attribute__((__aligned__(8)));
+#endif
+
#ifdef CONFIG_IWMMXT
/* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */
#define IWMMXT_MAGIC 0x12ef842a
@@ -74,6 +85,9 @@ struct vfp_sigframe
* one of these.
*/
struct aux_sigframe {
+#ifdef CONFIG_CRUNCH
+ struct crunch_sigframe crunch;
+#endif
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
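
The new crunch_sigframe uses the same magic/size framing as the existing
iwmmxt block, so a signal-frame parser can recognise the block or skip past
it by its recorded size. A small user-space sketch of that framing idea (the
struct layout is illustrative, not the kernel definition; only the magic
value is taken from the patch):

#include <stdio.h>
#include <string.h>

struct coproc_block {
	unsigned long magic;
	unsigned long size;	/* size of the whole block, including header */
	unsigned char state[64];
};

#define DEMO_MAGIC 0x5065cf03UL	/* same value the patch uses for CRUNCH_MAGIC */

int main(void)
{
	struct coproc_block b;

	b.magic = DEMO_MAGIC;
	b.size  = sizeof(b);
	memset(b.state, 0, sizeof(b.state));

	if (b.magic == DEMO_MAGIC && b.size >= sizeof(b))
		printf("found coprocessor block, %lu bytes\n", b.size);
	return 0;
}
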
diff --git a/include/asm-cris/hw_irq.h b/include/asm-cris/hw_irq.h
index 341536a234e9..298066020af2 100644
--- a/include/asm-cris/hw_irq.h
+++ b/include/asm-cris/hw_irq.h
@@ -1,7 +1,5 @@
#ifndef _ASM_HW_IRQ_H
#define _ASM_HW_IRQ_H
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-
#endif
diff --git a/include/asm-cris/irq.h b/include/asm-cris/irq.h
index 4b338792218b..998cce9f3200 100644
--- a/include/asm-cris/irq.h
+++ b/include/asm-cris/irq.h
@@ -1,11 +1,6 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
#include <asm/arch/irq.h>
static inline int irq_canonicalize(int irq)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 845cb67ad8ea..8ceab7bcd8b4 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -51,4 +51,10 @@
__ret; \
})
+#ifdef CONFIG_SMP
+# define WARN_ON_SMP(x) WARN_ON(x)
+#else
+# define WARN_ON_SMP(x) do { } while (0)
+#endif
+
#endif
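
WARN_ON_SMP() lets callers assert SMP-only invariants (e.g. "this lock is
held") without any cost on UP builds. A stand-alone sketch of the same
pattern, with fprintf standing in for the kernel's WARN_ON() machinery
(illustrative only):

#include <stdio.h>

#ifdef CONFIG_SMP
# define WARN_ON_SMP(x)	do { if (x) fprintf(stderr, "warning: %s\n", #x); } while (0)
#else
# define WARN_ON_SMP(x)	do { } while (0)
#endif

int main(void)
{
	int lock_held = 0;

	WARN_ON_SMP(!lock_held);	/* no-op unless built with -DCONFIG_SMP */
	printf("lock_held=%d\n", lock_held);
	return 0;
}
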
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9d11550b4818..db5a3732f106 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -58,6 +58,20 @@
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
+ /* Kernel symbol table: Normal unused symbols */ \
+ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
+ *(__ksymtab_unused) \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+ *(__ksymtab_unused_gpl) \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ } \
+ \
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
@@ -79,6 +93,20 @@
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
+ /* Kernel symbol table: Normal unused symbols */ \
+ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
+ *(__kcrctab_unused) \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+ *(__kcrctab_unused_gpl) \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ } \
+ \
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index d79e9ee10fd7..c61bd1a17f37 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -5,6 +5,8 @@
#include <asm/types.h>
+#include <linux/types.h>
+
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 1d8362cb2c5d..2c1e371cebb6 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -111,24 +111,12 @@ extern void init_apic_mappings (void);
extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
extern void enable_APIC_timer(void);
extern void enable_NMI_through_LVT0 (void * dummy);
-extern unsigned int nmi_watchdog;
-#define NMI_NONE 0
-#define NMI_IO_APIC 1
-#define NMI_LOCAL_APIC 2
-#define NMI_INVALID 3
-
extern int disable_timer_pin_1;
void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index e7252c216ca8..b1bc7b1b64b0 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -7,8 +7,6 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>
-#include <asm/node.h>
-
struct i386_cpu {
struct cpu cpu;
};
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index 3ecedbafa8ce..d314ebb3d59e 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -72,6 +72,7 @@
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index 456db8501c09..b1c7650dc7b9 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -23,4 +23,6 @@ extern void __delay(unsigned long loops);
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
+void use_tsc_delay(void);
+
#endif /* defined(_I386_DELAY_H) */
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
new file mode 100644
index 000000000000..2280f6272f80
--- /dev/null
+++ b/include/asm-i386/dwarf2.h
@@ -0,0 +1,54 @@
+#ifndef _DWARF2_H
+#define _DWARF2_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should be only included in pure assembly files"
+#endif
+
+/*
+ Macros for dwarf2 CFI unwind table entries.
+ See "as.info" for details on these pseudo ops. Unfortunately
+ they are only supported in very new binutils, so define them
+ away for older versions.
+ */
+
+#ifdef CONFIG_UNWIND_INFO
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state
+
+#else
+
+/* Due to the structure of pre-existing code, don't use assembler line
+ comment character # to ignore the arguments. Instead, use a dummy macro. */
+.macro ignore a=0, b=0, c=0, d=0
+.endm
+
+#define CFI_STARTPROC ignore
+#define CFI_ENDPROC ignore
+#define CFI_DEF_CFA ignore
+#define CFI_DEF_CFA_REGISTER ignore
+#define CFI_DEF_CFA_OFFSET ignore
+#define CFI_ADJUST_CFA_OFFSET ignore
+#define CFI_OFFSET ignore
+#define CFI_REL_OFFSET ignore
+#define CFI_REGISTER ignore
+#define CFI_RESTORE ignore
+#define CFI_REMEMBER_STATE ignore
+#define CFI_RESTORE_STATE ignore
+
+#endif
+
+#endif
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 4153d80e4d2b..1eac92cb5b16 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/system.h> /* for savesegment */
#include <asm/auxvec.h>
+#include <asm/desc.h>
#include <linux/utsname.h>
@@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
-#define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL))
-#define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE)
-#define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall)
+#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
+#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
+
+#ifdef CONFIG_COMPAT_VDSO
+# define VDSO_COMPAT_BASE VDSO_HIGH_BASE
+# define VDSO_PRELINK VDSO_HIGH_BASE
+#else
+# define VDSO_COMPAT_BASE VDSO_BASE
+# define VDSO_PRELINK 0
+#endif
+
+#define VDSO_COMPAT_SYM(x) \
+ (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK)
+
+#define VDSO_SYM(x) \
+ (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK)
+
+#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
+#define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE)
+
extern void __kernel_vsyscall;
+#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int executable_stack);
+
+extern unsigned int vdso_enabled;
+
#define ARCH_DLINFO \
-do { \
- NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \
+do if (vdso_enabled) { \
+ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \
} while (0)
/*
@@ -148,15 +175,15 @@ do { \
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the vsyscall DSO was being used.
*/
-#define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum)
+#define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum)
#define ELF_CORE_WRITE_EXTRA_PHDRS \
do { \
const struct elf_phdr *const vsyscall_phdrs = \
- (const struct elf_phdr *) (VSYSCALL_BASE \
- + VSYSCALL_EHDR->e_phoff); \
+ (const struct elf_phdr *) (VDSO_HIGH_BASE \
+ + VDSO_HIGH_EHDR->e_phoff); \
int i; \
Elf32_Off ofs = 0; \
- for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \
+ for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
struct elf_phdr phdr = vsyscall_phdrs[i]; \
if (phdr.p_type == PT_LOAD) { \
BUG_ON(ofs != 0); \
@@ -174,10 +201,10 @@ do { \
#define ELF_CORE_WRITE_EXTRA_DATA \
do { \
const struct elf_phdr *const vsyscall_phdrs = \
- (const struct elf_phdr *) (VSYSCALL_BASE \
- + VSYSCALL_EHDR->e_phoff); \
+ (const struct elf_phdr *) (VDSO_HIGH_BASE \
+ + VDSO_HIGH_EHDR->e_phoff); \
int i; \
- for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \
+ for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
if (vsyscall_phdrs[i].p_type == PT_LOAD) \
DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \
PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index f7e068f4d2f9..a48cc3f7ccc6 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -51,7 +51,7 @@
*/
enum fixed_addresses {
FIX_HOLE,
- FIX_VSYSCALL,
+ FIX_VDSO,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
@@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx,
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-/*
- * This is the range that is readable by user mode, and things
- * acting like user mode such as get_user_pages.
- */
-#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL))
-#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
-
-
extern void __this_fixmap_does_not_exist(void);
/*
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 95d3fd090298..87e5a351d881 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -19,6 +19,8 @@
struct hw_interrupt_type;
+#define NMI_VECTOR 0x02
+
/*
* Various low-level irq details needed by irq.c, process.c,
* time.c, io_apic.c and smp.c
@@ -67,14 +69,4 @@ extern atomic_t irq_mis_count;
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-#if defined(CONFIG_X86_IO_APIC)
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-{
- if (IO_APIC_IRQ(i))
- send_IPI_self(IO_APIC_VECTOR(i));
-}
-#else
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-#endif
-
#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
new file mode 100644
index 000000000000..134ea9cc5283
--- /dev/null
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
+#ifndef X86_INTEL_ARCH_PERFMON_H
+#define X86_INTEL_ARCH_PERFMON_H 1
+
+#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
+
+#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
+#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
+#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
+
+#endif /* X86_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h
new file mode 100644
index 000000000000..dfd88a6e6040
--- /dev/null
+++ b/include/asm-i386/k8.h
@@ -0,0 +1 @@
+#include <asm-x86_64/k8.h>
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 96d0828ce096..d18cdb9fc9a6 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -19,6 +19,8 @@ struct die_args {
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
extern struct atomic_notifier_head i386die_chain;
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 57d157c5cf89..0730a20f6db8 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -44,6 +44,7 @@ typedef u8 kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 0
void arch_remove_kprobe(struct kprobe *p);
void kretprobe_trampoline(void);
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e67fa08260fe..3b4998c51d08 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters, otherwise we could
+ still access a variable of a previous CPU in a non-atomic way. */
+#define cpu_local_wrap_v(v) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (v); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(v) \
+ ({ preempt_disable(); \
+ v; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
#define __cpu_local_inc(v) cpu_local_inc(v)
#define __cpu_local_dec(v) cpu_local_dec(v)
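
The cpu_local_wrap()/cpu_local_wrap_v() helpers bracket each per-CPU counter
access with preempt_disable()/preempt_enable(), so the task cannot migrate
between looking up __get_cpu_var() and touching the counter. A user-space
analogue of the idea, with a pthread mutex standing in for preemption control
(illustrative only, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static long counter;
static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;

/* run the statement with "preemption" disabled */
#define cpu_local_wrap(stmt)			\
	do {					\
		pthread_mutex_lock(&guard);	\
		stmt;				\
		pthread_mutex_unlock(&guard);	\
	} while (0)

static void local_inc_safe(void)
{
	cpu_local_wrap(counter++);	/* read-modify-write cannot be split */
}

int main(void)
{
	local_inc_safe();
	printf("%ld\n", counter);
	return 0;
}
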
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index a1d0072e36bc..0dba244c86db 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -1,6 +1,9 @@
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
void send_IPI_mask_bitmask(cpumask_t mask, int vector);
void __send_IPI_shortcut(unsigned int shortcut, int vector);
@@ -13,7 +16,7 @@ static inline void send_IPI_mask(cpumask_t mask, int vector)
static inline void __local_send_IPI_allbutself(int vector)
{
- if (no_broadcast) {
+ if (no_broadcast || vector == NMI_VECTOR) {
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
@@ -24,7 +27,7 @@ static inline void __local_send_IPI_allbutself(int vector)
static inline void __local_send_IPI_all(int vector)
{
- if (no_broadcast)
+ if (no_broadcast || vector == NMI_VECTOR)
send_IPI_mask(cpu_online_map, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
index 4b9703bb0288..807992fd4171 100644
--- a/include/asm-i386/mach-default/mach_timer.h
+++ b/include/asm-i386/mach-default/mach_timer.h
@@ -15,7 +15,9 @@
#ifndef _MACH_TIMER_H
#define _MACH_TIMER_H
-#define CALIBRATE_LATCH (5 * LATCH)
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH \
+ ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
static inline void mach_prepare_counter(void)
{
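
The new CALIBRATE_LATCH derives the PIT latch from a calibration window given
in milliseconds, rounding to the nearest tick. A quick check of the
arithmetic, assuming the standard i8253 input clock of 1193182 Hz (the value
here is for illustration; in the kernel CLOCK_TICK_RATE comes from timex.h):

#include <stdio.h>

#define CLOCK_TICK_RATE		1193182	/* i8253 PIT input clock, Hz (assumed) */
#define CALIBRATE_TIME_MSEC	30
#define CALIBRATE_LATCH \
	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2) / 1000)

int main(void)
{
	printf("latch = %d ticks\n", CALIBRATE_LATCH);	/* prints 35795 */
	return 0;
}
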
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 1cce2b924a80..94268399170d 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -2,6 +2,7 @@
#define __ASM_MACH_MPPARSE_H
#include <mach_apic.h>
+#include <asm/tsc.h>
extern int use_cyclone;
@@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
(!strncmp(productid, "VIGIL SMP", 9)
|| !strncmp(productid, "EXA", 3)
|| !strncmp(productid, "RUTHLESS SMP", 12))){
+ mark_tsc_unstable();
use_cyclone = 1; /*enable cyclone-timer*/
setup_summit();
return 1;
@@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
if (!strncmp(oem_id, "IBM", 3) &&
(!strncmp(oem_table_id, "SERVIGIL", 8)
|| !strncmp(oem_table_id, "EXA", 3))){
+ mark_tsc_unstable();
use_cyclone = 1; /*enable cyclone-timer*/
setup_summit();
return 1;
diff --git a/include/asm-i386/mach-visws/setup_arch.h b/include/asm-i386/mach-visws/setup_arch.h
index b92d6d9a4d3c..33f700ef6831 100644
--- a/include/asm-i386/mach-visws/setup_arch.h
+++ b/include/asm-i386/mach-visws/setup_arch.h
@@ -1,5 +1,8 @@
/* Hook to call BIOS initialisation function */
+extern unsigned long sgivwfb_mem_phys;
+extern unsigned long sgivwfb_mem_size;
+
/* no action for visws */
#define ARCH_SETUP
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
index f431a0b86d4c..8358dd3df7aa 100644
--- a/include/asm-i386/mmu.h
+++ b/include/asm-i386/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
int size;
struct semaphore sem;
void *ldt;
+ void *vdso;
} mm_context_t;
#endif
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 21f16638fc61..67d994799999 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,24 +5,38 @@
#define ASM_NMI_H
#include <linux/pm.h>
-
+
struct pt_regs;
-
+
typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-
-/**
+
+/**
* set_nmi_callback
*
* Set a handler for an NMI. Only one handler may be
* set. Return 1 if the NMI was handled.
*/
void set_nmi_callback(nmi_callback_t callback);
-
-/**
+
+/**
* unset_nmi_callback
*
* Remove the handler previously set.
*/
void unset_nmi_callback(void);
-
+
+extern void setup_apic_nmi_watchdog (void);
+extern int reserve_lapic_nmi(void);
+extern void release_lapic_nmi(void);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern void nmi_watchdog_tick (struct pt_regs * regs);
+
+extern unsigned int nmi_watchdog;
+#define NMI_DEFAULT -1
+#define NMI_NONE 0
+#define NMI_IO_APIC 1
+#define NMI_LOCAL_APIC 2
+#define NMI_INVALID 3
+
#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h
deleted file mode 100644
index e13c6ffa72ae..000000000000
--- a/include/asm-i386/node.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_I386_NODE_H_
-#define _ASM_I386_NODE_H_
-
-#include <linux/device.h>
-#include <linux/mmzone.h>
-#include <linux/node.h>
-#include <linux/topology.h>
-#include <linux/nodemask.h>
-
-struct i386_node {
- struct node node;
-};
-extern struct i386_node node_devices[MAX_NUMNODES];
-
-static inline int arch_register_node(int num){
- int p_node;
- struct node *parent = NULL;
-
- if (!node_online(num))
- return 0;
- p_node = parent_node(num);
-
- if (p_node != num)
- parent = &node_devices[p_node].node;
-
- return register_node(&node_devices[num].node, num, parent);
-}
-
-#endif /* _ASM_I386_NODE_H_ */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index e3a552fa5538..f5bf544c729a 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#ifndef __ASSEMBLY__
+struct vm_area_struct;
+
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
@@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr);
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
+#define __HAVE_ARCH_GATE_AREA 1
#endif /* __KERNEL__ */
#endif /* _I386_PAGE_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 0c83cf12eec9..b32346d62e10 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -71,8 +71,12 @@ struct cpuinfo_x86 {
cpumask_t llc_shared_map; /* cpus sharing the last level cache */
#endif
unsigned char x86_max_cores; /* cpuid returned max cores value */
- unsigned char booted_cores; /* number of cores as seen by OS */
unsigned char apicid;
+#ifdef CONFIG_SMP
+ unsigned char booted_cores; /* number of cores as seen by OS */
+ __u8 phys_proc_id; /* Physical processor id. */
+ __u8 cpu_core_id; /* Core id */
+#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
@@ -104,14 +108,13 @@ extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data boot_cpu_data
#endif
-extern int phys_proc_id[NR_CPUS];
-extern int cpu_core_id[NR_CPUS];
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern unsigned short num_cache_leaves;
#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
@@ -554,7 +557,7 @@ extern void prepare_to_copy(struct task_struct *tsk);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
-void show_trace(struct task_struct *task, unsigned long *stack);
+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
unsigned long get_wchan(struct task_struct *p);
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 0249f912a29c..cab0180567f9 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -427,7 +427,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while(0)
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8420ed12491e..2833fa2c0dd0 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
+ void *sysenter_return;
struct restart_block restart_block;
unsigned long previous_esp; /* ESP of the previous stack in case
@@ -83,17 +84,15 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("esp") __attribute_used__;
+
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
- struct thread_info *ti;
- __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
- return ti;
+ return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __attribute_used__;
-
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info(tsk) \
@@ -140,8 +139,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17
+#define TIF_MEMDIE 16
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -153,7 +151,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -170,6 +167,9 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
* have to worry about atomic accesses.
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
#endif /* __KERNEL__ */
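
current_thread_info() now derives the thread_info pointer purely in C by
masking the stack pointer, relying on kernel stacks being THREAD_SIZE-aligned
and THREAD_SIZE being a power of two. A small sketch of the masking trick
with made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL	/* two pages, as on i386 */

int main(void)
{
	uintptr_t base = 0xc0124000UL;		/* hypothetical aligned stack base */
	uintptr_t esp  = base + 0x1f40;		/* some address inside that stack */
	uintptr_t info = esp & ~(THREAD_SIZE - 1);	/* recovers the base */

	printf("base=%#lx recovered=%#lx\n",
	       (unsigned long)base, (unsigned long)info);
	return 0;
}
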
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index aed16437479d..d0ebd05f8516 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -3,68 +3,11 @@
#include <linux/init.h>
#include <linux/pm.h>
-/**
- * struct timer_ops - used to define a timer source
- *
- * @name: name of the timer.
- * @init: Probes and initializes the timer. Takes clock= override
- * string as an argument. Returns 0 on success, anything else
- * on failure.
- * @mark_offset: called by the timer interrupt.
- * @get_offset: called by gettimeofday(). Returns the number of microseconds
- * since the last timer interupt.
- * @monotonic_clock: returns the number of nanoseconds since the init of the
- * timer.
- * @delay: delays this many clock cycles.
- */
-struct timer_opts {
- char* name;
- void (*mark_offset)(void);
- unsigned long (*get_offset)(void);
- unsigned long long (*monotonic_clock)(void);
- void (*delay)(unsigned long);
- unsigned long (*read_timer)(void);
- int (*suspend)(pm_message_t state);
- int (*resume)(void);
-};
-
-struct init_timer_opts {
- int (*init)(char *override);
- struct timer_opts *opts;
-};
-
#define TICK_SIZE (tick_nsec / 1000)
-
-extern struct timer_opts* __init select_timer(void);
-extern void clock_fallback(void);
void setup_pit_timer(void);
-
/* Modifiers for buggy PIT handling */
-
extern int pit_latch_buggy;
-
-extern struct timer_opts *cur_timer;
extern int timer_ack;
-
-/* list of externed timers */
-extern struct timer_opts timer_none;
-extern struct timer_opts timer_pit;
-extern struct init_timer_opts timer_pit_init;
-extern struct init_timer_opts timer_tsc_init;
-#ifdef CONFIG_X86_CYCLONE_TIMER
-extern struct init_timer_opts timer_cyclone_init;
-#endif
-
-extern unsigned long calibrate_tsc(void);
-extern unsigned long read_timer_tsc(void);
-extern void init_cpu_khz(void);
extern int recalibrate_cpu_khz(void);
-#ifdef CONFIG_HPET_TIMER
-extern struct init_timer_opts timer_hpet_init;
-extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
-#endif
-#ifdef CONFIG_X86_PM_TIMER
-extern struct init_timer_opts timer_pmtmr_init;
-#endif
#endif
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index d434984303ca..3666044409f0 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -7,6 +7,7 @@
#define _ASMi386_TIMEX_H
#include <asm/processor.h>
+#include <asm/tsc.h>
#ifdef CONFIG_X86_ELAN
# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
@@ -15,39 +16,6 @@
#endif
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- unsigned long long ret=0;
-
-#ifndef CONFIG_X86_TSC
- if (!cpu_has_tsc)
- return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
- rdtscll(ret);
-#endif
- return ret;
-}
-
-extern unsigned int cpu_khz;
-
extern int read_current_timer(unsigned long *timer_value);
#define ARCH_HAS_READ_CURRENT_TIMER 1
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index b94e5eeef917..6adbd9b1ae88 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -28,10 +28,8 @@
#define _ASM_I386_TOPOLOGY_H
#ifdef CONFIG_X86_HT
-#define topology_physical_package_id(cpu) \
- (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu])
-#define topology_core_id(cpu) \
- (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
+#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
+#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#endif
@@ -114,4 +112,9 @@ extern unsigned long node_remap_size[];
extern cpumask_t cpu_coregroup_map(int cpu);
+#ifdef CONFIG_SMP
+#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
+#define smt_capable() (smp_num_siblings > 1)
+#endif
+
#endif /* _ASM_I386_TOPOLOGY_H */
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
new file mode 100644
index 000000000000..97b828ce31e0
--- /dev/null
+++ b/include/asm-i386/tsc.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter on i586+ CPUs.
+ * Currently only used on SMP.
+ *
+ * If you really have a SMP machine with i486 chips or older,
+ * compile for that, and this will just always return zero.
+ * That's ok, it just means that the nicer scheduling heuristics
+ * won't work for you.
+ *
+ * We only use the low 32 bits, and we'd simply better make sure
+ * that we reschedule before that wraps. Scheduling at least every
+ * four billion cycles just basically sounds like a good idea,
+ * regardless of how fast the machine is.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+ rdtscll(ret);
+#endif
+ return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+
+#endif
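
get_cycles() simply reads the 64-bit TSC (or returns 0 on CPUs without one).
A user-space sketch of the same read, using raw rdtsc inline assembly instead
of the kernel's rdtscll() macro (x86 only, illustrative):

#include <stdio.h>

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
	unsigned int lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((cycles_t)hi << 32) | lo;
}

int main(void)
{
	cycles_t a = get_cycles();
	cycles_t b = get_cycles();

	printf("delta = %llu cycles\n", b - a);
	return 0;
}
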
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
new file mode 100644
index 000000000000..69f0f1df6722
--- /dev/null
+++ b/include/asm-i386/unwind.h
@@ -0,0 +1,98 @@
+#ifndef _ASM_I386_UNWIND_H
+#define _ASM_I386_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <linux/sched.h>
+#include <asm/fixmap.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+struct unwind_frame_info
+{
+ struct pt_regs regs;
+ struct task_struct *task;
+};
+
+#define UNW_PC(frame) (frame)->regs.eip
+#define UNW_SP(frame) (frame)->regs.esp
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) (frame)->regs.ebp
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0)
+#define STACK_TOP(tsk) ((tsk)->thread.esp0)
+#endif
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(eax), \
+ PTREGS_INFO(ecx), \
+ PTREGS_INFO(edx), \
+ PTREGS_INFO(ebx), \
+ PTREGS_INFO(esp), \
+ PTREGS_INFO(ebp), \
+ PTREGS_INFO(esi), \
+ PTREGS_INFO(edi), \
+ PTREGS_INFO(eip)
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ /*const*/ struct pt_regs *regs)
+{
+ if (user_mode_vm(regs))
+ info->regs = *regs;
+ else {
+ memcpy(&info->regs, regs, offsetof(struct pt_regs, esp));
+ info->regs.esp = (unsigned long)&regs->esp;
+ info->regs.xss = __KERNEL_DS;
+ }
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ memset(&info->regs, 0, sizeof(info->regs));
+ info->regs.eip = info->task->thread.eip;
+ info->regs.xcs = __KERNEL_CS;
+ __get_user(info->regs.ebp, (long *)info->task->thread.esp);
+ info->regs.esp = info->task->thread.esp;
+ info->regs.xss = __KERNEL_DS;
+ info->regs.xds = __USER_DS;
+ info->regs.xes = __USER_DS;
+}
+
+extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
+ asmlinkage int (*callback)(struct unwind_frame_info *,
+ void *arg),
+ void *arg);
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+#if 0 /* This can only work when selector register and EFLAGS saves/restores
+ are properly annotated (and tracked in UNW_REGISTER_INFO). */
+ return user_mode_vm(&info->regs);
+#else
+ return info->regs.eip < PAGE_OFFSET
+ || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
+ && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
+ || info->regs.esp < PAGE_OFFSET;
+#endif
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+
+static inline int arch_unw_user_mode(const void *info)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _ASM_I386_UNWIND_H */
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index ea8b8c407ab4..27f9df6b9145 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -97,8 +97,7 @@ extern int reserve_irq_vector (int vector);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
-static inline void
-hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
+static inline void ia64_resend_irq(unsigned int vector)
{
platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
}
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index dbe86c0bbce5..79479e2c6966 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,11 +14,6 @@
#define NR_IRQS 256
#define NR_IRQ_VECTORS NR_IRQS
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
static __inline__ int
irq_canonicalize (int irq)
{
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index c195a9ad1255..aed7142f9e4a 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -40,6 +40,8 @@ struct die_args {
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
extern struct atomic_notifier_head ia64die_chain;
enum die_val {
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 8c0fc227f0fb..2418a787c405 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -82,6 +82,7 @@ struct kprobe_ctlblk {
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
#define SLOT0_OPCODE_SHIFT (37)
#define SLOT1_p1_OPCODE_SHIFT (37 - (64-46))
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h
index a140310bf84d..2fb337b0e9b7 100644
--- a/include/asm-ia64/nodedata.h
+++ b/include/asm-ia64/nodedata.h
@@ -46,6 +46,18 @@ struct ia64_node_data {
*/
#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
+/*
+ * LOCAL_DATA_ADDR - Calculates the address of another node's
+ *	"local_node_data" during the hot-plug phase. local_node_data is
+ *	reached through per_cpu_page and is normally used only for the
+ *	currently executing cpu. When a new node is hot-added, however,
+ *	the local data addresses of the other nodes are needed so that
+ *	all of them can be updated.
+ */
+#define LOCAL_DATA_ADDR(pgdat) \
+ ((struct ia64_node_data *)((u64)(pgdat) + \
+ L1_CACHE_ALIGN(sizeof(struct pglist_data))))
+
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NODEDATA_H */
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index cd490b20d592..bd4452bda357 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -85,6 +85,7 @@
#define SN_SAL_GET_PROM_FEATURE_SET 0x02000065
#define SN_SAL_SET_OS_FEATURE_SET 0x02000066
#define SN_SAL_INJECT_ERROR 0x02000067
+#define SN_SAL_SET_CPU_NUMBER 0x02000068
/*
* Service-specific constants
@@ -1150,4 +1151,13 @@ sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
local_irq_restore(irq_flags);
return ret_stuff.status;
}
+
+static inline int
+ia64_sn_set_cpu_number(int cpu)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+}
#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index e5392c4d30c6..8bc9869e5765 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -27,6 +27,7 @@ struct thread_info {
__u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
__u32 last_cpu; /* Last CPU thread ran on */
+ __u32 status; /* Thread synchronous flags */
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
struct restart_block restart_block;
@@ -103,4 +104,8 @@ struct thread_info {
/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
+#define TS_POLLING 1 /* true if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+
#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 616b5ed2aa72..937c21257523 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -112,6 +112,7 @@ void build_cpu_to_node_map(void);
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define smt_capable() (smp_num_siblings > 1)
#endif
#include <asm-generic/topology.h>
diff --git a/include/asm-m32r/hw_irq.h b/include/asm-m32r/hw_irq.h
index 8d7e9d0e09e8..7138537cda03 100644
--- a/include/asm-m32r/hw_irq.h
+++ b/include/asm-m32r/hw_irq.h
@@ -1,9 +1,4 @@
#ifndef _ASM_M32R_HW_IRQ_H
#define _ASM_M32R_HW_IRQ_H
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-{
- /* Nothing to do */
-}
-
#endif /* _ASM_M32R_HW_IRQ_H */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 33567e8bfe6b..66c4742f09e7 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -318,7 +318,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
diff --git a/include/asm-m68knommu/bootstd.h b/include/asm-m68knommu/bootstd.h
index 3fdc79f06d50..bdc1a4ac4fe9 100644
--- a/include/asm-m68knommu/bootstd.h
+++ b/include/asm-m68knommu/bootstd.h
@@ -52,7 +52,7 @@ type name(void) \
__asm__ __volatile__ ("trap #2" \
: "=g" (__res) \
: "0" (__res) \
- : "%d0"); \
+ ); \
__bsc_return(type,__res); \
}
@@ -64,7 +64,7 @@ type name(atype a) \
__asm__ __volatile__ ("trap #2" \
: "=g" (__res) \
: "0" (__res), "d" (__a) \
- : "%d0"); \
+ ); \
__bsc_return(type,__res); \
}
@@ -77,7 +77,7 @@ type name(atype a, btype b) \
__asm__ __volatile__ ("trap #2" \
: "=g" (__res) \
: "0" (__res), "d" (__a), "d" (__b) \
- : "%d0"); \
+ ); \
__bsc_return(type,__res); \
}
@@ -92,7 +92,7 @@ type name(atype a, btype b, ctype c) \
: "=g" (__res) \
: "0" (__res), "d" (__a), "d" (__b), \
"d" (__c) \
- : "%d0"); \
+ ); \
__bsc_return(type,__res); \
}
@@ -108,7 +108,7 @@ type name(atype a, btype b, ctype c, dtype d) \
: "=g" (__res) \
: "0" (__res), "d" (__a), "d" (__b), \
"d" (__c), "d" (__d) \
- : "%d0"); \
+ ); \
__bsc_return(type,__res); \
}
@@ -125,7 +125,7 @@ type name(atype a, btype b, ctype c, dtype d, etype e) \
: "=g" (__res) \
: "0" (__res), "d" (__a), "d" (__b), \
"d" (__c), "d" (__d), "d" (__e) \
- : "%d0"); \
+ ); \
__bsc_return(type,__res); \
}
diff --git a/include/asm-m68knommu/cacheflush.h b/include/asm-m68knommu/cacheflush.h
index 49925e91e89c..c3aadf3b0d88 100644
--- a/include/asm-m68knommu/cacheflush.h
+++ b/include/asm-m68knommu/cacheflush.h
@@ -57,13 +57,13 @@ static inline void __flush_cache_all(void)
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M527x || CONFIG_M528x */
-#ifdef CONFIG_M5272
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || defined(CONFIG_M5272)
__asm__ __volatile__ (
- "movel #0x01000000, %%d0\n\t"
+ "movel #0x81000100, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
-#endif /* CONFIG_M5272 */
+#endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */
#ifdef CONFIG_M5249
__asm__ __volatile__ (
"movel #0xa1000200, %%d0\n\t"
@@ -71,6 +71,13 @@ static inline void __flush_cache_all(void)
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M5249 */
+#ifdef CONFIG_M532x
+ __asm__ __volatile__ (
+ "movel #0x81000200, %%d0\n\t"
+ "movec %%d0, %%CACR\n\t"
+ "nop\n\t"
+ : : : "d0" );
+#endif /* CONFIG_M532x */
}
#endif /* _M68KNOMMU_CACHEFLUSH_H */
diff --git a/include/asm-m68knommu/coldfire.h b/include/asm-m68knommu/coldfire.h
index 2fabca91df83..83a9fa4e618a 100644
--- a/include/asm-m68knommu/coldfire.h
+++ b/include/asm-m68knommu/coldfire.h
@@ -3,7 +3,7 @@
/*
* coldfire.h -- Motorola ColdFire CPU sepecific defines
*
- * (C) Copyright 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 1999-2006, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2000, Lineo (www.lineo.com)
*/
@@ -14,6 +14,19 @@
/*
+ * Define master clock frequency. This is essentially done at config
+ * time now. No point enumerating dozens of possible clock options
+ * here. Also the peripheral clock (bus clock) divide ratio is set
+ * at config time too.
+ */
+#ifdef CONFIG_CLOCK_SET
+#define MCF_CLK CONFIG_CLOCK_FREQ
+#define MCF_BUSCLK (CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV)
+#else
+#error "Don't know what your ColdFire CPU clock frequency is??"
+#endif
+
+/*
* Define the processor support peripherals base address.
* This is generally setup by the boards start up code.
*/
@@ -29,64 +42,9 @@
defined(CONFIG_M520x)
#undef MCF_MBAR
#define MCF_MBAR MCF_IPSBAR
-#endif
-
-/*
- * Define master clock frequency.
- */
-#if defined(CONFIG_CLOCK_11MHz)
-#define MCF_CLK 11289600
-#elif defined(CONFIG_CLOCK_16MHz)
-#define MCF_CLK 16000000
-#elif defined(CONFIG_CLOCK_20MHz)
-#define MCF_CLK 20000000
-#elif defined(CONFIG_CLOCK_24MHz)
-#define MCF_CLK 24000000
-#elif defined(CONFIG_CLOCK_25MHz)
-#define MCF_CLK 25000000
-#elif defined(CONFIG_CLOCK_33MHz)
-#define MCF_CLK 33000000
-#elif defined(CONFIG_CLOCK_40MHz)
-#define MCF_CLK 40000000
-#elif defined(CONFIG_CLOCK_45MHz)
-#define MCF_CLK 45000000
-#elif defined(CONFIG_CLOCK_48MHz)
-#define MCF_CLK 48000000
-#elif defined(CONFIG_CLOCK_50MHz)
-#define MCF_CLK 50000000
-#elif defined(CONFIG_CLOCK_54MHz)
-#define MCF_CLK 54000000
-#elif defined(CONFIG_CLOCK_60MHz)
-#define MCF_CLK 60000000
-#elif defined(CONFIG_CLOCK_62_5MHz)
-#define MCF_CLK 62500000
-#elif defined(CONFIG_CLOCK_64MHz)
-#define MCF_CLK 64000000
-#elif defined(CONFIG_CLOCK_66MHz)
-#define MCF_CLK 66000000
-#elif defined(CONFIG_CLOCK_70MHz)
-#define MCF_CLK 70000000
-#elif defined(CONFIG_CLOCK_100MHz)
-#define MCF_CLK 100000000
-#elif defined(CONFIG_CLOCK_140MHz)
-#define MCF_CLK 140000000
-#elif defined(CONFIG_CLOCK_150MHz)
-#define MCF_CLK 150000000
-#elif defined(CONFIG_CLOCK_166MHz)
-#define MCF_CLK 166000000
-#else
-#error "Don't know what your ColdFire CPU clock frequency is??"
-#endif
-
-/*
- * One some ColdFire family members the bus clock (used by internal
- * peripherals) is not the same as the CPU clock.
- */
-#if defined(CONFIG_M523x) || defined(CONFIG_M5249) || defined(CONFIG_M527x) || \
- defined(CONFIG_M520x)
-#define MCF_BUSCLK (MCF_CLK / 2)
-#else
-#define MCF_BUSCLK MCF_CLK
+#elif defined(CONFIG_M532x)
+#undef MCF_MBAR
+#define MCF_MBAR 0x00000000
#endif
/****************************************************************************/
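
The clock setup now comes straight from Kconfig (CONFIG_CLOCK_FREQ and
CONFIG_CLOCK_DIV) instead of the removed #elif chain. A tiny sketch of how
the two derived values fall out, with illustrative config values:

#include <stdio.h>

#define CONFIG_CLOCK_SET	1
#define CONFIG_CLOCK_FREQ	140000000	/* e.g. 140 MHz, illustrative */
#define CONFIG_CLOCK_DIV	2

#ifdef CONFIG_CLOCK_SET
#define MCF_CLK		CONFIG_CLOCK_FREQ
#define MCF_BUSCLK	(CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV)
#else
#error "Don't know what your ColdFire CPU clock frequency is??"
#endif

int main(void)
{
	printf("core %d Hz, bus %d Hz\n", MCF_CLK, MCF_BUSCLK);
	return 0;
}
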
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index c5247516fcfe..53557274eef8 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -83,7 +83,7 @@ extern void (*mach_disable_irq)(unsigned int);
/*
* Some drivers want these entry points
*/
-#define enable_irq(x) 0
+#define enable_irq(x) do { } while (0)
#define disable_irq(x) do { } while (0)
#define disable_irq_nosync(x) disable_irq(x)
diff --git a/include/asm-m68knommu/m5249sim.h b/include/asm-m68knommu/m5249sim.h
index 9344f529bd8f..399814f0b219 100644
--- a/include/asm-m68knommu/m5249sim.h
+++ b/include/asm-m68knommu/m5249sim.h
@@ -157,7 +157,7 @@
movel %d0,0x180(%a1) /* set PLL register */
nop
-#ifdef CONFIG_CLOCK_140MHz
+#if CONFIG_CLOCK_FREQ == 140000000
/*
* Set initial clock frequency. This assumes M5249C3 board
* is fitted with 11.2896MHz crystal. It will program the
diff --git a/include/asm-m68knommu/m532xsim.h b/include/asm-m68knommu/m532xsim.h
new file mode 100644
index 000000000000..1835fd20a82c
--- /dev/null
+++ b/include/asm-m68knommu/m532xsim.h
@@ -0,0 +1,2238 @@
+/****************************************************************************/
+
+/*
+ * m532xsim.h -- ColdFire 5329 registers
+ */
+
+/****************************************************************************/
+#ifndef m532xsim_h
+#define m532xsim_h
+/****************************************************************************/
+
+#define MCF_REG32(x) (*(volatile unsigned long *)(x))
+#define MCF_REG16(x) (*(volatile unsigned short *)(x))
+#define MCF_REG08(x) (*(volatile unsigned char *)(x))
+
+#define MCFINT_VECBASE 64
+#define MCFINT_UART0 26 /* Interrupt number for UART0 */
+#define MCFINT_UART1 27 /* Interrupt number for UART1 */
+
+#define MCF_WTM_WCR MCF_REG16(0xFC098000)
+
+/*
+ * Define the 532x SIM register set addresses.
+ */
+#define MCFSIM_IPRL 0xFC048004
+#define MCFSIM_IPRH 0xFC048000
+#define MCFSIM_IPR MCFSIM_IPRL
+#define MCFSIM_IMRL 0xFC04800C
+#define MCFSIM_IMRH 0xFC048008
+#define MCFSIM_IMR MCFSIM_IMRL
+#define MCFSIM_ICR0 0xFC048040
+#define MCFSIM_ICR1 0xFC048041
+#define MCFSIM_ICR2 0xFC048042
+#define MCFSIM_ICR3 0xFC048043
+#define MCFSIM_ICR4 0xFC048044
+#define MCFSIM_ICR5 0xFC048045
+#define MCFSIM_ICR6 0xFC048046
+#define MCFSIM_ICR7 0xFC048047
+#define MCFSIM_ICR8 0xFC048048
+#define MCFSIM_ICR9 0xFC048049
+#define MCFSIM_ICR10 0xFC04804A
+#define MCFSIM_ICR11 0xFC04804B
+
+/*
+ * Some symbol defines for the above...
+ */
+#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
+#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
+#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
+#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
+#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
+#define MCFSIM_DMA1ICR MCFSIM_ICR7 /* DMA 1 ICR */
+#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
+#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
+
+
+#define MCFSIM_IMR_MASKALL 0xFFFFFFFF /* All SIM intr sources */
+
+#define MCFSIM_IMR_SIMR0 0xFC04801C
+#define MCFSIM_IMR_SIMR1 0xFC04C01C
+#define MCFSIM_IMR_CIMR0 0xFC04801D
+#define MCFSIM_IMR_CIMR1 0xFC04C01D
+
+#define MCFSIM_ICR_TIMER1 (0xFC048040+32)
+#define MCFSIM_ICR_TIMER2 (0xFC048040+33)
+
+
+/*
+ * Macro to set IMR register. It is 32 bits on the 5307.
+ */
+#define mcf_getimr() \
+ *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR))
+
+#define mcf_setimr(imr) \
+ *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR)) = (imr);
+
+#define mcf_getipr() \
+ *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPR))
+
+#define mcf_getiprl() \
+ *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPRL))
+
+#define mcf_getiprh() \
+ *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPRH))
+
+
+#define mcf_enable_irq0(irq) \
+ *((volatile unsigned char*) (MCFSIM_IMR_CIMR0)) = (irq);
+
+#define mcf_enable_irq1(irq) \
+ *((volatile unsigned char*) (MCFSIM_IMR_CIMR1)) = (irq);
+
+#define mcf_disable_irq0(irq) \
+ *((volatile unsigned char*) (MCFSIM_IMR_SIMR0)) = (irq);
+
+#define mcf_disable_irq1(irq) \
+ *((volatile unsigned char*) (MCFSIM_IMR_SIMR1)) = (irq);
+
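+/*
+ * Illustrative sketch: the set/clear mask registers (SIMR/CIMR) take an
+ * interrupt source number rather than a bit mask, so masking or unmasking
+ * a source on interrupt controller 0 is a single byte write.  Using the
+ * UART0 source number defined above:
+ *
+ *	mcf_enable_irq0(MCFINT_UART0);		// clear UART0's mask bit (unmask)
+ *	...
+ *	mcf_disable_irq0(MCFINT_UART0);		// set UART0's mask bit (mask)
+ */
+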
+/*
+ * Define the Cache register flags.
+ */
+#define CACR_EC (1<<31)
+#define CACR_ESB (1<<29)
+#define CACR_DPI (1<<28)
+#define CACR_HLCK (1<<27)
+#define CACR_CINVA (1<<24)
+#define CACR_DNFB (1<<10)
+#define CACR_DCM_WTHRU (0<<8)
+#define CACR_DCM_WBACK (1<<8)
+#define CACR_DCM_OFF_PRE (2<<8)
+#define CACR_DCM_OFF_IMP (3<<8)
+#define CACR_DW (1<<5)
+
+#define ACR_BASE_POS 24
+#define ACR_MASK_POS 16
+#define ACR_ENABLE (1<<15)
+#define ACR_USER (0<<13)
+#define ACR_SUPER (1<<13)
+#define ACR_ANY (2<<13)
+#define ACR_CM_WTHRU (0<<5)
+#define ACR_CM_WBACK (1<<5)
+#define ACR_CM_OFF_PRE (2<<5)
+#define ACR_CM_OFF_IMP (3<<5)
+#define ACR_WPROTECT (1<<2)
+
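+/*
+ * Illustrative sketch: the CACR/ACR flags above are OR-ed together to form
+ * the value loaded into the cache control registers.  Loading CACR needs a
+ * privileged movec instruction, so it is normally done from the early cache
+ * setup code; the particular flag combination here is only an example.
+ *
+ *	unsigned long cacr = CACR_EC | CACR_ESB | CACR_DCM_WBACK;
+ *	__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
+ */
+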
+/*********************************************************************
+ *
+ * Inter-IC (I2C) Module
+ *
+ *********************************************************************/
+
+/* Read/Write access macros for general use */
+#define MCF532x_I2C_I2ADR (volatile u8 *) (0xFC058000) // Address
+#define MCF532x_I2C_I2FDR (volatile u8 *) (0xFC058004) // Freq Divider
+#define MCF532x_I2C_I2CR (volatile u8 *) (0xFC058008) // Control
+#define MCF532x_I2C_I2SR (volatile u8 *) (0xFC05800C) // Status
+#define MCF532x_I2C_I2DR (volatile u8 *) (0xFC058010) // Data I/O
+
+/* Bit level definitions and macros */
+#define MCF532x_I2C_I2ADR_ADDR(x) (((x)&0x7F)<<0x01)
+
+#define MCF532x_I2C_I2FDR_IC(x) (((x)&0x3F))
+
+#define MCF532x_I2C_I2CR_IEN (0x80) // I2C enable
+#define MCF532x_I2C_I2CR_IIEN (0x40) // interrupt enable
+#define MCF532x_I2C_I2CR_MSTA (0x20) // master/slave mode
+#define MCF532x_I2C_I2CR_MTX (0x10) // transmit/receive mode
+#define MCF532x_I2C_I2CR_TXAK (0x08) // transmit acknowledge enable
+#define MCF532x_I2C_I2CR_RSTA (0x04) // repeat start
+
+#define MCF532x_I2C_I2SR_ICF (0x80) // data transfer bit
+#define MCF532x_I2C_I2SR_IAAS (0x40) // I2C addressed as a slave
+#define MCF532x_I2C_I2SR_IBB (0x20) // I2C bus busy
+#define MCF532x_I2C_I2SR_IAL              (0x10)   // arbitration lost
+#define MCF532x_I2C_I2SR_SRW (0x04) // slave read/write
+#define MCF532x_I2C_I2SR_IIF (0x02) // I2C interrupt
+#define MCF532x_I2C_I2SR_RXAK (0x01) // received acknowledge
+
+#define MCF532x_PAR_FECI2C (volatile u8 *) (0xFC0A4053)
+
+
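+/*
+ * Illustrative sketch: a minimal polled start of a master write using the
+ * I2C macros above.  Timeouts, error handling and the data phase are
+ * omitted, and the 7-bit slave address (0x50) is an arbitrary assumption.
+ *
+ *	*MCF532x_I2C_I2CR = MCF532x_I2C_I2CR_IEN;		// enable the I2C module
+ *	while (*MCF532x_I2C_I2SR & MCF532x_I2C_I2SR_IBB)	// wait for an idle bus
+ *		;
+ *	*MCF532x_I2C_I2CR |= MCF532x_I2C_I2CR_MSTA | MCF532x_I2C_I2CR_MTX;
+ *	*MCF532x_I2C_I2DR = (0x50 << 1);			// START + address, write
+ */
+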
+/*
+ * The M5329EVB board needs help getting its devices initialized
+ * at kernel start time if dBUG hasn't already set them up (for
+ * example, when dBUG is not being used), so we do it manually here.
+ */
+#ifdef __ASSEMBLER__
+.macro m5329EVB_setup
+ movel #0xFC098000, %a7
+ movel #0x0, (%a7)
+#define CORE_SRAM 0x80000000
+#define CORE_SRAM_SIZE 0x8000
+ movel #CORE_SRAM, %d0
+ addl #0x221, %d0
+ movec %d0,%RAMBAR1
+ movel #CORE_SRAM, %sp
+ addl #CORE_SRAM_SIZE, %sp
+ jsr sysinit
+.endm
+#define PLATFORM_SETUP m5329EVB_setup
+
+#endif /* __ASSEMBLER__ */
+
+/*********************************************************************
+ *
+ * Chip Configuration Module (CCM)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_CCM_CCR MCF_REG16(0xFC0A0004)
+#define MCF_CCM_RCON MCF_REG16(0xFC0A0008)
+#define MCF_CCM_CIR MCF_REG16(0xFC0A000A)
+#define MCF_CCM_MISCCR MCF_REG16(0xFC0A0010)
+#define MCF_CCM_CDR MCF_REG16(0xFC0A0012)
+#define MCF_CCM_UHCSR MCF_REG16(0xFC0A0014)
+#define MCF_CCM_UOCSR MCF_REG16(0xFC0A0016)
+
+/* Bit definitions and macros for MCF_CCM_CCR */
+#define MCF_CCM_CCR_RESERVED (0x0001)
+#define MCF_CCM_CCR_PLL_MODE (0x0003)
+#define MCF_CCM_CCR_OSC_MODE (0x0005)
+#define MCF_CCM_CCR_BOOTPS(x) (((x)&0x0003)<<3|0x0001)
+#define MCF_CCM_CCR_LOAD (0x0021)
+#define MCF_CCM_CCR_LIMP (0x0041)
+#define MCF_CCM_CCR_CSC(x) (((x)&0x0003)<<8|0x0001)
+
+/* Bit definitions and macros for MCF_CCM_RCON */
+#define MCF_CCM_RCON_RESERVED (0x0001)
+#define MCF_CCM_RCON_PLL_MODE (0x0003)
+#define MCF_CCM_RCON_OSC_MODE (0x0005)
+#define MCF_CCM_RCON_BOOTPS(x) (((x)&0x0003)<<3|0x0001)
+#define MCF_CCM_RCON_LOAD (0x0021)
+#define MCF_CCM_RCON_LIMP (0x0041)
+#define MCF_CCM_RCON_CSC(x) (((x)&0x0003)<<8|0x0001)
+
+/* Bit definitions and macros for MCF_CCM_CIR */
+#define MCF_CCM_CIR_PRN(x) (((x)&0x003F)<<0)
+#define MCF_CCM_CIR_PIN(x) (((x)&0x03FF)<<6)
+
+/* Bit definitions and macros for MCF_CCM_MISCCR */
+#define MCF_CCM_MISCCR_USBSRC (0x0001)
+#define MCF_CCM_MISCCR_USBDIV (0x0002)
+#define MCF_CCM_MISCCR_SSI_SRC (0x0010)
+#define MCF_CCM_MISCCR_TIM_DMA (0x0020)
+#define MCF_CCM_MISCCR_SSI_PUS (0x0040)
+#define MCF_CCM_MISCCR_SSI_PUE (0x0080)
+#define MCF_CCM_MISCCR_LCD_CHEN (0x0100)
+#define MCF_CCM_MISCCR_LIMP (0x1000)
+#define MCF_CCM_MISCCR_PLL_LOCK (0x2000)
+
+/* Bit definitions and macros for MCF_CCM_CDR */
+#define MCF_CCM_CDR_SSIDIV(x) (((x)&0x000F)<<0)
+#define MCF_CCM_CDR_LPDIV(x) (((x)&0x000F)<<8)
+
+/* Bit definitions and macros for MCF_CCM_UHCSR */
+#define MCF_CCM_UHCSR_XPDE (0x0001)
+#define MCF_CCM_UHCSR_UHMIE (0x0002)
+#define MCF_CCM_UHCSR_WKUP (0x0004)
+#define MCF_CCM_UHCSR_PORTIND(x) (((x)&0x0003)<<14)
+
+/* Bit definitions and macros for MCF_CCM_UOCSR */
+#define MCF_CCM_UOCSR_XPDE (0x0001)
+#define MCF_CCM_UOCSR_UOMIE (0x0002)
+#define MCF_CCM_UOCSR_WKUP (0x0004)
+#define MCF_CCM_UOCSR_PWRFLT (0x0008)
+#define MCF_CCM_UOCSR_SEND (0x0010)
+#define MCF_CCM_UOCSR_VVLD (0x0020)
+#define MCF_CCM_UOCSR_BVLD (0x0040)
+#define MCF_CCM_UOCSR_AVLD (0x0080)
+#define MCF_CCM_UOCSR_DPPU (0x0100)
+#define MCF_CCM_UOCSR_DCR_VBUS (0x0200)
+#define MCF_CCM_UOCSR_CRG_VBUS (0x0400)
+#define MCF_CCM_UOCSR_DRV_VBUS (0x0800)
+#define MCF_CCM_UOCSR_DMPD (0x1000)
+#define MCF_CCM_UOCSR_DPPD (0x2000)
+#define MCF_CCM_UOCSR_PORTIND(x) (((x)&0x0003)<<14)
+
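+/*
+ * Illustrative sketch: the CCM registers are ordinary 16-bit accesses.
+ * Leaving limp mode and waiting for the PLL to lock might be polled like
+ * this; the exact sequencing required by the hardware is an assumption.
+ *
+ *	MCF_CCM_MISCCR &= ~MCF_CCM_MISCCR_LIMP;			// leave limp mode
+ *	while (!(MCF_CCM_MISCCR & MCF_CCM_MISCCR_PLL_LOCK))
+ *		;						// wait for PLL lock
+ */
+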
+/*********************************************************************
+ *
+ * DMA Timers (DTIM)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_DTIM0_DTMR MCF_REG16(0xFC070000)
+#define MCF_DTIM0_DTXMR MCF_REG08(0xFC070002)
+#define MCF_DTIM0_DTER MCF_REG08(0xFC070003)
+#define MCF_DTIM0_DTRR MCF_REG32(0xFC070004)
+#define MCF_DTIM0_DTCR MCF_REG32(0xFC070008)
+#define MCF_DTIM0_DTCN MCF_REG32(0xFC07000C)
+#define MCF_DTIM1_DTMR MCF_REG16(0xFC074000)
+#define MCF_DTIM1_DTXMR MCF_REG08(0xFC074002)
+#define MCF_DTIM1_DTER MCF_REG08(0xFC074003)
+#define MCF_DTIM1_DTRR MCF_REG32(0xFC074004)
+#define MCF_DTIM1_DTCR MCF_REG32(0xFC074008)
+#define MCF_DTIM1_DTCN MCF_REG32(0xFC07400C)
+#define MCF_DTIM2_DTMR MCF_REG16(0xFC078000)
+#define MCF_DTIM2_DTXMR MCF_REG08(0xFC078002)
+#define MCF_DTIM2_DTER MCF_REG08(0xFC078003)
+#define MCF_DTIM2_DTRR MCF_REG32(0xFC078004)
+#define MCF_DTIM2_DTCR MCF_REG32(0xFC078008)
+#define MCF_DTIM2_DTCN MCF_REG32(0xFC07800C)
+#define MCF_DTIM3_DTMR MCF_REG16(0xFC07C000)
+#define MCF_DTIM3_DTXMR MCF_REG08(0xFC07C002)
+#define MCF_DTIM3_DTER MCF_REG08(0xFC07C003)
+#define MCF_DTIM3_DTRR MCF_REG32(0xFC07C004)
+#define MCF_DTIM3_DTCR MCF_REG32(0xFC07C008)
+#define MCF_DTIM3_DTCN MCF_REG32(0xFC07C00C)
+#define MCF_DTIM_DTMR(x) MCF_REG16(0xFC070000+((x)*0x4000))
+#define MCF_DTIM_DTXMR(x) MCF_REG08(0xFC070002+((x)*0x4000))
+#define MCF_DTIM_DTER(x) MCF_REG08(0xFC070003+((x)*0x4000))
+#define MCF_DTIM_DTRR(x) MCF_REG32(0xFC070004+((x)*0x4000))
+#define MCF_DTIM_DTCR(x) MCF_REG32(0xFC070008+((x)*0x4000))
+#define MCF_DTIM_DTCN(x) MCF_REG32(0xFC07000C+((x)*0x4000))
+
+/* Bit definitions and macros for MCF_DTIM_DTMR */
+#define MCF_DTIM_DTMR_RST (0x0001)
+#define MCF_DTIM_DTMR_CLK(x) (((x)&0x0003)<<1)
+#define MCF_DTIM_DTMR_FRR (0x0008)
+#define MCF_DTIM_DTMR_ORRI (0x0010)
+#define MCF_DTIM_DTMR_OM (0x0020)
+#define MCF_DTIM_DTMR_CE(x) (((x)&0x0003)<<6)
+#define MCF_DTIM_DTMR_PS(x) (((x)&0x00FF)<<8)
+#define MCF_DTIM_DTMR_CE_ANY (0x00C0)
+#define MCF_DTIM_DTMR_CE_FALL (0x0080)
+#define MCF_DTIM_DTMR_CE_RISE (0x0040)
+#define MCF_DTIM_DTMR_CE_NONE (0x0000)
+#define MCF_DTIM_DTMR_CLK_DTIN (0x0006)
+#define MCF_DTIM_DTMR_CLK_DIV16 (0x0004)
+#define MCF_DTIM_DTMR_CLK_DIV1 (0x0002)
+#define MCF_DTIM_DTMR_CLK_STOP (0x0000)
+
+/* Bit definitions and macros for MCF_DTIM_DTXMR */
+#define MCF_DTIM_DTXMR_MODE16 (0x01)
+#define MCF_DTIM_DTXMR_DMAEN (0x80)
+
+/* Bit definitions and macros for MCF_DTIM_DTER */
+#define MCF_DTIM_DTER_CAP (0x01)
+#define MCF_DTIM_DTER_REF (0x02)
+
+/* Bit definitions and macros for MCF_DTIM_DTRR */
+#define MCF_DTIM_DTRR_REF(x) (((x)&0xFFFFFFFF)<<0)
+
+/* Bit definitions and macros for MCF_DTIM_DTCR */
+#define MCF_DTIM_DTCR_CAP(x) (((x)&0xFFFFFFFF)<<0)
+
+/* Bit definitions and macros for MCF_DTIM_DTCN */
+#define MCF_DTIM_DTCN_CNT(x) (((x)&0xFFFFFFFF)<<0)
+
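+/*
+ * Illustrative sketch: programming DMA timer 0 to count the internal bus
+ * clock and restart at a reference value.  The prescaler and reference
+ * values are arbitrary and only show how the field macros combine.
+ *
+ *	MCF_DTIM0_DTMR = 0;					// hold the timer in reset
+ *	MCF_DTIM0_DTRR = 0x0000FFFF;				// reference value
+ *	MCF_DTIM0_DTMR = MCF_DTIM_DTMR_PS(15) |			// prescale by 16
+ *			 MCF_DTIM_DTMR_CLK_DIV1 |		// clock = internal bus clock
+ *			 MCF_DTIM_DTMR_FRR |			// restart on reaching reference
+ *			 MCF_DTIM_DTMR_RST;			// enable the timer
+ *	unsigned long count = MCF_DTIM0_DTCN;			// read the running count
+ */
+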
+/*********************************************************************
+ *
+ * FlexBus Chip Selects (FBCS)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_FBCS0_CSAR MCF_REG32(0xFC008000)
+#define MCF_FBCS0_CSMR MCF_REG32(0xFC008004)
+#define MCF_FBCS0_CSCR MCF_REG32(0xFC008008)
+#define MCF_FBCS1_CSAR MCF_REG32(0xFC00800C)
+#define MCF_FBCS1_CSMR MCF_REG32(0xFC008010)
+#define MCF_FBCS1_CSCR MCF_REG32(0xFC008014)
+#define MCF_FBCS2_CSAR MCF_REG32(0xFC008018)
+#define MCF_FBCS2_CSMR MCF_REG32(0xFC00801C)
+#define MCF_FBCS2_CSCR MCF_REG32(0xFC008020)
+#define MCF_FBCS3_CSAR MCF_REG32(0xFC008024)
+#define MCF_FBCS3_CSMR MCF_REG32(0xFC008028)
+#define MCF_FBCS3_CSCR MCF_REG32(0xFC00802C)
+#define MCF_FBCS4_CSAR MCF_REG32(0xFC008030)
+#define MCF_FBCS4_CSMR MCF_REG32(0xFC008034)
+#define MCF_FBCS4_CSCR MCF_REG32(0xFC008038)
+#define MCF_FBCS5_CSAR MCF_REG32(0xFC00803C)
+#define MCF_FBCS5_CSMR MCF_REG32(0xFC008040)
+#define MCF_FBCS5_CSCR MCF_REG32(0xFC008044)
+#define MCF_FBCS_CSAR(x) MCF_REG32(0xFC008000+((x)*0x00C))
+#define MCF_FBCS_CSMR(x) MCF_REG32(0xFC008004+((x)*0x00C))
+#define MCF_FBCS_CSCR(x) MCF_REG32(0xFC008008+((x)*0x00C))
+
+/* Bit definitions and macros for MCF_FBCS_CSAR */
+#define MCF_FBCS_CSAR_BA(x) ((x)&0xFFFF0000)
+
+/* Bit definitions and macros for MCF_FBCS_CSMR */
+#define MCF_FBCS_CSMR_V (0x00000001)
+#define MCF_FBCS_CSMR_WP (0x00000100)
+#define MCF_FBCS_CSMR_BAM(x) (((x)&0x0000FFFF)<<16)
+#define MCF_FBCS_CSMR_BAM_4G (0xFFFF0000)
+#define MCF_FBCS_CSMR_BAM_2G (0x7FFF0000)
+#define MCF_FBCS_CSMR_BAM_1G (0x3FFF0000)
+#define MCF_FBCS_CSMR_BAM_1024M (0x3FFF0000)
+#define MCF_FBCS_CSMR_BAM_512M (0x1FFF0000)
+#define MCF_FBCS_CSMR_BAM_256M (0x0FFF0000)
+#define MCF_FBCS_CSMR_BAM_128M (0x07FF0000)
+#define MCF_FBCS_CSMR_BAM_64M (0x03FF0000)
+#define MCF_FBCS_CSMR_BAM_32M (0x01FF0000)
+#define MCF_FBCS_CSMR_BAM_16M (0x00FF0000)
+#define MCF_FBCS_CSMR_BAM_8M (0x007F0000)
+#define MCF_FBCS_CSMR_BAM_4M (0x003F0000)
+#define MCF_FBCS_CSMR_BAM_2M (0x001F0000)
+#define MCF_FBCS_CSMR_BAM_1M (0x000F0000)
+#define MCF_FBCS_CSMR_BAM_1024K (0x000F0000)
+#define MCF_FBCS_CSMR_BAM_512K (0x00070000)
+#define MCF_FBCS_CSMR_BAM_256K (0x00030000)
+#define MCF_FBCS_CSMR_BAM_128K (0x00010000)
+#define MCF_FBCS_CSMR_BAM_64K (0x00000000)
+
+/* Bit definitions and macros for MCF_FBCS_CSCR */
+#define MCF_FBCS_CSCR_BSTW (0x00000008)
+#define MCF_FBCS_CSCR_BSTR (0x00000010)
+#define MCF_FBCS_CSCR_BEM (0x00000020)
+#define MCF_FBCS_CSCR_PS(x) (((x)&0x00000003)<<6)
+#define MCF_FBCS_CSCR_AA (0x00000100)
+#define MCF_FBCS_CSCR_SBM (0x00000200)
+#define MCF_FBCS_CSCR_WS(x) (((x)&0x0000003F)<<10)
+#define MCF_FBCS_CSCR_WRAH(x) (((x)&0x00000003)<<16)
+#define MCF_FBCS_CSCR_RDAH(x) (((x)&0x00000003)<<18)
+#define MCF_FBCS_CSCR_ASET(x) (((x)&0x00000003)<<20)
+#define MCF_FBCS_CSCR_SWSEN (0x00800000)
+#define MCF_FBCS_CSCR_SWS(x) (((x)&0x0000003F)<<26)
+#define MCF_FBCS_CSCR_PS_8 (0x0040)
+#define MCF_FBCS_CSCR_PS_16 (0x0080)
+#define MCF_FBCS_CSCR_PS_32 (0x0000)
+
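+/*
+ * Illustrative sketch: programming FlexBus chip select 1 for a 16MB,
+ * 16-bit wide external device.  The base address and wait-state count are
+ * arbitrary assumptions chosen only to show how the macros fit together.
+ *
+ *	MCF_FBCS1_CSAR = MCF_FBCS_CSAR_BA(0x10000000);		// base address
+ *	MCF_FBCS1_CSCR = MCF_FBCS_CSCR_PS_16 |			// 16-bit port size
+ *			 MCF_FBCS_CSCR_AA |			// auto acknowledge
+ *			 MCF_FBCS_CSCR_WS(6);			// 6 wait states
+ *	MCF_FBCS1_CSMR = MCF_FBCS_CSMR_BAM_16M |		// 16MB address mask
+ *			 MCF_FBCS_CSMR_V;			// mark the entry valid
+ */
+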
+/*********************************************************************
+ *
+ * General Purpose I/O (GPIO)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_GPIO_PODR_FECH MCF_REG08(0xFC0A4000)
+#define MCF_GPIO_PODR_FECL MCF_REG08(0xFC0A4001)
+#define MCF_GPIO_PODR_SSI MCF_REG08(0xFC0A4002)
+#define MCF_GPIO_PODR_BUSCTL MCF_REG08(0xFC0A4003)
+#define MCF_GPIO_PODR_BE MCF_REG08(0xFC0A4004)
+#define MCF_GPIO_PODR_CS MCF_REG08(0xFC0A4005)
+#define MCF_GPIO_PODR_PWM MCF_REG08(0xFC0A4006)
+#define MCF_GPIO_PODR_FECI2C MCF_REG08(0xFC0A4007)
+#define MCF_GPIO_PODR_UART MCF_REG08(0xFC0A4009)
+#define MCF_GPIO_PODR_QSPI MCF_REG08(0xFC0A400A)
+#define MCF_GPIO_PODR_TIMER MCF_REG08(0xFC0A400B)
+#define MCF_GPIO_PODR_LCDDATAH MCF_REG08(0xFC0A400D)
+#define MCF_GPIO_PODR_LCDDATAM MCF_REG08(0xFC0A400E)
+#define MCF_GPIO_PODR_LCDDATAL MCF_REG08(0xFC0A400F)
+#define MCF_GPIO_PODR_LCDCTLH MCF_REG08(0xFC0A4010)
+#define MCF_GPIO_PODR_LCDCTLL MCF_REG08(0xFC0A4011)
+#define MCF_GPIO_PDDR_FECH MCF_REG08(0xFC0A4014)
+#define MCF_GPIO_PDDR_FECL MCF_REG08(0xFC0A4015)
+#define MCF_GPIO_PDDR_SSI MCF_REG08(0xFC0A4016)
+#define MCF_GPIO_PDDR_BUSCTL MCF_REG08(0xFC0A4017)
+#define MCF_GPIO_PDDR_BE MCF_REG08(0xFC0A4018)
+#define MCF_GPIO_PDDR_CS MCF_REG08(0xFC0A4019)
+#define MCF_GPIO_PDDR_PWM MCF_REG08(0xFC0A401A)
+#define MCF_GPIO_PDDR_FECI2C MCF_REG08(0xFC0A401B)
+#define MCF_GPIO_PDDR_UART MCF_REG08(0xFC0A401C)
+#define MCF_GPIO_PDDR_QSPI MCF_REG08(0xFC0A401E)
+#define MCF_GPIO_PDDR_TIMER MCF_REG08(0xFC0A401F)
+#define MCF_GPIO_PDDR_LCDDATAH MCF_REG08(0xFC0A4021)
+#define MCF_GPIO_PDDR_LCDDATAM MCF_REG08(0xFC0A4022)
+#define MCF_GPIO_PDDR_LCDDATAL MCF_REG08(0xFC0A4023)
+#define MCF_GPIO_PDDR_LCDCTLH MCF_REG08(0xFC0A4024)
+#define MCF_GPIO_PDDR_LCDCTLL MCF_REG08(0xFC0A4025)
+#define MCF_GPIO_PPDSDR_FECH MCF_REG08(0xFC0A4028)
+#define MCF_GPIO_PPDSDR_FECL MCF_REG08(0xFC0A4029)
+#define MCF_GPIO_PPDSDR_SSI MCF_REG08(0xFC0A402A)
+#define MCF_GPIO_PPDSDR_BUSCTL MCF_REG08(0xFC0A402B)
+#define MCF_GPIO_PPDSDR_BE MCF_REG08(0xFC0A402C)
+#define MCF_GPIO_PPDSDR_CS MCF_REG08(0xFC0A402D)
+#define MCF_GPIO_PPDSDR_PWM MCF_REG08(0xFC0A402E)
+#define MCF_GPIO_PPDSDR_FECI2C MCF_REG08(0xFC0A402F)
+#define MCF_GPIO_PPDSDR_UART MCF_REG08(0xFC0A4031)
+#define MCF_GPIO_PPDSDR_QSPI MCF_REG08(0xFC0A4032)
+#define MCF_GPIO_PPDSDR_TIMER MCF_REG08(0xFC0A4033)
+#define MCF_GPIO_PPDSDR_LCDDATAH MCF_REG08(0xFC0A4035)
+#define MCF_GPIO_PPDSDR_LCDDATAM MCF_REG08(0xFC0A4036)
+#define MCF_GPIO_PPDSDR_LCDDATAL MCF_REG08(0xFC0A4037)
+#define MCF_GPIO_PPDSDR_LCDCTLH MCF_REG08(0xFC0A4038)
+#define MCF_GPIO_PPDSDR_LCDCTLL MCF_REG08(0xFC0A4039)
+#define MCF_GPIO_PCLRR_FECH MCF_REG08(0xFC0A403C)
+#define MCF_GPIO_PCLRR_FECL MCF_REG08(0xFC0A403D)
+#define MCF_GPIO_PCLRR_SSI MCF_REG08(0xFC0A403E)
+#define MCF_GPIO_PCLRR_BUSCTL MCF_REG08(0xFC0A403F)
+#define MCF_GPIO_PCLRR_BE MCF_REG08(0xFC0A4040)
+#define MCF_GPIO_PCLRR_CS MCF_REG08(0xFC0A4041)
+#define MCF_GPIO_PCLRR_PWM MCF_REG08(0xFC0A4042)
+#define MCF_GPIO_PCLRR_FECI2C MCF_REG08(0xFC0A4043)
+#define MCF_GPIO_PCLRR_UART MCF_REG08(0xFC0A4045)
+#define MCF_GPIO_PCLRR_QSPI MCF_REG08(0xFC0A4046)
+#define MCF_GPIO_PCLRR_TIMER MCF_REG08(0xFC0A4047)
+#define MCF_GPIO_PCLRR_LCDDATAH MCF_REG08(0xFC0A4049)
+#define MCF_GPIO_PCLRR_LCDDATAM MCF_REG08(0xFC0A404A)
+#define MCF_GPIO_PCLRR_LCDDATAL MCF_REG08(0xFC0A404B)
+#define MCF_GPIO_PCLRR_LCDCTLH MCF_REG08(0xFC0A404C)
+#define MCF_GPIO_PCLRR_LCDCTLL MCF_REG08(0xFC0A404D)
+#define MCF_GPIO_PAR_FEC MCF_REG08(0xFC0A4050)
+#define MCF_GPIO_PAR_PWM MCF_REG08(0xFC0A4051)
+#define MCF_GPIO_PAR_BUSCTL MCF_REG08(0xFC0A4052)
+#define MCF_GPIO_PAR_FECI2C MCF_REG08(0xFC0A4053)
+#define MCF_GPIO_PAR_BE MCF_REG08(0xFC0A4054)
+#define MCF_GPIO_PAR_CS MCF_REG08(0xFC0A4055)
+#define MCF_GPIO_PAR_SSI MCF_REG16(0xFC0A4056)
+#define MCF_GPIO_PAR_UART MCF_REG16(0xFC0A4058)
+#define MCF_GPIO_PAR_QSPI MCF_REG16(0xFC0A405A)
+#define MCF_GPIO_PAR_TIMER MCF_REG08(0xFC0A405C)
+#define MCF_GPIO_PAR_LCDDATA MCF_REG08(0xFC0A405D)
+#define MCF_GPIO_PAR_LCDCTL MCF_REG16(0xFC0A405E)
+#define MCF_GPIO_PAR_IRQ MCF_REG16(0xFC0A4060)
+#define MCF_GPIO_MSCR_FLEXBUS MCF_REG08(0xFC0A4064)
+#define MCF_GPIO_MSCR_SDRAM MCF_REG08(0xFC0A4065)
+#define MCF_GPIO_DSCR_I2C MCF_REG08(0xFC0A4068)
+#define MCF_GPIO_DSCR_PWM MCF_REG08(0xFC0A4069)
+#define MCF_GPIO_DSCR_FEC MCF_REG08(0xFC0A406A)
+#define MCF_GPIO_DSCR_UART MCF_REG08(0xFC0A406B)
+#define MCF_GPIO_DSCR_QSPI MCF_REG08(0xFC0A406C)
+#define MCF_GPIO_DSCR_TIMER MCF_REG08(0xFC0A406D)
+#define MCF_GPIO_DSCR_SSI MCF_REG08(0xFC0A406E)
+#define MCF_GPIO_DSCR_LCD MCF_REG08(0xFC0A406F)
+#define MCF_GPIO_DSCR_DEBUG MCF_REG08(0xFC0A4070)
+#define MCF_GPIO_DSCR_CLKRST MCF_REG08(0xFC0A4071)
+#define MCF_GPIO_DSCR_IRQ MCF_REG08(0xFC0A4072)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECH */
+#define MCF_GPIO_PODR_FECH_PODR_FECH0 (0x01)
+#define MCF_GPIO_PODR_FECH_PODR_FECH1 (0x02)
+#define MCF_GPIO_PODR_FECH_PODR_FECH2 (0x04)
+#define MCF_GPIO_PODR_FECH_PODR_FECH3 (0x08)
+#define MCF_GPIO_PODR_FECH_PODR_FECH4 (0x10)
+#define MCF_GPIO_PODR_FECH_PODR_FECH5 (0x20)
+#define MCF_GPIO_PODR_FECH_PODR_FECH6 (0x40)
+#define MCF_GPIO_PODR_FECH_PODR_FECH7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECL */
+#define MCF_GPIO_PODR_FECL_PODR_FECL0 (0x01)
+#define MCF_GPIO_PODR_FECL_PODR_FECL1 (0x02)
+#define MCF_GPIO_PODR_FECL_PODR_FECL2 (0x04)
+#define MCF_GPIO_PODR_FECL_PODR_FECL3 (0x08)
+#define MCF_GPIO_PODR_FECL_PODR_FECL4 (0x10)
+#define MCF_GPIO_PODR_FECL_PODR_FECL5 (0x20)
+#define MCF_GPIO_PODR_FECL_PODR_FECL6 (0x40)
+#define MCF_GPIO_PODR_FECL_PODR_FECL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_SSI */
+#define MCF_GPIO_PODR_SSI_PODR_SSI0 (0x01)
+#define MCF_GPIO_PODR_SSI_PODR_SSI1 (0x02)
+#define MCF_GPIO_PODR_SSI_PODR_SSI2 (0x04)
+#define MCF_GPIO_PODR_SSI_PODR_SSI3 (0x08)
+#define MCF_GPIO_PODR_SSI_PODR_SSI4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_BUSCTL */
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL0 (0x01)
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL1 (0x02)
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL2 (0x04)
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_BE */
+#define MCF_GPIO_PODR_BE_PODR_BE0 (0x01)
+#define MCF_GPIO_PODR_BE_PODR_BE1 (0x02)
+#define MCF_GPIO_PODR_BE_PODR_BE2 (0x04)
+#define MCF_GPIO_PODR_BE_PODR_BE3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_CS */
+#define MCF_GPIO_PODR_CS_PODR_CS1 (0x02)
+#define MCF_GPIO_PODR_CS_PODR_CS2 (0x04)
+#define MCF_GPIO_PODR_CS_PODR_CS3 (0x08)
+#define MCF_GPIO_PODR_CS_PODR_CS4 (0x10)
+#define MCF_GPIO_PODR_CS_PODR_CS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_PWM */
+#define MCF_GPIO_PODR_PWM_PODR_PWM2 (0x04)
+#define MCF_GPIO_PODR_PWM_PODR_PWM3 (0x08)
+#define MCF_GPIO_PODR_PWM_PODR_PWM4 (0x10)
+#define MCF_GPIO_PODR_PWM_PODR_PWM5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECI2C */
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C0 (0x01)
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C1 (0x02)
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C2 (0x04)
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_UART */
+#define MCF_GPIO_PODR_UART_PODR_UART0 (0x01)
+#define MCF_GPIO_PODR_UART_PODR_UART1 (0x02)
+#define MCF_GPIO_PODR_UART_PODR_UART2 (0x04)
+#define MCF_GPIO_PODR_UART_PODR_UART3 (0x08)
+#define MCF_GPIO_PODR_UART_PODR_UART4 (0x10)
+#define MCF_GPIO_PODR_UART_PODR_UART5 (0x20)
+#define MCF_GPIO_PODR_UART_PODR_UART6 (0x40)
+#define MCF_GPIO_PODR_UART_PODR_UART7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_QSPI */
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI0 (0x01)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI1 (0x02)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI2 (0x04)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI3 (0x08)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI4 (0x10)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_TIMER */
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER0 (0x01)
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER1 (0x02)
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER2 (0x04)
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAH */
+#define MCF_GPIO_PODR_LCDDATAH_PODR_LCDDATAH0 (0x01)
+#define MCF_GPIO_PODR_LCDDATAH_PODR_LCDDATAH1 (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAM */
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM0 (0x01)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM1 (0x02)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM2 (0x04)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM3 (0x08)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM4 (0x10)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM5 (0x20)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM6 (0x40)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAL */
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL0 (0x01)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL1 (0x02)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL2 (0x04)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL3 (0x08)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL4 (0x10)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL5 (0x20)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL6 (0x40)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDCTLH */
+#define MCF_GPIO_PODR_LCDCTLH_PODR_LCDCTLH0 (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDCTLL */
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL0 (0x01)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL1 (0x02)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL2 (0x04)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL3 (0x08)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL4 (0x10)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL5 (0x20)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL6 (0x40)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECH */
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH0 (0x01)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH1 (0x02)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH2 (0x04)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH3 (0x08)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH4 (0x10)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH5 (0x20)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH6 (0x40)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECL */
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL0 (0x01)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL1 (0x02)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL2 (0x04)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL3 (0x08)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL4 (0x10)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL5 (0x20)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL6 (0x40)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_SSI */
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI0 (0x01)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI1 (0x02)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI2 (0x04)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI3 (0x08)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_BUSCTL */
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL0 (0x01)
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL1 (0x02)
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL2 (0x04)
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_BE */
+#define MCF_GPIO_PDDR_BE_PDDR_BE0 (0x01)
+#define MCF_GPIO_PDDR_BE_PDDR_BE1 (0x02)
+#define MCF_GPIO_PDDR_BE_PDDR_BE2 (0x04)
+#define MCF_GPIO_PDDR_BE_PDDR_BE3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_CS */
+#define MCF_GPIO_PDDR_CS_PDDR_CS1 (0x02)
+#define MCF_GPIO_PDDR_CS_PDDR_CS2 (0x04)
+#define MCF_GPIO_PDDR_CS_PDDR_CS3 (0x08)
+#define MCF_GPIO_PDDR_CS_PDDR_CS4 (0x10)
+#define MCF_GPIO_PDDR_CS_PDDR_CS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_PWM */
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM2 (0x04)
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM3 (0x08)
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM4 (0x10)
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECI2C */
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C0 (0x01)
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C1 (0x02)
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C2 (0x04)
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_UART */
+#define MCF_GPIO_PDDR_UART_PDDR_UART0 (0x01)
+#define MCF_GPIO_PDDR_UART_PDDR_UART1 (0x02)
+#define MCF_GPIO_PDDR_UART_PDDR_UART2 (0x04)
+#define MCF_GPIO_PDDR_UART_PDDR_UART3 (0x08)
+#define MCF_GPIO_PDDR_UART_PDDR_UART4 (0x10)
+#define MCF_GPIO_PDDR_UART_PDDR_UART5 (0x20)
+#define MCF_GPIO_PDDR_UART_PDDR_UART6 (0x40)
+#define MCF_GPIO_PDDR_UART_PDDR_UART7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_QSPI */
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI0 (0x01)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI1 (0x02)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI2 (0x04)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI3 (0x08)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI4 (0x10)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_TIMER */
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER0 (0x01)
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER1 (0x02)
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER2 (0x04)
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAH */
+#define MCF_GPIO_PDDR_LCDDATAH_PDDR_LCDDATAH0 (0x01)
+#define MCF_GPIO_PDDR_LCDDATAH_PDDR_LCDDATAH1 (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAM */
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM0 (0x01)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM1 (0x02)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM2 (0x04)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM3 (0x08)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM4 (0x10)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM5 (0x20)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM6 (0x40)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAL */
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL0 (0x01)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL1 (0x02)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL2 (0x04)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL3 (0x08)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL4 (0x10)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL5 (0x20)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL6 (0x40)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDCTLH */
+#define MCF_GPIO_PDDR_LCDCTLH_PDDR_LCDCTLH0 (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDCTLL */
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL0 (0x01)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL1 (0x02)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL2 (0x04)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL3 (0x08)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL4 (0x10)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL5 (0x20)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL6 (0x40)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECH */
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH0 (0x01)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH1 (0x02)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH2 (0x04)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH3 (0x08)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH4 (0x10)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH5 (0x20)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH6 (0x40)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECL */
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL0 (0x01)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL1 (0x02)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL2 (0x04)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL3 (0x08)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL4 (0x10)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL5 (0x20)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL6 (0x40)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_SSI */
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI0 (0x01)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI1 (0x02)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI2 (0x04)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI3 (0x08)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_BUSCTL */
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL0 (0x01)
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL1 (0x02)
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL2 (0x04)
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_BE */
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE0 (0x01)
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE1 (0x02)
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE2 (0x04)
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_CS */
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS1 (0x02)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS2 (0x04)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS3 (0x08)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS4 (0x10)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_PWM */
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM2 (0x04)
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM3 (0x08)
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM4 (0x10)
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECI2C */
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C0 (0x01)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C1 (0x02)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C2 (0x04)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_UART */
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART0 (0x01)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART1 (0x02)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART2 (0x04)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART3 (0x08)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART4 (0x10)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART5 (0x20)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART6 (0x40)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_QSPI */
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI0 (0x01)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI1 (0x02)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI2 (0x04)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI3 (0x08)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI4 (0x10)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_TIMER */
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER0 (0x01)
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER1 (0x02)
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER2 (0x04)
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAH */
+#define MCF_GPIO_PPDSDR_LCDDATAH_PPDSDR_LCDDATAH0 (0x01)
+#define MCF_GPIO_PPDSDR_LCDDATAH_PPDSDR_LCDDATAH1 (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAM */
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM0 (0x01)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM1 (0x02)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM2 (0x04)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM3 (0x08)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM4 (0x10)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM5 (0x20)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM6 (0x40)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAL */
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL0 (0x01)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL1 (0x02)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL2 (0x04)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL3 (0x08)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL4 (0x10)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL5 (0x20)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL6 (0x40)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDCTLH */
+#define MCF_GPIO_PPDSDR_LCDCTLH_PPDSDR_LCDCTLH0 (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDCTLL */
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL0 (0x01)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL1 (0x02)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL2 (0x04)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL3 (0x08)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL4 (0x10)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL5 (0x20)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL6 (0x40)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECH */
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH0 (0x01)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH1 (0x02)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH2 (0x04)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH3 (0x08)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH4 (0x10)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH5 (0x20)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH6 (0x40)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECL */
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL0 (0x01)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL1 (0x02)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL2 (0x04)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL3 (0x08)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL4 (0x10)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL5 (0x20)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL6 (0x40)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_SSI */
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI0 (0x01)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI1 (0x02)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI2 (0x04)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI3 (0x08)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_BUSCTL */
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL0 (0x01)
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL1 (0x02)
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL2 (0x04)
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_BE */
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE0 (0x01)
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE1 (0x02)
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE2 (0x04)
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_CS */
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS1 (0x02)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS2 (0x04)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS3 (0x08)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS4 (0x10)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_PWM */
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM2 (0x04)
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM3 (0x08)
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM4 (0x10)
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECI2C */
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C0 (0x01)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C1 (0x02)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C2 (0x04)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_UART */
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART0 (0x01)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART1 (0x02)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART2 (0x04)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART3 (0x08)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART4 (0x10)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART5 (0x20)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART6 (0x40)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_QSPI */
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI0 (0x01)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI1 (0x02)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI2 (0x04)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI3 (0x08)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI4 (0x10)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_TIMER */
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER0 (0x01)
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER1 (0x02)
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER2 (0x04)
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAH */
+#define MCF_GPIO_PCLRR_LCDDATAH_PCLRR_LCDDATAH0 (0x01)
+#define MCF_GPIO_PCLRR_LCDDATAH_PCLRR_LCDDATAH1 (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAM */
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM0 (0x01)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM1 (0x02)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM2 (0x04)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM3 (0x08)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM4 (0x10)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM5 (0x20)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM6 (0x40)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAL */
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL0 (0x01)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL1 (0x02)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL2 (0x04)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL3 (0x08)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL4 (0x10)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL5 (0x20)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL6 (0x40)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDCTLH */
+#define MCF_GPIO_PCLRR_LCDCTLH_PCLRR_LCDCTLH0 (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDCTLL */
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL0 (0x01)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL1 (0x02)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL2 (0x04)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL3 (0x08)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL4 (0x10)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL5 (0x20)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL6 (0x40)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FEC */
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII(x) (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W(x) (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_GPIO (0x00)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_URTS1 (0x04)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC (0x0C)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_GPIO (0x00)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_UART (0x01)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PWM */
+#define MCF_GPIO_PAR_PWM_PAR_PWM1(x) (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_PWM_PAR_PWM3(x) (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_PWM_PAR_PWM5 (0x10)
+#define MCF_GPIO_PAR_PWM_PAR_PWM7 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_BUSCTL */
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS(x) (((x)&0x03)<<3)
+#define MCF_GPIO_PAR_BUSCTL_PAR_RWB (0x20)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TA (0x40)
+#define MCF_GPIO_PAR_BUSCTL_PAR_OE (0x80)
+#define MCF_GPIO_PAR_BUSCTL_PAR_OE_GPIO (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_OE_OE (0x80)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TA_GPIO (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TA_TA (0x40)
+#define MCF_GPIO_PAR_BUSCTL_PAR_RWB_GPIO (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_RWB_RWB (0x20)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS_GPIO (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS_DACK0 (0x10)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS_TS (0x18)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FECI2C */
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA(x) (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL(x) (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_GPIO (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_UTXD2 (0x40)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_SCL (0x80)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC (0xC0)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_GPIO (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_URXD2 (0x10)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_SDA (0x20)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO (0x30)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL_GPIO (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 (0x04)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL_SCL (0x0C)
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA_GPIO (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2 (0x02)
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA_SDA (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_BE */
+#define MCF_GPIO_PAR_BE_PAR_BE0 (0x01)
+#define MCF_GPIO_PAR_BE_PAR_BE1 (0x02)
+#define MCF_GPIO_PAR_BE_PAR_BE2 (0x04)
+#define MCF_GPIO_PAR_BE_PAR_BE3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_CS */
+#define MCF_GPIO_PAR_CS_PAR_CS1 (0x02)
+#define MCF_GPIO_PAR_CS_PAR_CS2 (0x04)
+#define MCF_GPIO_PAR_CS_PAR_CS3 (0x08)
+#define MCF_GPIO_PAR_CS_PAR_CS4 (0x10)
+#define MCF_GPIO_PAR_CS_PAR_CS5 (0x20)
+#define MCF_GPIO_PAR_CS_PAR_CS_CS1_GPIO (0x00)
+#define MCF_GPIO_PAR_CS_PAR_CS_CS1_SDCS1 (0x01)
+#define MCF_GPIO_PAR_CS_PAR_CS_CS1_CS1 (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_SSI */
+#define MCF_GPIO_PAR_SSI_PAR_MCLK (0x0080)
+#define MCF_GPIO_PAR_SSI_PAR_TXD(x) (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_SSI_PAR_RXD(x) (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_SSI_PAR_FS(x) (((x)&0x0003)<<12)
+#define MCF_GPIO_PAR_SSI_PAR_BCLK(x) (((x)&0x0003)<<14)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_UART */
+#define MCF_GPIO_PAR_UART_PAR_UTXD0 (0x0001)
+#define MCF_GPIO_PAR_UART_PAR_URXD0 (0x0002)
+#define MCF_GPIO_PAR_UART_PAR_URTS0 (0x0004)
+#define MCF_GPIO_PAR_UART_PAR_UCTS0 (0x0008)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1(x) (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_UART_PAR_URXD1(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_UART_PAR_URTS1(x) (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1(x) (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_GPIO (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_SSI_BCLK (0x0800)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_ULPI_D7 (0x0400)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_UCTS1 (0x0C00)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_GPIO (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_SSI_FS (0x0200)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_ULPI_D6 (0x0100)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_URTS1 (0x0300)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_GPIO (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_SSI_RXD (0x0080)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_ULPI_D5 (0x0040)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_URXD1 (0x00C0)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_GPIO (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_SSI_TXD (0x0020)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_ULPI_D4 (0x0010)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_UTXD1 (0x0030)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_QSPI */
+#define MCF_GPIO_PAR_QSPI_PAR_SCK(x) (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_QSPI_PAR_DOUT(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_QSPI_PAR_DIN(x) (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_QSPI_PAR_PCS0(x) (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_QSPI_PAR_PCS1(x) (((x)&0x0003)<<12)
+#define MCF_GPIO_PAR_QSPI_PAR_PCS2(x) (((x)&0x0003)<<14)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_TIMER */
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0(x) (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1(x) (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_GPIO (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TOUT3 (0x80)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_URXD2 (0x40)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TIN3 (0xC0)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_GPIO (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TOUT2 (0x20)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_UTXD2 (0x10)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TIN2 (0x30)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_GPIO (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_TOUT1 (0x08)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_DACK1 (0x04)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_TIN1 (0x0C)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_GPIO (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_TOUT0 (0x02)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_DREQ0 (0x01)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_TIN0 (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_LCDDATA */
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD7_0(x) (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD15_8(x) (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD16(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD17(x) (((x)&0x03)<<6)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_LCDCTL */
+#define MCF_GPIO_PAR_LCDCTL_PAR_CLS (0x0001)
+#define MCF_GPIO_PAR_LCDCTL_PAR_PS (0x0002)
+#define MCF_GPIO_PAR_LCDCTL_PAR_REV (0x0004)
+#define MCF_GPIO_PAR_LCDCTL_PAR_SPL_SPR (0x0008)
+#define MCF_GPIO_PAR_LCDCTL_PAR_CONTRAST (0x0010)
+#define MCF_GPIO_PAR_LCDCTL_PAR_LSCLK (0x0020)
+#define MCF_GPIO_PAR_LCDCTL_PAR_LP_HSYNC (0x0040)
+#define MCF_GPIO_PAR_LCDCTL_PAR_FLM_VSYNC (0x0080)
+#define MCF_GPIO_PAR_LCDCTL_PAR_ACD_OE (0x0100)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_IRQ */
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ1(x) (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ2(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ4(x) (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ5(x) (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ6(x) (((x)&0x0003)<<12)
+
+/* Bit definitions and macros for MCF_GPIO_MSCR_FLEXBUS */
+#define MCF_GPIO_MSCR_FLEXBUS_MSCR_ADDRCTL(x) (((x)&0x03)<<0)
+#define MCF_GPIO_MSCR_FLEXBUS_MSCR_DLOWER(x) (((x)&0x03)<<2)
+#define MCF_GPIO_MSCR_FLEXBUS_MSCR_DUPPER(x) (((x)&0x03)<<4)
+
+/* Bit definitions and macros for MCF_GPIO_MSCR_SDRAM */
+#define MCF_GPIO_MSCR_SDRAM_MSCR_SDRAM(x) (((x)&0x03)<<0)
+#define MCF_GPIO_MSCR_SDRAM_MSCR_SDCLK(x) (((x)&0x03)<<2)
+#define MCF_GPIO_MSCR_SDRAM_MSCR_SDCLKB(x) (((x)&0x03)<<4)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_I2C */
+#define MCF_GPIO_DSCR_I2C_I2C_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_PWM */
+#define MCF_GPIO_DSCR_PWM_PWM_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_FEC */
+#define MCF_GPIO_DSCR_FEC_FEC_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_UART */
+#define MCF_GPIO_DSCR_UART_UART0_DSE(x) (((x)&0x03)<<0)
+#define MCF_GPIO_DSCR_UART_UART1_DSE(x) (((x)&0x03)<<2)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_QSPI */
+#define MCF_GPIO_DSCR_QSPI_QSPI_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_TIMER */
+#define MCF_GPIO_DSCR_TIMER_TIMER_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_SSI */
+#define MCF_GPIO_DSCR_SSI_SSI_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_LCD */
+#define MCF_GPIO_DSCR_LCD_LCD_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_DEBUG */
+#define MCF_GPIO_DSCR_DEBUG_DEBUG_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_CLKRST */
+#define MCF_GPIO_DSCR_CLKRST_CLKRST_DSE(x) (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_IRQ */
+#define MCF_GPIO_DSCR_IRQ_IRQ_DSE(x) (((x)&0x03)<<0)
+
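+/*
+ * Illustrative sketch: driving one of the TIMER port pins as a GPIO output
+ * and reading it back.  The pin must first be routed to its GPIO function
+ * through the pin-assignment register; the choice of pin is arbitrary.
+ *
+ *	MCF_GPIO_PAR_TIMER &= ~MCF_GPIO_PAR_TIMER_PAR_TIN0(3);		// pin assignment: GPIO
+ *	MCF_GPIO_PDDR_TIMER |= MCF_GPIO_PDDR_TIMER_PDDR_TIMER0;	// direction: output
+ *	MCF_GPIO_PODR_TIMER |= MCF_GPIO_PODR_TIMER_PODR_TIMER0;	// drive the pin high
+ *	MCF_GPIO_PCLRR_TIMER = ~MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER0;	// drive it low again
+ *	int level = MCF_GPIO_PPDSDR_TIMER & MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER0;  // read pin
+ */
+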
+/*********************************************************************
+ *
+ * Interrupt Controller (INTC)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_INTC0_IPRH MCF_REG32(0xFC048000)
+#define MCF_INTC0_IPRL MCF_REG32(0xFC048004)
+#define MCF_INTC0_IMRH MCF_REG32(0xFC048008)
+#define MCF_INTC0_IMRL MCF_REG32(0xFC04800C)
+#define MCF_INTC0_INTFRCH MCF_REG32(0xFC048010)
+#define MCF_INTC0_INTFRCL MCF_REG32(0xFC048014)
+#define MCF_INTC0_ICONFIG MCF_REG16(0xFC04801A)
+#define MCF_INTC0_SIMR MCF_REG08(0xFC04801C)
+#define MCF_INTC0_CIMR MCF_REG08(0xFC04801D)
+#define MCF_INTC0_CLMASK MCF_REG08(0xFC04801E)
+#define MCF_INTC0_SLMASK MCF_REG08(0xFC04801F)
+#define MCF_INTC0_ICR0 MCF_REG08(0xFC048040)
+#define MCF_INTC0_ICR1 MCF_REG08(0xFC048041)
+#define MCF_INTC0_ICR2 MCF_REG08(0xFC048042)
+#define MCF_INTC0_ICR3 MCF_REG08(0xFC048043)
+#define MCF_INTC0_ICR4 MCF_REG08(0xFC048044)
+#define MCF_INTC0_ICR5 MCF_REG08(0xFC048045)
+#define MCF_INTC0_ICR6 MCF_REG08(0xFC048046)
+#define MCF_INTC0_ICR7 MCF_REG08(0xFC048047)
+#define MCF_INTC0_ICR8 MCF_REG08(0xFC048048)
+#define MCF_INTC0_ICR9 MCF_REG08(0xFC048049)
+#define MCF_INTC0_ICR10 MCF_REG08(0xFC04804A)
+#define MCF_INTC0_ICR11 MCF_REG08(0xFC04804B)
+#define MCF_INTC0_ICR12 MCF_REG08(0xFC04804C)
+#define MCF_INTC0_ICR13 MCF_REG08(0xFC04804D)
+#define MCF_INTC0_ICR14 MCF_REG08(0xFC04804E)
+#define MCF_INTC0_ICR15 MCF_REG08(0xFC04804F)
+#define MCF_INTC0_ICR16 MCF_REG08(0xFC048050)
+#define MCF_INTC0_ICR17 MCF_REG08(0xFC048051)
+#define MCF_INTC0_ICR18 MCF_REG08(0xFC048052)
+#define MCF_INTC0_ICR19 MCF_REG08(0xFC048053)
+#define MCF_INTC0_ICR20 MCF_REG08(0xFC048054)
+#define MCF_INTC0_ICR21 MCF_REG08(0xFC048055)
+#define MCF_INTC0_ICR22 MCF_REG08(0xFC048056)
+#define MCF_INTC0_ICR23 MCF_REG08(0xFC048057)
+#define MCF_INTC0_ICR24 MCF_REG08(0xFC048058)
+#define MCF_INTC0_ICR25 MCF_REG08(0xFC048059)
+#define MCF_INTC0_ICR26 MCF_REG08(0xFC04805A)
+#define MCF_INTC0_ICR27 MCF_REG08(0xFC04805B)
+#define MCF_INTC0_ICR28 MCF_REG08(0xFC04805C)
+#define MCF_INTC0_ICR29 MCF_REG08(0xFC04805D)
+#define MCF_INTC0_ICR30 MCF_REG08(0xFC04805E)
+#define MCF_INTC0_ICR31 MCF_REG08(0xFC04805F)
+#define MCF_INTC0_ICR32 MCF_REG08(0xFC048060)
+#define MCF_INTC0_ICR33 MCF_REG08(0xFC048061)
+#define MCF_INTC0_ICR34 MCF_REG08(0xFC048062)
+#define MCF_INTC0_ICR35 MCF_REG08(0xFC048063)
+#define MCF_INTC0_ICR36 MCF_REG08(0xFC048064)
+#define MCF_INTC0_ICR37 MCF_REG08(0xFC048065)
+#define MCF_INTC0_ICR38 MCF_REG08(0xFC048066)
+#define MCF_INTC0_ICR39 MCF_REG08(0xFC048067)
+#define MCF_INTC0_ICR40 MCF_REG08(0xFC048068)
+#define MCF_INTC0_ICR41 MCF_REG08(0xFC048069)
+#define MCF_INTC0_ICR42 MCF_REG08(0xFC04806A)
+#define MCF_INTC0_ICR43 MCF_REG08(0xFC04806B)
+#define MCF_INTC0_ICR44 MCF_REG08(0xFC04806C)
+#define MCF_INTC0_ICR45 MCF_REG08(0xFC04806D)
+#define MCF_INTC0_ICR46 MCF_REG08(0xFC04806E)
+#define MCF_INTC0_ICR47 MCF_REG08(0xFC04806F)
+#define MCF_INTC0_ICR48 MCF_REG08(0xFC048070)
+#define MCF_INTC0_ICR49 MCF_REG08(0xFC048071)
+#define MCF_INTC0_ICR50 MCF_REG08(0xFC048072)
+#define MCF_INTC0_ICR51 MCF_REG08(0xFC048073)
+#define MCF_INTC0_ICR52 MCF_REG08(0xFC048074)
+#define MCF_INTC0_ICR53 MCF_REG08(0xFC048075)
+#define MCF_INTC0_ICR54 MCF_REG08(0xFC048076)
+#define MCF_INTC0_ICR55 MCF_REG08(0xFC048077)
+#define MCF_INTC0_ICR56 MCF_REG08(0xFC048078)
+#define MCF_INTC0_ICR57 MCF_REG08(0xFC048079)
+#define MCF_INTC0_ICR58 MCF_REG08(0xFC04807A)
+#define MCF_INTC0_ICR59 MCF_REG08(0xFC04807B)
+#define MCF_INTC0_ICR60 MCF_REG08(0xFC04807C)
+#define MCF_INTC0_ICR61 MCF_REG08(0xFC04807D)
+#define MCF_INTC0_ICR62 MCF_REG08(0xFC04807E)
+#define MCF_INTC0_ICR63 MCF_REG08(0xFC04807F)
+#define MCF_INTC0_ICR(x) MCF_REG08(0xFC048040+((x)*0x001))
+#define MCF_INTC0_SWIACK MCF_REG08(0xFC0480E0)
+#define MCF_INTC0_L1IACK MCF_REG08(0xFC0480E4)
+#define MCF_INTC0_L2IACK MCF_REG08(0xFC0480E8)
+#define MCF_INTC0_L3IACK MCF_REG08(0xFC0480EC)
+#define MCF_INTC0_L4IACK MCF_REG08(0xFC0480F0)
+#define MCF_INTC0_L5IACK MCF_REG08(0xFC0480F4)
+#define MCF_INTC0_L6IACK MCF_REG08(0xFC0480F8)
+#define MCF_INTC0_L7IACK MCF_REG08(0xFC0480FC)
+#define MCF_INTC0_LIACK(x) MCF_REG08(0xFC0480E4+((x)*0x004))
+#define MCF_INTC1_IPRH MCF_REG32(0xFC04C000)
+#define MCF_INTC1_IPRL MCF_REG32(0xFC04C004)
+#define MCF_INTC1_IMRH MCF_REG32(0xFC04C008)
+#define MCF_INTC1_IMRL MCF_REG32(0xFC04C00C)
+#define MCF_INTC1_INTFRCH MCF_REG32(0xFC04C010)
+#define MCF_INTC1_INTFRCL MCF_REG32(0xFC04C014)
+#define MCF_INTC1_ICONFIG MCF_REG16(0xFC04C01A)
+#define MCF_INTC1_SIMR MCF_REG08(0xFC04C01C)
+#define MCF_INTC1_CIMR MCF_REG08(0xFC04C01D)
+#define MCF_INTC1_CLMASK MCF_REG08(0xFC04C01E)
+#define MCF_INTC1_SLMASK MCF_REG08(0xFC04C01F)
+#define MCF_INTC1_ICR0 MCF_REG08(0xFC04C040)
+#define MCF_INTC1_ICR1 MCF_REG08(0xFC04C041)
+#define MCF_INTC1_ICR2 MCF_REG08(0xFC04C042)
+#define MCF_INTC1_ICR3 MCF_REG08(0xFC04C043)
+#define MCF_INTC1_ICR4 MCF_REG08(0xFC04C044)
+#define MCF_INTC1_ICR5 MCF_REG08(0xFC04C045)
+#define MCF_INTC1_ICR6 MCF_REG08(0xFC04C046)
+#define MCF_INTC1_ICR7 MCF_REG08(0xFC04C047)
+#define MCF_INTC1_ICR8 MCF_REG08(0xFC04C048)
+#define MCF_INTC1_ICR9 MCF_REG08(0xFC04C049)
+#define MCF_INTC1_ICR10 MCF_REG08(0xFC04C04A)
+#define MCF_INTC1_ICR11 MCF_REG08(0xFC04C04B)
+#define MCF_INTC1_ICR12 MCF_REG08(0xFC04C04C)
+#define MCF_INTC1_ICR13 MCF_REG08(0xFC04C04D)
+#define MCF_INTC1_ICR14 MCF_REG08(0xFC04C04E)
+#define MCF_INTC1_ICR15 MCF_REG08(0xFC04C04F)
+#define MCF_INTC1_ICR16 MCF_REG08(0xFC04C050)
+#define MCF_INTC1_ICR17 MCF_REG08(0xFC04C051)
+#define MCF_INTC1_ICR18 MCF_REG08(0xFC04C052)
+#define MCF_INTC1_ICR19 MCF_REG08(0xFC04C053)
+#define MCF_INTC1_ICR20 MCF_REG08(0xFC04C054)
+#define MCF_INTC1_ICR21 MCF_REG08(0xFC04C055)
+#define MCF_INTC1_ICR22 MCF_REG08(0xFC04C056)
+#define MCF_INTC1_ICR23 MCF_REG08(0xFC04C057)
+#define MCF_INTC1_ICR24 MCF_REG08(0xFC04C058)
+#define MCF_INTC1_ICR25 MCF_REG08(0xFC04C059)
+#define MCF_INTC1_ICR26 MCF_REG08(0xFC04C05A)
+#define MCF_INTC1_ICR27 MCF_REG08(0xFC04C05B)
+#define MCF_INTC1_ICR28 MCF_REG08(0xFC04C05C)
+#define MCF_INTC1_ICR29 MCF_REG08(0xFC04C05D)
+#define MCF_INTC1_ICR30 MCF_REG08(0xFC04C05E)
+#define MCF_INTC1_ICR31 MCF_REG08(0xFC04C05F)
+#define MCF_INTC1_ICR32 MCF_REG08(0xFC04C060)
+#define MCF_INTC1_ICR33 MCF_REG08(0xFC04C061)
+#define MCF_INTC1_ICR34 MCF_REG08(0xFC04C062)
+#define MCF_INTC1_ICR35 MCF_REG08(0xFC04C063)
+#define MCF_INTC1_ICR36 MCF_REG08(0xFC04C064)
+#define MCF_INTC1_ICR37 MCF_REG08(0xFC04C065)
+#define MCF_INTC1_ICR38 MCF_REG08(0xFC04C066)
+#define MCF_INTC1_ICR39 MCF_REG08(0xFC04C067)
+#define MCF_INTC1_ICR40 MCF_REG08(0xFC04C068)
+#define MCF_INTC1_ICR41 MCF_REG08(0xFC04C069)
+#define MCF_INTC1_ICR42 MCF_REG08(0xFC04C06A)
+#define MCF_INTC1_ICR43 MCF_REG08(0xFC04C06B)
+#define MCF_INTC1_ICR44 MCF_REG08(0xFC04C06C)
+#define MCF_INTC1_ICR45 MCF_REG08(0xFC04C06D)
+#define MCF_INTC1_ICR46 MCF_REG08(0xFC04C06E)
+#define MCF_INTC1_ICR47 MCF_REG08(0xFC04C06F)
+#define MCF_INTC1_ICR48 MCF_REG08(0xFC04C070)
+#define MCF_INTC1_ICR49 MCF_REG08(0xFC04C071)
+#define MCF_INTC1_ICR50 MCF_REG08(0xFC04C072)
+#define MCF_INTC1_ICR51 MCF_REG08(0xFC04C073)
+#define MCF_INTC1_ICR52 MCF_REG08(0xFC04C074)
+#define MCF_INTC1_ICR53 MCF_REG08(0xFC04C075)
+#define MCF_INTC1_ICR54 MCF_REG08(0xFC04C076)
+#define MCF_INTC1_ICR55 MCF_REG08(0xFC04C077)
+#define MCF_INTC1_ICR56 MCF_REG08(0xFC04C078)
+#define MCF_INTC1_ICR57 MCF_REG08(0xFC04C079)
+#define MCF_INTC1_ICR58 MCF_REG08(0xFC04C07A)
+#define MCF_INTC1_ICR59 MCF_REG08(0xFC04C07B)
+#define MCF_INTC1_ICR60 MCF_REG08(0xFC04C07C)
+#define MCF_INTC1_ICR61 MCF_REG08(0xFC04C07D)
+#define MCF_INTC1_ICR62 MCF_REG08(0xFC04C07E)
+#define MCF_INTC1_ICR63 MCF_REG08(0xFC04C07F)
+#define MCF_INTC1_ICR(x) MCF_REG08(0xFC04C040+((x)*0x001))
+#define MCF_INTC1_SWIACK MCF_REG08(0xFC04C0E0)
+#define MCF_INTC1_L1IACK MCF_REG08(0xFC04C0E4)
+#define MCF_INTC1_L2IACK MCF_REG08(0xFC04C0E8)
+#define MCF_INTC1_L3IACK MCF_REG08(0xFC04C0EC)
+#define MCF_INTC1_L4IACK MCF_REG08(0xFC04C0F0)
+#define MCF_INTC1_L5IACK MCF_REG08(0xFC04C0F4)
+#define MCF_INTC1_L6IACK MCF_REG08(0xFC04C0F8)
+#define MCF_INTC1_L7IACK MCF_REG08(0xFC04C0FC)
+#define MCF_INTC1_LIACK(x) MCF_REG08(0xFC04C0E4+((x)*0x004))
+#define MCF_INTC_IPRH(x) MCF_REG32(0xFC048000+((x)*0x4000))
+#define MCF_INTC_IPRL(x) MCF_REG32(0xFC048004+((x)*0x4000))
+#define MCF_INTC_IMRH(x) MCF_REG32(0xFC048008+((x)*0x4000))
+#define MCF_INTC_IMRL(x) MCF_REG32(0xFC04800C+((x)*0x4000))
+#define MCF_INTC_INTFRCH(x) MCF_REG32(0xFC048010+((x)*0x4000))
+#define MCF_INTC_INTFRCL(x) MCF_REG32(0xFC048014+((x)*0x4000))
+#define MCF_INTC_ICONFIG(x) MCF_REG16(0xFC04801A+((x)*0x4000))
+#define MCF_INTC_SIMR(x) MCF_REG08(0xFC04801C+((x)*0x4000))
+#define MCF_INTC_CIMR(x) MCF_REG08(0xFC04801D+((x)*0x4000))
+#define MCF_INTC_CLMASK(x) MCF_REG08(0xFC04801E+((x)*0x4000))
+#define MCF_INTC_SLMASK(x) MCF_REG08(0xFC04801F+((x)*0x4000))
+#define MCF_INTC_ICR0(x) MCF_REG08(0xFC048040+((x)*0x4000))
+#define MCF_INTC_ICR1(x) MCF_REG08(0xFC048041+((x)*0x4000))
+#define MCF_INTC_ICR2(x) MCF_REG08(0xFC048042+((x)*0x4000))
+#define MCF_INTC_ICR3(x) MCF_REG08(0xFC048043+((x)*0x4000))
+#define MCF_INTC_ICR4(x) MCF_REG08(0xFC048044+((x)*0x4000))
+#define MCF_INTC_ICR5(x) MCF_REG08(0xFC048045+((x)*0x4000))
+#define MCF_INTC_ICR6(x) MCF_REG08(0xFC048046+((x)*0x4000))
+#define MCF_INTC_ICR7(x) MCF_REG08(0xFC048047+((x)*0x4000))
+#define MCF_INTC_ICR8(x) MCF_REG08(0xFC048048+((x)*0x4000))
+#define MCF_INTC_ICR9(x) MCF_REG08(0xFC048049+((x)*0x4000))
+#define MCF_INTC_ICR10(x) MCF_REG08(0xFC04804A+((x)*0x4000))
+#define MCF_INTC_ICR11(x) MCF_REG08(0xFC04804B+((x)*0x4000))
+#define MCF_INTC_ICR12(x) MCF_REG08(0xFC04804C+((x)*0x4000))
+#define MCF_INTC_ICR13(x) MCF_REG08(0xFC04804D+((x)*0x4000))
+#define MCF_INTC_ICR14(x) MCF_REG08(0xFC04804E+((x)*0x4000))
+#define MCF_INTC_ICR15(x) MCF_REG08(0xFC04804F+((x)*0x4000))
+#define MCF_INTC_ICR16(x) MCF_REG08(0xFC048050+((x)*0x4000))
+#define MCF_INTC_ICR17(x) MCF_REG08(0xFC048051+((x)*0x4000))
+#define MCF_INTC_ICR18(x) MCF_REG08(0xFC048052+((x)*0x4000))
+#define MCF_INTC_ICR19(x) MCF_REG08(0xFC048053+((x)*0x4000))
+#define MCF_INTC_ICR20(x) MCF_REG08(0xFC048054+((x)*0x4000))
+#define MCF_INTC_ICR21(x) MCF_REG08(0xFC048055+((x)*0x4000))
+#define MCF_INTC_ICR22(x) MCF_REG08(0xFC048056+((x)*0x4000))
+#define MCF_INTC_ICR23(x) MCF_REG08(0xFC048057+((x)*0x4000))
+#define MCF_INTC_ICR24(x) MCF_REG08(0xFC048058+((x)*0x4000))
+#define MCF_INTC_ICR25(x) MCF_REG08(0xFC048059+((x)*0x4000))
+#define MCF_INTC_ICR26(x) MCF_REG08(0xFC04805A+((x)*0x4000))
+#define MCF_INTC_ICR27(x) MCF_REG08(0xFC04805B+((x)*0x4000))
+#define MCF_INTC_ICR28(x) MCF_REG08(0xFC04805C+((x)*0x4000))
+#define MCF_INTC_ICR29(x) MCF_REG08(0xFC04805D+((x)*0x4000))
+#define MCF_INTC_ICR30(x) MCF_REG08(0xFC04805E+((x)*0x4000))
+#define MCF_INTC_ICR31(x) MCF_REG08(0xFC04805F+((x)*0x4000))
+#define MCF_INTC_ICR32(x) MCF_REG08(0xFC048060+((x)*0x4000))
+#define MCF_INTC_ICR33(x) MCF_REG08(0xFC048061+((x)*0x4000))
+#define MCF_INTC_ICR34(x) MCF_REG08(0xFC048062+((x)*0x4000))
+#define MCF_INTC_ICR35(x) MCF_REG08(0xFC048063+((x)*0x4000))
+#define MCF_INTC_ICR36(x) MCF_REG08(0xFC048064+((x)*0x4000))
+#define MCF_INTC_ICR37(x) MCF_REG08(0xFC048065+((x)*0x4000))
+#define MCF_INTC_ICR38(x) MCF_REG08(0xFC048066+((x)*0x4000))
+#define MCF_INTC_ICR39(x) MCF_REG08(0xFC048067+((x)*0x4000))
+#define MCF_INTC_ICR40(x) MCF_REG08(0xFC048068+((x)*0x4000))
+#define MCF_INTC_ICR41(x) MCF_REG08(0xFC048069+((x)*0x4000))
+#define MCF_INTC_ICR42(x) MCF_REG08(0xFC04806A+((x)*0x4000))
+#define MCF_INTC_ICR43(x) MCF_REG08(0xFC04806B+((x)*0x4000))
+#define MCF_INTC_ICR44(x) MCF_REG08(0xFC04806C+((x)*0x4000))
+#define MCF_INTC_ICR45(x) MCF_REG08(0xFC04806D+((x)*0x4000))
+#define MCF_INTC_ICR46(x) MCF_REG08(0xFC04806E+((x)*0x4000))
+#define MCF_INTC_ICR47(x) MCF_REG08(0xFC04806F+((x)*0x4000))
+#define MCF_INTC_ICR48(x) MCF_REG08(0xFC048070+((x)*0x4000))
+#define MCF_INTC_ICR49(x) MCF_REG08(0xFC048071+((x)*0x4000))
+#define MCF_INTC_ICR50(x) MCF_REG08(0xFC048072+((x)*0x4000))
+#define MCF_INTC_ICR51(x) MCF_REG08(0xFC048073+((x)*0x4000))
+#define MCF_INTC_ICR52(x) MCF_REG08(0xFC048074+((x)*0x4000))
+#define MCF_INTC_ICR53(x) MCF_REG08(0xFC048075+((x)*0x4000))
+#define MCF_INTC_ICR54(x) MCF_REG08(0xFC048076+((x)*0x4000))
+#define MCF_INTC_ICR55(x) MCF_REG08(0xFC048077+((x)*0x4000))
+#define MCF_INTC_ICR56(x) MCF_REG08(0xFC048078+((x)*0x4000))
+#define MCF_INTC_ICR57(x) MCF_REG08(0xFC048079+((x)*0x4000))
+#define MCF_INTC_ICR58(x) MCF_REG08(0xFC04807A+((x)*0x4000))
+#define MCF_INTC_ICR59(x) MCF_REG08(0xFC04807B+((x)*0x4000))
+#define MCF_INTC_ICR60(x) MCF_REG08(0xFC04807C+((x)*0x4000))
+#define MCF_INTC_ICR61(x) MCF_REG08(0xFC04807D+((x)*0x4000))
+#define MCF_INTC_ICR62(x) MCF_REG08(0xFC04807E+((x)*0x4000))
+#define MCF_INTC_ICR63(x) MCF_REG08(0xFC04807F+((x)*0x4000))
+#define MCF_INTC_SWIACK(x) MCF_REG08(0xFC0480E0+((x)*0x4000))
+#define MCF_INTC_L1IACK(x) MCF_REG08(0xFC0480E4+((x)*0x4000))
+#define MCF_INTC_L2IACK(x) MCF_REG08(0xFC0480E8+((x)*0x4000))
+#define MCF_INTC_L3IACK(x) MCF_REG08(0xFC0480EC+((x)*0x4000))
+#define MCF_INTC_L4IACK(x) MCF_REG08(0xFC0480F0+((x)*0x4000))
+#define MCF_INTC_L5IACK(x) MCF_REG08(0xFC0480F4+((x)*0x4000))
+#define MCF_INTC_L6IACK(x) MCF_REG08(0xFC0480F8+((x)*0x4000))
+#define MCF_INTC_L7IACK(x) MCF_REG08(0xFC0480FC+((x)*0x4000))
+
+/* Bit definitions and macros for MCF_INTC_IPRH */
+#define MCF_INTC_IPRH_INT32 (0x00000001)
+#define MCF_INTC_IPRH_INT33 (0x00000002)
+#define MCF_INTC_IPRH_INT34 (0x00000004)
+#define MCF_INTC_IPRH_INT35 (0x00000008)
+#define MCF_INTC_IPRH_INT36 (0x00000010)
+#define MCF_INTC_IPRH_INT37 (0x00000020)
+#define MCF_INTC_IPRH_INT38 (0x00000040)
+#define MCF_INTC_IPRH_INT39 (0x00000080)
+#define MCF_INTC_IPRH_INT40 (0x00000100)
+#define MCF_INTC_IPRH_INT41 (0x00000200)
+#define MCF_INTC_IPRH_INT42 (0x00000400)
+#define MCF_INTC_IPRH_INT43 (0x00000800)
+#define MCF_INTC_IPRH_INT44 (0x00001000)
+#define MCF_INTC_IPRH_INT45 (0x00002000)
+#define MCF_INTC_IPRH_INT46 (0x00004000)
+#define MCF_INTC_IPRH_INT47 (0x00008000)
+#define MCF_INTC_IPRH_INT48 (0x00010000)
+#define MCF_INTC_IPRH_INT49 (0x00020000)
+#define MCF_INTC_IPRH_INT50 (0x00040000)
+#define MCF_INTC_IPRH_INT51 (0x00080000)
+#define MCF_INTC_IPRH_INT52 (0x00100000)
+#define MCF_INTC_IPRH_INT53 (0x00200000)
+#define MCF_INTC_IPRH_INT54 (0x00400000)
+#define MCF_INTC_IPRH_INT55 (0x00800000)
+#define MCF_INTC_IPRH_INT56 (0x01000000)
+#define MCF_INTC_IPRH_INT57 (0x02000000)
+#define MCF_INTC_IPRH_INT58 (0x04000000)
+#define MCF_INTC_IPRH_INT59 (0x08000000)
+#define MCF_INTC_IPRH_INT60 (0x10000000)
+#define MCF_INTC_IPRH_INT61 (0x20000000)
+#define MCF_INTC_IPRH_INT62 (0x40000000)
+#define MCF_INTC_IPRH_INT63 (0x80000000)
+
+/* Bit definitions and macros for MCF_INTC_IPRL */
+#define MCF_INTC_IPRL_INT0 (0x00000001)
+#define MCF_INTC_IPRL_INT1 (0x00000002)
+#define MCF_INTC_IPRL_INT2 (0x00000004)
+#define MCF_INTC_IPRL_INT3 (0x00000008)
+#define MCF_INTC_IPRL_INT4 (0x00000010)
+#define MCF_INTC_IPRL_INT5 (0x00000020)
+#define MCF_INTC_IPRL_INT6 (0x00000040)
+#define MCF_INTC_IPRL_INT7 (0x00000080)
+#define MCF_INTC_IPRL_INT8 (0x00000100)
+#define MCF_INTC_IPRL_INT9 (0x00000200)
+#define MCF_INTC_IPRL_INT10 (0x00000400)
+#define MCF_INTC_IPRL_INT11 (0x00000800)
+#define MCF_INTC_IPRL_INT12 (0x00001000)
+#define MCF_INTC_IPRL_INT13 (0x00002000)
+#define MCF_INTC_IPRL_INT14 (0x00004000)
+#define MCF_INTC_IPRL_INT15 (0x00008000)
+#define MCF_INTC_IPRL_INT16 (0x00010000)
+#define MCF_INTC_IPRL_INT17 (0x00020000)
+#define MCF_INTC_IPRL_INT18 (0x00040000)
+#define MCF_INTC_IPRL_INT19 (0x00080000)
+#define MCF_INTC_IPRL_INT20 (0x00100000)
+#define MCF_INTC_IPRL_INT21 (0x00200000)
+#define MCF_INTC_IPRL_INT22 (0x00400000)
+#define MCF_INTC_IPRL_INT23 (0x00800000)
+#define MCF_INTC_IPRL_INT24 (0x01000000)
+#define MCF_INTC_IPRL_INT25 (0x02000000)
+#define MCF_INTC_IPRL_INT26 (0x04000000)
+#define MCF_INTC_IPRL_INT27 (0x08000000)
+#define MCF_INTC_IPRL_INT28 (0x10000000)
+#define MCF_INTC_IPRL_INT29 (0x20000000)
+#define MCF_INTC_IPRL_INT30 (0x40000000)
+#define MCF_INTC_IPRL_INT31 (0x80000000)
+
+/* Bit definitions and macros for MCF_INTC_IMRH */
+#define MCF_INTC_IMRH_INT_MASK32 (0x00000001)
+#define MCF_INTC_IMRH_INT_MASK33 (0x00000002)
+#define MCF_INTC_IMRH_INT_MASK34 (0x00000004)
+#define MCF_INTC_IMRH_INT_MASK35 (0x00000008)
+#define MCF_INTC_IMRH_INT_MASK36 (0x00000010)
+#define MCF_INTC_IMRH_INT_MASK37 (0x00000020)
+#define MCF_INTC_IMRH_INT_MASK38 (0x00000040)
+#define MCF_INTC_IMRH_INT_MASK39 (0x00000080)
+#define MCF_INTC_IMRH_INT_MASK40 (0x00000100)
+#define MCF_INTC_IMRH_INT_MASK41 (0x00000200)
+#define MCF_INTC_IMRH_INT_MASK42 (0x00000400)
+#define MCF_INTC_IMRH_INT_MASK43 (0x00000800)
+#define MCF_INTC_IMRH_INT_MASK44 (0x00001000)
+#define MCF_INTC_IMRH_INT_MASK45 (0x00002000)
+#define MCF_INTC_IMRH_INT_MASK46 (0x00004000)
+#define MCF_INTC_IMRH_INT_MASK47 (0x00008000)
+#define MCF_INTC_IMRH_INT_MASK48 (0x00010000)
+#define MCF_INTC_IMRH_INT_MASK49 (0x00020000)
+#define MCF_INTC_IMRH_INT_MASK50 (0x00040000)
+#define MCF_INTC_IMRH_INT_MASK51 (0x00080000)
+#define MCF_INTC_IMRH_INT_MASK52 (0x00100000)
+#define MCF_INTC_IMRH_INT_MASK53 (0x00200000)
+#define MCF_INTC_IMRH_INT_MASK54 (0x00400000)
+#define MCF_INTC_IMRH_INT_MASK55 (0x00800000)
+#define MCF_INTC_IMRH_INT_MASK56 (0x01000000)
+#define MCF_INTC_IMRH_INT_MASK57 (0x02000000)
+#define MCF_INTC_IMRH_INT_MASK58 (0x04000000)
+#define MCF_INTC_IMRH_INT_MASK59 (0x08000000)
+#define MCF_INTC_IMRH_INT_MASK60 (0x10000000)
+#define MCF_INTC_IMRH_INT_MASK61 (0x20000000)
+#define MCF_INTC_IMRH_INT_MASK62 (0x40000000)
+#define MCF_INTC_IMRH_INT_MASK63 (0x80000000)
+
+/* Bit definitions and macros for MCF_INTC_IMRL */
+#define MCF_INTC_IMRL_INT_MASK0 (0x00000001)
+#define MCF_INTC_IMRL_INT_MASK1 (0x00000002)
+#define MCF_INTC_IMRL_INT_MASK2 (0x00000004)
+#define MCF_INTC_IMRL_INT_MASK3 (0x00000008)
+#define MCF_INTC_IMRL_INT_MASK4 (0x00000010)
+#define MCF_INTC_IMRL_INT_MASK5 (0x00000020)
+#define MCF_INTC_IMRL_INT_MASK6 (0x00000040)
+#define MCF_INTC_IMRL_INT_MASK7 (0x00000080)
+#define MCF_INTC_IMRL_INT_MASK8 (0x00000100)
+#define MCF_INTC_IMRL_INT_MASK9 (0x00000200)
+#define MCF_INTC_IMRL_INT_MASK10 (0x00000400)
+#define MCF_INTC_IMRL_INT_MASK11 (0x00000800)
+#define MCF_INTC_IMRL_INT_MASK12 (0x00001000)
+#define MCF_INTC_IMRL_INT_MASK13 (0x00002000)
+#define MCF_INTC_IMRL_INT_MASK14 (0x00004000)
+#define MCF_INTC_IMRL_INT_MASK15 (0x00008000)
+#define MCF_INTC_IMRL_INT_MASK16 (0x00010000)
+#define MCF_INTC_IMRL_INT_MASK17 (0x00020000)
+#define MCF_INTC_IMRL_INT_MASK18 (0x00040000)
+#define MCF_INTC_IMRL_INT_MASK19 (0x00080000)
+#define MCF_INTC_IMRL_INT_MASK20 (0x00100000)
+#define MCF_INTC_IMRL_INT_MASK21 (0x00200000)
+#define MCF_INTC_IMRL_INT_MASK22 (0x00400000)
+#define MCF_INTC_IMRL_INT_MASK23 (0x00800000)
+#define MCF_INTC_IMRL_INT_MASK24 (0x01000000)
+#define MCF_INTC_IMRL_INT_MASK25 (0x02000000)
+#define MCF_INTC_IMRL_INT_MASK26 (0x04000000)
+#define MCF_INTC_IMRL_INT_MASK27 (0x08000000)
+#define MCF_INTC_IMRL_INT_MASK28 (0x10000000)
+#define MCF_INTC_IMRL_INT_MASK29 (0x20000000)
+#define MCF_INTC_IMRL_INT_MASK30 (0x40000000)
+#define MCF_INTC_IMRL_INT_MASK31 (0x80000000)
+
+/* Bit definitions and macros for MCF_INTC_INTFRCH */
+#define MCF_INTC_INTFRCH_INTFRC32 (0x00000001)
+#define MCF_INTC_INTFRCH_INTFRC33 (0x00000002)
+#define MCF_INTC_INTFRCH_INTFRC34 (0x00000004)
+#define MCF_INTC_INTFRCH_INTFRC35 (0x00000008)
+#define MCF_INTC_INTFRCH_INTFRC36 (0x00000010)
+#define MCF_INTC_INTFRCH_INTFRC37 (0x00000020)
+#define MCF_INTC_INTFRCH_INTFRC38 (0x00000040)
+#define MCF_INTC_INTFRCH_INTFRC39 (0x00000080)
+#define MCF_INTC_INTFRCH_INTFRC40 (0x00000100)
+#define MCF_INTC_INTFRCH_INTFRC41 (0x00000200)
+#define MCF_INTC_INTFRCH_INTFRC42 (0x00000400)
+#define MCF_INTC_INTFRCH_INTFRC43 (0x00000800)
+#define MCF_INTC_INTFRCH_INTFRC44 (0x00001000)
+#define MCF_INTC_INTFRCH_INTFRC45 (0x00002000)
+#define MCF_INTC_INTFRCH_INTFRC46 (0x00004000)
+#define MCF_INTC_INTFRCH_INTFRC47 (0x00008000)
+#define MCF_INTC_INTFRCH_INTFRC48 (0x00010000)
+#define MCF_INTC_INTFRCH_INTFRC49 (0x00020000)
+#define MCF_INTC_INTFRCH_INTFRC50 (0x00040000)
+#define MCF_INTC_INTFRCH_INTFRC51 (0x00080000)
+#define MCF_INTC_INTFRCH_INTFRC52 (0x00100000)
+#define MCF_INTC_INTFRCH_INTFRC53 (0x00200000)
+#define MCF_INTC_INTFRCH_INTFRC54 (0x00400000)
+#define MCF_INTC_INTFRCH_INTFRC55 (0x00800000)
+#define MCF_INTC_INTFRCH_INTFRC56 (0x01000000)
+#define MCF_INTC_INTFRCH_INTFRC57 (0x02000000)
+#define MCF_INTC_INTFRCH_INTFRC58 (0x04000000)
+#define MCF_INTC_INTFRCH_INTFRC59 (0x08000000)
+#define MCF_INTC_INTFRCH_INTFRC60 (0x10000000)
+#define MCF_INTC_INTFRCH_INTFRC61 (0x20000000)
+#define MCF_INTC_INTFRCH_INTFRC62 (0x40000000)
+#define MCF_INTC_INTFRCH_INTFRC63 (0x80000000)
+
+/* Bit definitions and macros for MCF_INTC_INTFRCL */
+#define MCF_INTC_INTFRCL_INTFRC0 (0x00000001)
+#define MCF_INTC_INTFRCL_INTFRC1 (0x00000002)
+#define MCF_INTC_INTFRCL_INTFRC2 (0x00000004)
+#define MCF_INTC_INTFRCL_INTFRC3 (0x00000008)
+#define MCF_INTC_INTFRCL_INTFRC4 (0x00000010)
+#define MCF_INTC_INTFRCL_INTFRC5 (0x00000020)
+#define MCF_INTC_INTFRCL_INTFRC6 (0x00000040)
+#define MCF_INTC_INTFRCL_INTFRC7 (0x00000080)
+#define MCF_INTC_INTFRCL_INTFRC8 (0x00000100)
+#define MCF_INTC_INTFRCL_INTFRC9 (0x00000200)
+#define MCF_INTC_INTFRCL_INTFRC10 (0x00000400)
+#define MCF_INTC_INTFRCL_INTFRC11 (0x00000800)
+#define MCF_INTC_INTFRCL_INTFRC12 (0x00001000)
+#define MCF_INTC_INTFRCL_INTFRC13 (0x00002000)
+#define MCF_INTC_INTFRCL_INTFRC14 (0x00004000)
+#define MCF_INTC_INTFRCL_INTFRC15 (0x00008000)
+#define MCF_INTC_INTFRCL_INTFRC16 (0x00010000)
+#define MCF_INTC_INTFRCL_INTFRC17 (0x00020000)
+#define MCF_INTC_INTFRCL_INTFRC18 (0x00040000)
+#define MCF_INTC_INTFRCL_INTFRC19 (0x00080000)
+#define MCF_INTC_INTFRCL_INTFRC20 (0x00100000)
+#define MCF_INTC_INTFRCL_INTFRC21 (0x00200000)
+#define MCF_INTC_INTFRCL_INTFRC22 (0x00400000)
+#define MCF_INTC_INTFRCL_INTFRC23 (0x00800000)
+#define MCF_INTC_INTFRCL_INTFRC24 (0x01000000)
+#define MCF_INTC_INTFRCL_INTFRC25 (0x02000000)
+#define MCF_INTC_INTFRCL_INTFRC26 (0x04000000)
+#define MCF_INTC_INTFRCL_INTFRC27 (0x08000000)
+#define MCF_INTC_INTFRCL_INTFRC28 (0x10000000)
+#define MCF_INTC_INTFRCL_INTFRC29 (0x20000000)
+#define MCF_INTC_INTFRCL_INTFRC30 (0x40000000)
+#define MCF_INTC_INTFRCL_INTFRC31 (0x80000000)
+
+/* Bit definitions and macros for MCF_INTC_ICONFIG */
+#define MCF_INTC_ICONFIG_EMASK (0x0020)
+#define MCF_INTC_ICONFIG_ELVLPRI1 (0x0200)
+#define MCF_INTC_ICONFIG_ELVLPRI2 (0x0400)
+#define MCF_INTC_ICONFIG_ELVLPRI3 (0x0800)
+#define MCF_INTC_ICONFIG_ELVLPRI4 (0x1000)
+#define MCF_INTC_ICONFIG_ELVLPRI5 (0x2000)
+#define MCF_INTC_ICONFIG_ELVLPRI6 (0x4000)
+#define MCF_INTC_ICONFIG_ELVLPRI7 (0x8000)
+
+/* Bit definitions and macros for MCF_INTC_SIMR */
+#define MCF_INTC_SIMR_SIMR(x) (((x)&0x7F)<<0)
+
+/* Bit definitions and macros for MCF_INTC_CIMR */
+#define MCF_INTC_CIMR_CIMR(x) (((x)&0x7F)<<0)
+
+/* Bit definitions and macros for MCF_INTC_CLMASK */
+#define MCF_INTC_CLMASK_CLMASK(x) (((x)&0x0F)<<0)
+
+/* Bit definitions and macros for MCF_INTC_SLMASK */
+#define MCF_INTC_SLMASK_SLMASK(x) (((x)&0x0F)<<0)
+
+/* Bit definitions and macros for MCF_INTC_ICR */
+#define MCF_INTC_ICR_IL(x) (((x)&0x07)<<0)
+
+/* Bit definitions and macros for MCF_INTC_SWIACK */
+#define MCF_INTC_SWIACK_VECTOR(x) (((x)&0xFF)<<0)
+
+/* Bit definitions and macros for MCF_INTC_LIACK */
+#define MCF_INTC_LIACK_VECTOR(x) (((x)&0xFF)<<0)
+
+/********************************************************************/
+/*********************************************************************
+ *
+ * LCD Controller (LCDC)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_LCDC_LSSAR MCF_REG32(0xFC0AC000)
+#define MCF_LCDC_LSR MCF_REG32(0xFC0AC004)
+#define MCF_LCDC_LVPWR MCF_REG32(0xFC0AC008)
+#define MCF_LCDC_LCPR MCF_REG32(0xFC0AC00C)
+#define MCF_LCDC_LCWHBR MCF_REG32(0xFC0AC010)
+#define MCF_LCDC_LCCMR MCF_REG32(0xFC0AC014)
+#define MCF_LCDC_LPCR MCF_REG32(0xFC0AC018)
+#define MCF_LCDC_LHCR MCF_REG32(0xFC0AC01C)
+#define MCF_LCDC_LVCR MCF_REG32(0xFC0AC020)
+#define MCF_LCDC_LPOR MCF_REG32(0xFC0AC024)
+#define MCF_LCDC_LSCR MCF_REG32(0xFC0AC028)
+#define MCF_LCDC_LPCCR MCF_REG32(0xFC0AC02C)
+#define MCF_LCDC_LDCR MCF_REG32(0xFC0AC030)
+#define MCF_LCDC_LRMCR MCF_REG32(0xFC0AC034)
+#define MCF_LCDC_LICR MCF_REG32(0xFC0AC038)
+#define MCF_LCDC_LIER MCF_REG32(0xFC0AC03C)
+#define MCF_LCDC_LISR MCF_REG32(0xFC0AC040)
+#define MCF_LCDC_LGWSAR MCF_REG32(0xFC0AC050)
+#define MCF_LCDC_LGWSR MCF_REG32(0xFC0AC054)
+#define MCF_LCDC_LGWVPWR MCF_REG32(0xFC0AC058)
+#define MCF_LCDC_LGWPOR MCF_REG32(0xFC0AC05C)
+#define MCF_LCDC_LGWPR MCF_REG32(0xFC0AC060)
+#define MCF_LCDC_LGWCR MCF_REG32(0xFC0AC064)
+#define MCF_LCDC_LGWDCR MCF_REG32(0xFC0AC068)
+#define MCF_LCDC_BPLUT_BASE MCF_REG32(0xFC0AC800)
+#define MCF_LCDC_GWLUT_BASE MCF_REG32(0xFC0ACC00)
+
+/* Bit definitions and macros for MCF_LCDC_LSSAR */
+#define MCF_LCDC_LSSAR_SSA(x) (((x)&0x3FFFFFFF)<<2)
+
+/* Bit definitions and macros for MCF_LCDC_LSR */
+#define MCF_LCDC_LSR_YMAX(x) (((x)&0x000003FF)<<0)
+#define MCF_LCDC_LSR_XMAX(x) (((x)&0x0000003F)<<20)
+
+/* Bit definitions and macros for MCF_LCDC_LVPWR */
+#define MCF_LCDC_LVPWR_VPW(x) (((x)&0x000003FF)<<0)
+
+/* Bit definitions and macros for MCF_LCDC_LCPR */
+#define MCF_LCDC_LCPR_CYP(x) (((x)&0x000003FF)<<0)
+#define MCF_LCDC_LCPR_CXP(x) (((x)&0x000003FF)<<16)
+#define MCF_LCDC_LCPR_OP (0x10000000)
+#define MCF_LCDC_LCPR_CC(x) (((x)&0x00000003)<<30)
+#define MCF_LCDC_LCPR_CC_TRANSPARENT (0x00000000)
+#define MCF_LCDC_LCPR_CC_OR (0x40000000)
+#define MCF_LCDC_LCPR_CC_XOR (0x80000000)
+#define MCF_LCDC_LCPR_CC_AND (0xC0000000)
+#define MCF_LCDC_LCPR_OP_ON (0x10000000)
+#define MCF_LCDC_LCPR_OP_OFF (0x00000000)
+
+/* Bit definitions and macros for MCF_LCDC_LCWHBR */
+#define MCF_LCDC_LCWHBR_BD(x) (((x)&0x000000FF)<<0)
+#define MCF_LCDC_LCWHBR_CH(x) (((x)&0x0000001F)<<16)
+#define MCF_LCDC_LCWHBR_CW(x) (((x)&0x0000001F)<<24)
+#define MCF_LCDC_LCWHBR_BK_EN (0x80000000)
+#define MCF_LCDC_LCWHBR_BK_EN_ON (0x80000000)
+#define MCF_LCDC_LCWHBR_BK_EN_OFF (0x00000000)
+
+/* Bit definitions and macros for MCF_LCDC_LCCMR */
+#define MCF_LCDC_LCCMR_CUR_COL_B(x) (((x)&0x0000003F)<<0)
+#define MCF_LCDC_LCCMR_CUR_COL_G(x) (((x)&0x0000003F)<<6)
+#define MCF_LCDC_LCCMR_CUR_COL_R(x) (((x)&0x0000003F)<<12)
+
+/* Bit definitions and macros for MCF_LCDC_LPCR */
+#define MCF_LCDC_LPCR_PCD(x) (((x)&0x0000003F)<<0)
+#define MCF_LCDC_LPCR_SHARP (0x00000040)
+#define MCF_LCDC_LPCR_SCLKSEL (0x00000080)
+#define MCF_LCDC_LPCR_ACD(x) (((x)&0x0000007F)<<8)
+#define MCF_LCDC_LPCR_ACDSEL (0x00008000)
+#define MCF_LCDC_LPCR_REV_VS (0x00010000)
+#define MCF_LCDC_LPCR_SWAP_SEL (0x00020000)
+#define MCF_LCDC_LPCR_ENDSEL (0x00040000)
+#define MCF_LCDC_LPCR_SCLKIDLE (0x00080000)
+#define MCF_LCDC_LPCR_OEPOL (0x00100000)
+#define MCF_LCDC_LPCR_CLKPOL (0x00200000)
+#define MCF_LCDC_LPCR_LPPOL (0x00400000)
+#define MCF_LCDC_LPCR_FLM (0x00800000)
+#define MCF_LCDC_LPCR_PIXPOL (0x01000000)
+#define MCF_LCDC_LPCR_BPIX(x) (((x)&0x00000007)<<25)
+#define MCF_LCDC_LPCR_PBSIZ(x) (((x)&0x00000003)<<28)
+#define MCF_LCDC_LPCR_COLOR (0x40000000)
+#define MCF_LCDC_LPCR_TFT (0x80000000)
+#define MCF_LCDC_LPCR_MODE_MONOCGROME (0x00000000)
+#define MCF_LCDC_LPCR_MODE_CSTN (0x40000000)
+#define MCF_LCDC_LPCR_MODE_TFT (0xC0000000)
+#define MCF_LCDC_LPCR_PBSIZ_1 (0x00000000)
+#define MCF_LCDC_LPCR_PBSIZ_2 (0x10000000)
+#define MCF_LCDC_LPCR_PBSIZ_4 (0x20000000)
+#define MCF_LCDC_LPCR_PBSIZ_8 (0x30000000)
+#define MCF_LCDC_LPCR_BPIX_1bpp (0x00000000)
+#define MCF_LCDC_LPCR_BPIX_2bpp (0x02000000)
+#define MCF_LCDC_LPCR_BPIX_4bpp (0x04000000)
+#define MCF_LCDC_LPCR_BPIX_8bpp (0x06000000)
+#define MCF_LCDC_LPCR_BPIX_12bpp (0x08000000)
+#define MCF_LCDC_LPCR_BPIX_16bpp (0x0A000000)
+#define MCF_LCDC_LPCR_BPIX_18bpp (0x0C000000)
+
+#define MCF_LCDC_LPCR_PANEL_TYPE(x) (((x)&0x00000003)<<30)
+
+/* Bit definitions and macros for MCF_LCDC_LHCR */
+#define MCF_LCDC_LHCR_H_WAIT_2(x) (((x)&0x000000FF)<<0)
+#define MCF_LCDC_LHCR_H_WAIT_1(x) (((x)&0x000000FF)<<8)
+#define MCF_LCDC_LHCR_H_WIDTH(x) (((x)&0x0000003F)<<26)
+
+/* Bit definitions and macros for MCF_LCDC_LVCR */
+#define MCF_LCDC_LVCR_V_WAIT_2(x) (((x)&0x000000FF)<<0)
+#define MCF_LCDC_LVCR_V_WAIT_1(x) (((x)&0x000000FF)<<8)
+#define MCF_LCDC_LVCR_V_WIDTH(x) (((x)&0x0000003F)<<26)
+
+/* Bit definitions and macros for MCF_LCDC_LPOR */
+#define MCF_LCDC_LPOR_POS(x) (((x)&0x0000001F)<<0)
+
+/* Bit definitions and macros for MCF_LCDC_LPCCR */
+#define MCF_LCDC_LPCCR_PW(x) (((x)&0x000000FF)<<0)
+#define MCF_LCDC_LPCCR_CC_EN (0x00000100)
+#define MCF_LCDC_LPCCR_SCR(x) (((x)&0x00000003)<<9)
+#define MCF_LCDC_LPCCR_LDMSK (0x00008000)
+#define MCF_LCDC_LPCCR_CLS_HI_WIDTH(x) (((x)&0x000001FF)<<16)
+#define MCF_LCDC_LPCCR_SCR_LINEPULSE (0x00000000)
+#define MCF_LCDC_LPCCR_SCR_PIXELCLK (0x00002000)
+#define MCF_LCDC_LPCCR_SCR_LCDCLOCK (0x00004000)
+
+/* Bit definitions and macros for MCF_LCDC_LDCR */
+#define MCF_LCDC_LDCR_TM(x) (((x)&0x0000001F)<<0)
+#define MCF_LCDC_LDCR_HM(x) (((x)&0x0000001F)<<16)
+#define MCF_LCDC_LDCR_BURST (0x80000000)
+
+/* Bit definitions and macros for MCF_LCDC_LRMCR */
+#define MCF_LCDC_LRMCR_SEL_REF (0x00000001)
+
+/* Bit definitions and macros for MCF_LCDC_LICR */
+#define MCF_LCDC_LICR_INTCON (0x00000001)
+#define MCF_LCDC_LICR_INTSYN (0x00000004)
+#define MCF_LCDC_LICR_GW_INT_CON (0x00000010)
+
+/* Bit definitions and macros for MCF_LCDC_LIER */
+#define MCF_LCDC_LIER_BOF_EN (0x00000001)
+#define MCF_LCDC_LIER_EOF_EN (0x00000002)
+#define MCF_LCDC_LIER_ERR_RES_EN (0x00000004)
+#define MCF_LCDC_LIER_UDR_ERR_EN (0x00000008)
+#define MCF_LCDC_LIER_GW_BOF_EN (0x00000010)
+#define MCF_LCDC_LIER_GW_EOF_EN (0x00000020)
+#define MCF_LCDC_LIER_GW_ERR_RES_EN (0x00000040)
+#define MCF_LCDC_LIER_GW_UDR_ERR_EN (0x00000080)
+
+/* Bit definitions and macros for MCF_LCDC_LISR */
+#define MCF_LCDC_LISR_BOF (0x00000001)
+#define MCF_LCDC_LISR_EOF (0x00000002)
+#define MCF_LCDC_LISR_ERR_RES (0x00000004)
+#define MCF_LCDC_LISR_UDR_ERR (0x00000008)
+#define MCF_LCDC_LISR_GW_BOF (0x00000010)
+#define MCF_LCDC_LISR_GW_EOF (0x00000020)
+#define MCF_LCDC_LISR_GW_ERR_RES (0x00000040)
+#define MCF_LCDC_LISR_GW_UDR_ERR (0x00000080)
+
+/* Bit definitions and macros for MCF_LCDC_LGWSAR */
+#define MCF_LCDC_LGWSAR_GWSA(x) (((x)&0x3FFFFFFF)<<2)
+
+/* Bit definitions and macros for MCF_LCDC_LGWSR */
+#define MCF_LCDC_LGWSR_GWH(x) (((x)&0x000003FF)<<0)
+#define MCF_LCDC_LGWSR_GWW(x) (((x)&0x0000003F)<<20)
+
+/* Bit definitions and macros for MCF_LCDC_LGWVPWR */
+#define MCF_LCDC_LGWVPWR_GWVPW(x) (((x)&0x000003FF)<<0)
+
+/* Bit definitions and macros for MCF_LCDC_LGWPOR */
+#define MCF_LCDC_LGWPOR_GWPO(x) (((x)&0x0000001F)<<0)
+
+/* Bit definitions and macros for MCF_LCDC_LGWPR */
+#define MCF_LCDC_LGWPR_GWYP(x) (((x)&0x000003FF)<<0)
+#define MCF_LCDC_LGWPR_GWXP(x) (((x)&0x000003FF)<<16)
+
+/* Bit definitions and macros for MCF_LCDC_LGWCR */
+#define MCF_LCDC_LGWCR_GWCKB(x) (((x)&0x0000003F)<<0)
+#define MCF_LCDC_LGWCR_GWCKG(x) (((x)&0x0000003F)<<6)
+#define MCF_LCDC_LGWCR_GWCKR(x) (((x)&0x0000003F)<<12)
+#define MCF_LCDC_LGWCR_GW_RVS (0x00200000)
+#define MCF_LCDC_LGWCR_GWE (0x00400000)
+#define MCF_LCDC_LGWCR_GWCKE (0x00800000)
+#define MCF_LCDC_LGWCR_GWAV(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_LCDC_LGWDCR */
+#define MCF_LCDC_LGWDCR_GWTM(x) (((x)&0x0000001F)<<0)
+#define MCF_LCDC_LGWDCR_GWHM(x) (((x)&0x0000001F)<<16)
+#define MCF_LCDC_LGWDCR_GWBT (0x80000000)
+
+/* Bit definitions and macros for MCF_LCDC_LSCR */
+#define MCF_LCDC_LSCR_PS_RISE_DELAY(x) (((x)&0x0000003F)<<26)
+#define MCF_LCDC_LSCR_CLS_RISE_DELAY(x) (((x)&0x000000FF)<<16)
+#define MCF_LCDC_LSCR_REV_TOGGLE_DELAY(x) (((x)&0x0000000F)<<8)
+#define MCF_LCDC_LSCR_GRAY_2(x) (((x)&0x0000000F)<<4)
+#define MCF_LCDC_LSCR_GRAY_1(x) (((x)&0x0000000F)<<0)
+
+/* Bit definitions and macros for MCF_LCDC_BPLUT_BASE */
+#define MCF_LCDC_BPLUT_BASE_BASE(x) (((x)&0xFFFFFFFF)<<0)
+
+/* Bit definitions and macros for MCF_LCDC_GWLUT_BASE */
+#define MCF_LCDC_GWLUT_BASE_BASE(x) (((x)&0xFFFFFFFF)<<0)
+
+/*********************************************************************
+ *
+ * Phase Locked Loop (PLL)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_PLL_PODR MCF_REG08(0xFC0C0000)
+#define MCF_PLL_PLLCR MCF_REG08(0xFC0C0004)
+#define MCF_PLL_PMDR MCF_REG08(0xFC0C0008)
+#define MCF_PLL_PFDR MCF_REG08(0xFC0C000C)
+
+/* Bit definitions and macros for MCF_PLL_PODR */
+#define MCF_PLL_PODR_BUSDIV(x) (((x)&0x0F)<<0)
+#define MCF_PLL_PODR_CPUDIV(x) (((x)&0x0F)<<4)
+
+/* Bit definitions and macros for MCF_PLL_PLLCR */
+#define MCF_PLL_PLLCR_DITHDEV(x) (((x)&0x07)<<0)
+#define MCF_PLL_PLLCR_DITHEN (0x80)
+
+/* Bit definitions and macros for MCF_PLL_PMDR */
+#define MCF_PLL_PMDR_MODDIV(x) (((x)&0xFF)<<0)
+
+/* Bit definitions and macros for MCF_PLL_PFDR */
+#define MCF_PLL_PFDR_MFD(x) (((x)&0xFF)<<0)
+
+/*********************************************************************
+ *
+ * System Control Module Registers (SCM)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_SCM_MPR MCF_REG32(0xFC000000)
+#define MCF_SCM_PACRA MCF_REG32(0xFC000020)
+#define MCF_SCM_PACRB MCF_REG32(0xFC000024)
+#define MCF_SCM_PACRC MCF_REG32(0xFC000028)
+#define MCF_SCM_PACRD MCF_REG32(0xFC00002C)
+#define MCF_SCM_PACRE MCF_REG32(0xFC000040)
+#define MCF_SCM_PACRF MCF_REG32(0xFC000044)
+
+#define MCF_SCM_BCR MCF_REG32(0xFC040024)
+
+/*********************************************************************
+ *
+ * SDRAM Controller (SDRAMC)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_SDRAMC_SDMR MCF_REG32(0xFC0B8000)
+#define MCF_SDRAMC_SDCR MCF_REG32(0xFC0B8004)
+#define MCF_SDRAMC_SDCFG1 MCF_REG32(0xFC0B8008)
+#define MCF_SDRAMC_SDCFG2 MCF_REG32(0xFC0B800C)
+#define MCF_SDRAMC_LIMP_FIX MCF_REG32(0xFC0B8080)
+#define MCF_SDRAMC_SDDS MCF_REG32(0xFC0B8100)
+#define MCF_SDRAMC_SDCS0 MCF_REG32(0xFC0B8110)
+#define MCF_SDRAMC_SDCS1 MCF_REG32(0xFC0B8114)
+#define MCF_SDRAMC_SDCS2 MCF_REG32(0xFC0B8118)
+#define MCF_SDRAMC_SDCS3 MCF_REG32(0xFC0B811C)
+#define MCF_SDRAMC_SDCS(x) MCF_REG32(0xFC0B8110+((x)*0x004))
+
+/* Bit definitions and macros for MCF_SDRAMC_SDMR */
+#define MCF_SDRAMC_SDMR_CMD (0x00010000)
+#define MCF_SDRAMC_SDMR_AD(x) (((x)&0x00000FFF)<<18)
+#define MCF_SDRAMC_SDMR_BNKAD(x) (((x)&0x00000003)<<30)
+#define MCF_SDRAMC_SDMR_BNKAD_LMR (0x00000000)
+#define MCF_SDRAMC_SDMR_BNKAD_LEMR (0x40000000)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCR */
+#define MCF_SDRAMC_SDCR_IPALL (0x00000002)
+#define MCF_SDRAMC_SDCR_IREF (0x00000004)
+#define MCF_SDRAMC_SDCR_DQS_OE(x) (((x)&0x0000000F)<<8)
+#define MCF_SDRAMC_SDCR_PS(x) (((x)&0x00000003)<<12)
+#define MCF_SDRAMC_SDCR_RCNT(x) (((x)&0x0000003F)<<16)
+#define MCF_SDRAMC_SDCR_OE_RULE (0x00400000)
+#define MCF_SDRAMC_SDCR_MUX(x) (((x)&0x00000003)<<24)
+#define MCF_SDRAMC_SDCR_REF (0x10000000)
+#define MCF_SDRAMC_SDCR_DDR (0x20000000)
+#define MCF_SDRAMC_SDCR_CKE (0x40000000)
+#define MCF_SDRAMC_SDCR_MODE_EN (0x80000000)
+#define MCF_SDRAMC_SDCR_PS_16 (0x00002000)
+#define MCF_SDRAMC_SDCR_PS_32 (0x00000000)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCFG1 */
+#define MCF_SDRAMC_SDCFG1_WTLAT(x) (((x)&0x00000007)<<4)
+#define MCF_SDRAMC_SDCFG1_REF2ACT(x) (((x)&0x0000000F)<<8)
+#define MCF_SDRAMC_SDCFG1_PRE2ACT(x) (((x)&0x00000007)<<12)
+#define MCF_SDRAMC_SDCFG1_ACT2RW(x) (((x)&0x00000007)<<16)
+#define MCF_SDRAMC_SDCFG1_RDLAT(x) (((x)&0x0000000F)<<20)
+#define MCF_SDRAMC_SDCFG1_SWT2RD(x) (((x)&0x00000007)<<24)
+#define MCF_SDRAMC_SDCFG1_SRD2RW(x) (((x)&0x0000000F)<<28)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCFG2 */
+#define MCF_SDRAMC_SDCFG2_BL(x) (((x)&0x0000000F)<<16)
+#define MCF_SDRAMC_SDCFG2_BRD2WT(x) (((x)&0x0000000F)<<20)
+#define MCF_SDRAMC_SDCFG2_BWT2RW(x) (((x)&0x0000000F)<<24)
+#define MCF_SDRAMC_SDCFG2_BRD2PRE(x) (((x)&0x0000000F)<<28)
+
+/* Device Errata - LIMP mode workaround */
+#define MCF_SDRAMC_REFRESH (0x40000000)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDDS */
+#define MCF_SDRAMC_SDDS_SB_D(x) (((x)&0x00000003)<<0)
+#define MCF_SDRAMC_SDDS_SB_S(x) (((x)&0x00000003)<<2)
+#define MCF_SDRAMC_SDDS_SB_A(x) (((x)&0x00000003)<<4)
+#define MCF_SDRAMC_SDDS_SB_C(x) (((x)&0x00000003)<<6)
+#define MCF_SDRAMC_SDDS_SB_E(x) (((x)&0x00000003)<<8)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCS */
+#define MCF_SDRAMC_SDCS_CSSZ(x) (((x)&0x0000001F)<<0)
+#define MCF_SDRAMC_SDCS_BASE(x) (((x)&0x00000FFF)<<20)
+#define MCF_SDRAMC_SDCS_BA(x) ((x)&0xFFF00000)
+#define MCF_SDRAMC_SDCS_CSSZ_DIABLE (0x00000000)
+#define MCF_SDRAMC_SDCS_CSSZ_1MBYTE (0x00000013)
+#define MCF_SDRAMC_SDCS_CSSZ_2MBYTE (0x00000014)
+#define MCF_SDRAMC_SDCS_CSSZ_4MBYTE (0x00000015)
+#define MCF_SDRAMC_SDCS_CSSZ_8MBYTE (0x00000016)
+#define MCF_SDRAMC_SDCS_CSSZ_16MBYTE (0x00000017)
+#define MCF_SDRAMC_SDCS_CSSZ_32MBYTE (0x00000018)
+#define MCF_SDRAMC_SDCS_CSSZ_64MBYTE (0x00000019)
+#define MCF_SDRAMC_SDCS_CSSZ_128MBYTE (0x0000001A)
+#define MCF_SDRAMC_SDCS_CSSZ_256MBYTE (0x0000001B)
+#define MCF_SDRAMC_SDCS_CSSZ_512MBYTE (0x0000001C)
+#define MCF_SDRAMC_SDCS_CSSZ_1GBYTE (0x0000001D)
+#define MCF_SDRAMC_SDCS_CSSZ_2GBYTE (0x0000001E)
+#define MCF_SDRAMC_SDCS_CSSZ_4GBYTE (0x0000001F)
+
+/*********************************************************************
+ *
+ * FlexCAN module registers
+ *
+ *********************************************************************/
+#define MCF_FLEXCAN_BASEADDR(x) (0xFC020000+(x)*0x0800)
+#define MCF_FLEXCAN_CANMCR(x) MCF_REG32(0xFC020000+(x)*0x0800+0x00)
+#define MCF_FLEXCAN_CANCTRL(x) MCF_REG32(0xFC020000+(x)*0x0800+0x04)
+#define MCF_FLEXCAN_TIMER(x) MCF_REG32(0xFC020000+(x)*0x0800+0x08)
+#define MCF_FLEXCAN_RXGMASK(x) MCF_REG32(0xFC020000+(x)*0x0800+0x10)
+#define MCF_FLEXCAN_RX14MASK(x) MCF_REG32(0xFC020000+(x)*0x0800+0x14)
+#define MCF_FLEXCAN_RX15MASK(x) MCF_REG32(0xFC020000+(x)*0x0800+0x18)
+#define MCF_FLEXCAN_ERRCNT(x) MCF_REG32(0xFC020000+(x)*0x0800+0x1C)
+#define MCF_FLEXCAN_ERRSTAT(x) MCF_REG32(0xFC020000+(x)*0x0800+0x20)
+#define MCF_FLEXCAN_IMASK(x) MCF_REG32(0xFC020000+(x)*0x0800+0x28)
+#define MCF_FLEXCAN_IFLAG(x) MCF_REG32(0xFC020000+(x)*0x0800+0x30)
+
+#define MCF_FLEXCAN_MB_CNT(x,y) MCF_REG32(0xFC020080+(x)*0x0800+(y)*0x10+0x0)
+#define MCF_FLEXCAN_MB_ID(x,y) MCF_REG32(0xFC020080+(x)*0x0800+(y)*0x10+0x4)
+#define MCF_FLEXCAN_MB_DB(x,y,z) MCF_REG08(0xFC020080+(x)*0x0800+(y)*0x10+0x8+(z)*0x1)
+
+/*
+ * FlexCAN Module Configuration Register
+ */
+#define CANMCR_MDIS (0x80000000)
+#define CANMCR_FRZ (0x40000000)
+#define CANMCR_HALT (0x10000000)
+#define CANMCR_SOFTRST (0x02000000)
+#define CANMCR_FRZACK (0x01000000)
+#define CANMCR_SUPV (0x00800000)
+#define CANMCR_MAXMB(x) ((x)&0x0F)
+
+/*
+ * FlexCAN Control Register
+ */
+#define CANCTRL_PRESDIV(x) (((x)&0xFF)<<24)
+#define CANCTRL_RJW(x) (((x)&0x03)<<22)
+#define CANCTRL_PSEG1(x) (((x)&0x07)<<19)
+#define CANCTRL_PSEG2(x) (((x)&0x07)<<16)
+#define CANCTRL_BOFFMSK (0x00008000)
+#define CANCTRL_ERRMSK (0x00004000)
+#define CANCTRL_CLKSRC (0x00002000)
+#define CANCTRL_LPB (0x00001000)
+#define CANCTRL_SAMP (0x00000080)
+#define CANCTRL_BOFFREC (0x00000040)
+#define CANCTRL_TSYNC (0x00000020)
+#define CANCTRL_LBUF (0x00000010)
+#define CANCTRL_LOM (0x00000008)
+#define CANCTRL_PROPSEG(x) ((x)&0x07)
+
+/*
+ * FlexCAN Error Counter Register
+ */
+#define ERRCNT_RXECTR(x) (((x)&0xFF)<<8)
+#define ERRCNT_TXECTR(x) ((x)&0xFF)
+
+/*
+ * FlexCAN Error and Status Register
+ */
+#define ERRSTAT_BITERR(x) (((x)&0x03)<<14)
+#define ERRSTAT_ACKERR (0x00002000)
+#define ERRSTAT_CRCERR (0x00001000)
+#define ERRSTAT_FRMERR (0x00000800)
+#define ERRSTAT_STFERR (0x00000400)
+#define ERRSTAT_TXWRN (0x00000200)
+#define ERRSTAT_RXWRN (0x00000100)
+#define ERRSTAT_IDLE (0x00000080)
+#define ERRSTAT_TXRX (0x00000040)
+#define ERRSTAT_FLTCONF(x) (((x)&0x03)<<4)
+#define ERRSTAT_BOFFINT (0x00000004)
+#define ERRSTAT_ERRINT (0x00000002)
+
+/*
+ * Interrupt Mask Register
+ */
+#define IMASK_BUF15M (0x8000)
+#define IMASK_BUF14M (0x4000)
+#define IMASK_BUF13M (0x2000)
+#define IMASK_BUF12M (0x1000)
+#define IMASK_BUF11M (0x0800)
+#define IMASK_BUF10M (0x0400)
+#define IMASK_BUF9M (0x0200)
+#define IMASK_BUF8M (0x0100)
+#define IMASK_BUF7M (0x0080)
+#define IMASK_BUF6M (0x0040)
+#define IMASK_BUF5M (0x0020)
+#define IMASK_BUF4M (0x0010)
+#define IMASK_BUF3M (0x0008)
+#define IMASK_BUF2M (0x0004)
+#define IMASK_BUF1M (0x0002)
+#define IMASK_BUF0M (0x0001)
+#define IMASK_BUFnM(x) (0x1<<(x))
+#define IMASK_BUFF_ENABLE_ALL (0x1111)
+#define IMASK_BUFF_DISABLE_ALL (0x0000)
+
+/*
+ * Interrupt Flag Register
+ */
+#define IFLAG_BUF15M (0x8000)
+#define IFLAG_BUF14M (0x4000)
+#define IFLAG_BUF13M (0x2000)
+#define IFLAG_BUF12M (0x1000)
+#define IFLAG_BUF11M (0x0800)
+#define IFLAG_BUF10M (0x0400)
+#define IFLAG_BUF9M (0x0200)
+#define IFLAG_BUF8M (0x0100)
+#define IFLAG_BUF7M (0x0080)
+#define IFLAG_BUF6M (0x0040)
+#define IFLAG_BUF5M (0x0020)
+#define IFLAG_BUF4M (0x0010)
+#define IFLAG_BUF3M (0x0008)
+#define IFLAG_BUF2M (0x0004)
+#define IFLAG_BUF1M (0x0002)
+#define IFLAG_BUF0M (0x0001)
+#define IFLAG_BUFF_SET_ALL (0xFFFF)
+#define IFLAG_BUFF_CLEAR_ALL (0x0000)
+#define IFLAG_BUFnM(x) (0x1<<(x))
+
+/*
+ * Message Buffers
+ */
+#define MB_CNT_CODE(x) (((x)&0x0F)<<24)
+#define MB_CNT_SRR (0x00400000)
+#define MB_CNT_IDE (0x00200000)
+#define MB_CNT_RTR (0x00100000)
+#define MB_CNT_LENGTH(x) (((x)&0x0F)<<16)
+#define MB_CNT_TIMESTAMP(x) ((x)&0xFFFF)
+#define MB_ID_STD(x) (((x)&0x07FF)<<18)
+#define MB_ID_EXT(x) ((x)&0x3FFFF)
+
+/*********************************************************************
+ *
+ * Edge Port Module (EPORT)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_EPORT_EPPAR MCF_REG16(0xFC094000)
+#define MCF_EPORT_EPDDR MCF_REG08(0xFC094002)
+#define MCF_EPORT_EPIER MCF_REG08(0xFC094003)
+#define MCF_EPORT_EPDR MCF_REG08(0xFC094004)
+#define MCF_EPORT_EPPDR MCF_REG08(0xFC094005)
+#define MCF_EPORT_EPFR MCF_REG08(0xFC094006)
+
+/* Bit definitions and macros for MCF_EPORT_EPPAR */
+#define MCF_EPORT_EPPAR_EPPA1(x) (((x)&0x0003)<<2)
+#define MCF_EPORT_EPPAR_EPPA2(x) (((x)&0x0003)<<4)
+#define MCF_EPORT_EPPAR_EPPA3(x) (((x)&0x0003)<<6)
+#define MCF_EPORT_EPPAR_EPPA4(x) (((x)&0x0003)<<8)
+#define MCF_EPORT_EPPAR_EPPA5(x) (((x)&0x0003)<<10)
+#define MCF_EPORT_EPPAR_EPPA6(x) (((x)&0x0003)<<12)
+#define MCF_EPORT_EPPAR_EPPA7(x) (((x)&0x0003)<<14)
+#define MCF_EPORT_EPPAR_LEVEL (0)
+#define MCF_EPORT_EPPAR_RISING (1)
+#define MCF_EPORT_EPPAR_FALLING (2)
+#define MCF_EPORT_EPPAR_BOTH (3)
+#define MCF_EPORT_EPPAR_EPPA7_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA7_RISING (0x4000)
+#define MCF_EPORT_EPPAR_EPPA7_FALLING (0x8000)
+#define MCF_EPORT_EPPAR_EPPA7_BOTH (0xC000)
+#define MCF_EPORT_EPPAR_EPPA6_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA6_RISING (0x1000)
+#define MCF_EPORT_EPPAR_EPPA6_FALLING (0x2000)
+#define MCF_EPORT_EPPAR_EPPA6_BOTH (0x3000)
+#define MCF_EPORT_EPPAR_EPPA5_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA5_RISING (0x0400)
+#define MCF_EPORT_EPPAR_EPPA5_FALLING (0x0800)
+#define MCF_EPORT_EPPAR_EPPA5_BOTH (0x0C00)
+#define MCF_EPORT_EPPAR_EPPA4_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA4_RISING (0x0100)
+#define MCF_EPORT_EPPAR_EPPA4_FALLING (0x0200)
+#define MCF_EPORT_EPPAR_EPPA4_BOTH (0x0300)
+#define MCF_EPORT_EPPAR_EPPA3_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA3_RISING (0x0040)
+#define MCF_EPORT_EPPAR_EPPA3_FALLING (0x0080)
+#define MCF_EPORT_EPPAR_EPPA3_BOTH (0x00C0)
+#define MCF_EPORT_EPPAR_EPPA2_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA2_RISING (0x0010)
+#define MCF_EPORT_EPPAR_EPPA2_FALLING (0x0020)
+#define MCF_EPORT_EPPAR_EPPA2_BOTH (0x0030)
+#define MCF_EPORT_EPPAR_EPPA1_LEVEL (0x0000)
+#define MCF_EPORT_EPPAR_EPPA1_RISING (0x0004)
+#define MCF_EPORT_EPPAR_EPPA1_FALLING (0x0008)
+#define MCF_EPORT_EPPAR_EPPA1_BOTH (0x000C)
+
+/* Bit definitions and macros for MCF_EPORT_EPDDR */
+#define MCF_EPORT_EPDDR_EPDD1 (0x02)
+#define MCF_EPORT_EPDDR_EPDD2 (0x04)
+#define MCF_EPORT_EPDDR_EPDD3 (0x08)
+#define MCF_EPORT_EPDDR_EPDD4 (0x10)
+#define MCF_EPORT_EPDDR_EPDD5 (0x20)
+#define MCF_EPORT_EPDDR_EPDD6 (0x40)
+#define MCF_EPORT_EPDDR_EPDD7 (0x80)
+
+/* Bit definitions and macros for MCF_EPORT_EPIER */
+#define MCF_EPORT_EPIER_EPIE1 (0x02)
+#define MCF_EPORT_EPIER_EPIE2 (0x04)
+#define MCF_EPORT_EPIER_EPIE3 (0x08)
+#define MCF_EPORT_EPIER_EPIE4 (0x10)
+#define MCF_EPORT_EPIER_EPIE5 (0x20)
+#define MCF_EPORT_EPIER_EPIE6 (0x40)
+#define MCF_EPORT_EPIER_EPIE7 (0x80)
+
+/* Bit definitions and macros for MCF_EPORT_EPDR */
+#define MCF_EPORT_EPDR_EPD1 (0x02)
+#define MCF_EPORT_EPDR_EPD2 (0x04)
+#define MCF_EPORT_EPDR_EPD3 (0x08)
+#define MCF_EPORT_EPDR_EPD4 (0x10)
+#define MCF_EPORT_EPDR_EPD5 (0x20)
+#define MCF_EPORT_EPDR_EPD6 (0x40)
+#define MCF_EPORT_EPDR_EPD7 (0x80)
+
+/* Bit definitions and macros for MCF_EPORT_EPPDR */
+#define MCF_EPORT_EPPDR_EPPD1 (0x02)
+#define MCF_EPORT_EPPDR_EPPD2 (0x04)
+#define MCF_EPORT_EPPDR_EPPD3 (0x08)
+#define MCF_EPORT_EPPDR_EPPD4 (0x10)
+#define MCF_EPORT_EPPDR_EPPD5 (0x20)
+#define MCF_EPORT_EPPDR_EPPD6 (0x40)
+#define MCF_EPORT_EPPDR_EPPD7 (0x80)
+
+/* Bit definitions and macros for MCF_EPORT_EPFR */
+#define MCF_EPORT_EPFR_EPF1 (0x02)
+#define MCF_EPORT_EPFR_EPF2 (0x04)
+#define MCF_EPORT_EPFR_EPF3 (0x08)
+#define MCF_EPORT_EPFR_EPF4 (0x10)
+#define MCF_EPORT_EPFR_EPF5 (0x20)
+#define MCF_EPORT_EPFR_EPF6 (0x40)
+#define MCF_EPORT_EPFR_EPF7 (0x80)
+
+/********************************************************************/
+#endif /* m532xsim_h */
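
The ICRn, SIMR and CIMR macros defined above work together: ICRn assigns a priority level to interrupt source n, while SIMR and CIMR mask and unmask a source by writing its number. The sketch below is illustrative only and is not part of the patch; it assumes MCF_REG08()/MCF_REG32() (defined earlier in m532xsim.h, outside this excerpt) expand to volatile MMIO dereferences, so the macros can be used as lvalues.

/*
 * Hedged usage sketch: route interrupt source 36 of INTC0 to priority
 * level 3 and unmask it, then mask it again.  The source number and
 * level are arbitrary illustration values.
 */
static void m532x_unmask_src36(void)
{
	MCF_INTC_ICR36(0) = MCF_INTC_ICR_IL(3);     /* priority level 3 */
	MCF_INTC_CIMR(0)  = MCF_INTC_CIMR_CIMR(36); /* clear mask for source 36 */
}

static void m532x_mask_src36(void)
{
	MCF_INTC_SIMR(0) = MCF_INTC_SIMR_SIMR(36);  /* set mask for source 36 */
}
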
diff --git a/include/asm-m68knommu/mcfcache.h b/include/asm-m68knommu/mcfcache.h
index 45d1ac57ea82..7b61a8a529f5 100644
--- a/include/asm-m68knommu/mcfcache.h
+++ b/include/asm-m68knommu/mcfcache.h
@@ -92,6 +92,21 @@
.endm
#endif /* CONFIG_M5249 || CONFIG_M5307 */
+#if defined(CONFIG_M532x)
+.macro CACHE_ENABLE
+ movel #0x01000000,%d0 /* invalidate cache cmd */
+ movec %d0,%CACR /* do invalidate cache */
+ nop
+ movel #0x4001C000,%d0 /* set SDRAM cached (write-thru) */
+ movec %d0,%ACR0
+ movel #0x00000000,%d0 /* no other regions cached */
+ movec %d0,%ACR1
+ movel #0x80000200,%d0 /* setup cache mask */
+ movec %d0,%CACR /* enable cache */
+ nop
+.endm
+#endif /* CONFIG_M532x */
+
#if defined(CONFIG_M5407)
/*
* Version 4 cores have a true harvard style separate instruction
diff --git a/include/asm-m68knommu/mcfpit.h b/include/asm-m68knommu/mcfpit.h
index 0d2672dd518a..f570cf64fd29 100644
--- a/include/asm-m68knommu/mcfpit.h
+++ b/include/asm-m68knommu/mcfpit.h
@@ -28,11 +28,9 @@
/*
* Define the PIT timer register set addresses.
*/
-struct mcfpit {
- unsigned short pcsr; /* PIT control and status */
- unsigned short pmr; /* PIT modulus register */
- unsigned short pcntr; /* PIT count register */
-} __attribute__((packed));
+#define MCFPIT_PCSR 0x0 /* PIT control register */
+#define MCFPIT_PMR 0x2 /* PIT modulus register */
+#define MCFPIT_PCNTR 0x4 /* PIT count register */
/*
* Bit definitions for the PIT Control and Status register.
diff --git a/include/asm-m68knommu/mcfsim.h b/include/asm-m68knommu/mcfsim.h
index 97a0c2734a72..1074ae717f74 100644
--- a/include/asm-m68knommu/mcfsim.h
+++ b/include/asm-m68knommu/mcfsim.h
@@ -35,6 +35,8 @@
#include <asm/m528xsim.h>
#elif defined(CONFIG_M5307)
#include <asm/m5307sim.h>
+#elif defined(CONFIG_M532x)
+#include <asm/m532xsim.h>
#elif defined(CONFIG_M5407)
#include <asm/m5407sim.h>
#endif
@@ -100,6 +102,7 @@
#define MCFSIM_IMR_MASKALL 0x3ffe /* All intr sources */
#endif
+
/*
* PIT interrupt settings, if not found in mXXXXsim.h file.
*/
diff --git a/include/asm-m68knommu/mcftimer.h b/include/asm-m68knommu/mcftimer.h
index 68bf33ac10d1..6f4d796e03db 100644
--- a/include/asm-m68knommu/mcftimer.h
+++ b/include/asm-m68knommu/mcftimer.h
@@ -3,7 +3,7 @@
/*
* mcftimer.h -- ColdFire internal TIMER support defines.
*
- * (C) Copyright 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 1999-2006, Greg Ungerer <gerg@snapgear.com>
* (C) Copyright 2000, Lineo Inc. (www.lineo.com)
*/
@@ -27,6 +27,11 @@
#elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
#define MCFTIMER_BASE1 0x140 /* Base address of TIMER1 */
#define MCFTIMER_BASE2 0x180 /* Base address of TIMER2 */
+#elif defined(CONFIG_M532x)
+#define MCFTIMER_BASE1 0xfc070000 /* Base address of TIMER1 */
+#define MCFTIMER_BASE2 0xfc074000 /* Base address of TIMER2 */
+#define MCFTIMER_BASE3 0xfc078000 /* Base address of TIMER3 */
+#define MCFTIMER_BASE4 0xfc07c000 /* Base address of TIMER4 */
#endif
@@ -34,23 +39,14 @@
* Define the TIMER register set addresses.
*/
#define MCFTIMER_TMR 0x00 /* Timer Mode reg (r/w) */
-#define MCFTIMER_TRR 0x02 /* Timer Reference (r/w) */
-#define MCFTIMER_TCR 0x04 /* Timer Capture reg (r/w) */
-#define MCFTIMER_TCN 0x06 /* Timer Counter reg (r/w) */
+#define MCFTIMER_TRR 0x04 /* Timer Reference (r/w) */
+#define MCFTIMER_TCR 0x08 /* Timer Capture reg (r/w) */
+#define MCFTIMER_TCN 0x0C /* Timer Counter reg (r/w) */
+#if defined(CONFIG_M532x)
+#define MCFTIMER_TER 0x03 /* Timer Event reg (r/w) */
+#else
#define MCFTIMER_TER 0x11 /* Timer Event reg (r/w) */
-
-struct mcftimer {
- unsigned short tmr; /* Timer Mode reg (r/w) */
- unsigned short reserved1;
- unsigned short trr; /* Timer Reference (r/w) */
- unsigned short reserved2;
- unsigned short tcr; /* Timer Capture reg (r/w) */
- unsigned short reserved3;
- unsigned short tcn; /* Timer Counter reg (r/w) */
- unsigned short reserved4;
- unsigned char reserved5;
- unsigned char ter; /* Timer Event reg (r/w) */
-} __attribute__((packed));
+#endif
/*
* Bit definitions for the Timer Mode Register (TMR).
diff --git a/include/asm-m68knommu/mcfuart.h b/include/asm-m68knommu/mcfuart.h
index 8040e43786be..dc0146c5258b 100644
--- a/include/asm-m68knommu/mcfuart.h
+++ b/include/asm-m68knommu/mcfuart.h
@@ -44,6 +44,10 @@
#define MCFUART_BASE1 0x60000 /* Base address of UART1 */
#define MCFUART_BASE2 0x64000 /* Base address of UART2 */
#define MCFUART_BASE3 0x68000 /* Base address of UART2 */
+#elif defined(CONFIG_M532x)
+#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
+#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
+#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
#endif
diff --git a/include/asm-m68knommu/page_offset.h b/include/asm-m68knommu/page_offset.h
index 8ed6d7b7d9d1..d4e73e0ba646 100644
--- a/include/asm-m68knommu/page_offset.h
+++ b/include/asm-m68knommu/page_offset.h
@@ -1,46 +1,5 @@
/* This handles the memory map.. */
-
-#ifdef CONFIG_COLDFIRE
-#if defined(CONFIG_SMALL)
-#define PAGE_OFFSET_RAW 0x30020000
-#elif defined(CONFIG_CFV240)
-#define PAGE_OFFSET_RAW 0x02000000
-#else
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
-#endif
-
-#ifdef CONFIG_M68360
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
-
-#ifdef CONFIG_PILOT
-#ifdef CONFIG_M68328
-#define PAGE_OFFSET_RAW 0x10000000
-#endif
-#ifdef CONFIG_M68EZ328
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
-#endif
-#ifdef CONFIG_UCSIMM
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
-
-#if defined(CONFIG_UCDIMM) || defined(CONFIG_DRAGEN2)
-#ifdef CONFIG_M68VZ328
-#define PAGE_OFFSET_RAW 0x00000000
-#endif /* CONFIG_M68VZ328 */
-#endif /* CONFIG_UCDIMM */
-
-#ifdef CONFIG_M68EZ328ADS
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
-#ifdef CONFIG_ALMA_ANS
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
-#ifdef CONFIG_M68EN302
-#define PAGE_OFFSET_RAW 0x00000000
-#endif
+#define PAGE_OFFSET_RAW CONFIG_RAMBASE
diff --git a/include/asm-m68knommu/processor.h b/include/asm-m68knommu/processor.h
index 278b00bc60c5..0ee158e09abb 100644
--- a/include/asm-m68knommu/processor.h
+++ b/include/asm-m68knommu/processor.h
@@ -78,19 +78,31 @@ struct thread_struct {
}
/*
+ * ColdFire stacks need to be re-aligned on trap exit; conventional
+ * 68k can handle this case cleanly.
+ */
+#if defined(CONFIG_COLDFIRE)
+#define reformat(_regs) do { (_regs)->format = 0x4; } while(0)
+#else
+#define reformat(_regs) do { } while (0)
+#endif
+
+/*
* Do necessary setup to start up a newly executed thread.
*
* pass the data segment into user programs if it exists,
* it can't hurt anything as far as I can tell
*/
-#define start_thread(_regs, _pc, _usp) \
-do { \
- set_fs(USER_DS); /* reads from user space */ \
- (_regs)->pc = (_pc); \
- if (current->mm) \
- (_regs)->d5 = current->mm->start_data; \
- (_regs)->sr &= ~0x2000; \
- wrusp(_usp); \
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->pc = (_pc); \
+ ((struct switch_stack *)(_regs))[-1].a6 = 0; \
+ reformat(_regs); \
+ if (current->mm) \
+ (_regs)->d5 = current->mm->start_data; \
+ (_regs)->sr &= ~0x2000; \
+ wrusp(_usp); \
} while(0)
/* Forward declaration, a strange C thing */
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h
index 1e19c457de7d..47258e86e8c4 100644
--- a/include/asm-m68knommu/ptrace.h
+++ b/include/asm-m68knommu/ptrace.h
@@ -46,11 +46,9 @@ struct pt_regs {
#else
unsigned short sr;
unsigned long pc;
-#ifndef NO_FORMAT_VEC
unsigned format : 4; /* frame format specifier */
unsigned vector : 12; /* vector offset */
#endif
-#endif
};
/*
diff --git a/include/asm-mips/hw_irq.h b/include/asm-mips/hw_irq.h
index c854d017c0e5..458d9fdc76bf 100644
--- a/include/asm-mips/hw_irq.h
+++ b/include/asm-mips/hw_irq.h
@@ -19,9 +19,9 @@ extern void init_8259A(int aeoi);
extern atomic_t irq_err_count;
-/* This may not be apropriate for all machines, we'll see ... */
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-{
-}
+/*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+ * machines, we'll see ...
+ */
#endif /* __ASM_HW_IRQ_H */
diff --git a/include/asm-mips/mach-mips/irq.h b/include/asm-mips/mach-mips/irq.h
index 083d9c512a04..e994b0c01227 100644
--- a/include/asm-mips/mach-mips/irq.h
+++ b/include/asm-mips/mach-mips/irq.h
@@ -4,10 +4,4 @@
#define NR_IRQS 256
-#ifdef CONFIG_SMP
-
-#define ARCH_HAS_IRQ_PER_CPU
-
-#endif
-
#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/include/asm-parisc/hw_irq.h b/include/asm-parisc/hw_irq.h
index 151426e27521..6707f7df3921 100644
--- a/include/asm-parisc/hw_irq.h
+++ b/include/asm-parisc/hw_irq.h
@@ -3,15 +3,6 @@
/*
* linux/include/asm/hw_irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- * moved some of the old arch/i386/kernel/irq.h to here. VY
- *
- * IRQ/IPI changes taken from work by Thomas Radke
- * <tomsoft@informatik.tu-chemnitz.de>
*/
-extern void hw_resend_irq(struct hw_interrupt_type *, unsigned int);
-
#endif
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index 377ba90c7d02..5cae260615a2 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -26,11 +26,6 @@
#define NR_IRQS (CPU_IRQ_MAX + 1)
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
static __inline__ int irq_canonicalize(int irq)
{
return (irq == 2) ? 9 : irq;
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index ce0f7db63c16..d40359204aba 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -86,27 +86,27 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#define mask_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
- if (desc->handler && desc->handler->disable) \
- desc->handler->disable(irq); \
+ if (desc->chip && desc->chip->disable) \
+ desc->chip->disable(irq); \
})
#define unmask_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
- if (desc->handler && desc->handler->enable) \
- desc->handler->enable(irq); \
+ if (desc->chip && desc->chip->enable) \
+ desc->chip->enable(irq); \
})
#define ack_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
- if (desc->handler && desc->handler->ack) \
- desc->handler->ack(irq); \
+ if (desc->chip && desc->chip->ack) \
+ desc->chip->ack(irq); \
})
-/* Should we handle this via lost interrupts and IPIs or should we don't care like
- * we do now ? --BenH.
+/*
+ * interrupt-retrigger: should we handle this via lost interrupts and IPIs
+ * or should we not care like we do now ? --BenH.
*/
struct hw_interrupt_type;
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index a10feec29d4d..eb5f33e1977a 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -30,11 +30,6 @@
#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
#define get_irq_desc(irq) (&irq_desc[(irq)])
/* Define a way to iterate across irqs. */
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index c01786ab5fa6..532bfee934f4 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -18,6 +18,8 @@ struct die_args {
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
extern struct atomic_notifier_head powerpc_die_chain;
/* Grossly misnamed. */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index f466bc804f41..2d0af52c823d 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -50,6 +50,8 @@ typedef unsigned int kprobe_opcode_t;
IS_TWI(instr) || IS_TDI(instr))
#define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
+
void kretprobe_trampoline(void);
extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index 5d2c9e6c4be2..46afd29b904e 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -242,7 +242,7 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
- u64 *start, u64 *end);
+ resource_size_t *start, resource_size_t *end);
#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */
#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 92f3e5507d22..bbc3844b086f 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -93,5 +93,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_SMP
+#include <asm/cputable.h>
+#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TOPOLOGY_H */
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 61434edbad7b..11ffaaa5da16 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -133,7 +133,7 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
- u64 *start, u64 *end);
+ resource_size_t *start, resource_size_t *end);
#endif /* __KERNEL__ */
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 4d2b126ba159..0ddcdba79e4a 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -12,6 +12,9 @@
* Copyright (C) 1992, Linus Torvalds
*
*/
+
+#ifdef __KERNEL__
+
#include <linux/compiler.h>
/*
@@ -50,19 +53,6 @@
* with operation of the form "set_bit(bitnr, flags)".
*/
-/* set ALIGN_CS to 1 if the SMP safe bit operations should
- * align the address to 4 byte boundary. It seems to work
- * without the alignment.
- */
-#ifdef __KERNEL__
-#define ALIGN_CS 0
-#else
-#define ALIGN_CS 1
-#ifndef CONFIG_SMP
-#error "bitops won't work without CONFIG_SMP"
-#endif
-#endif
-
/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
@@ -121,10 +111,6 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make OR mask */
@@ -141,10 +127,6 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make AND mask */
@@ -161,10 +143,6 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make XOR mask */
@@ -182,10 +160,6 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make OR/test mask */
@@ -205,10 +179,6 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make AND/test mask */
@@ -228,10 +198,6 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
- nr += (addr & __BITOPS_ALIGN) << 3; /* add alignment to bit number */
- addr ^= addr & __BITOPS_ALIGN; /* align address to 8 */
-#endif
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
/* make XOR/test mask */
@@ -834,8 +800,6 @@ static inline int sched_find_first_bit(unsigned long *b)
#include <asm-generic/bitops/hweight.h>
-#ifdef __KERNEL__
-
/*
* ATTENTION: intel byte ordering convention for ext2 and minix !!
* bit 0 is the LSB of addr; bit 31 is the MSB of addr;
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 089cf567c317..2b1619306351 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -276,6 +276,8 @@ extern void wait_cons_dev(void);
extern void clear_all_subchannels(void);
+extern void css_schedule_reprobe(void);
+
#endif
#endif
diff --git a/include/asm-s390/cmb.h b/include/asm-s390/cmb.h
index 2d09950a9c11..241756f80df3 100644
--- a/include/asm-s390/cmb.h
+++ b/include/asm-s390/cmb.h
@@ -44,10 +44,6 @@ struct cmbdata {
#define BIODASDCMFENABLE _IO(DASD_IOCTL_LETTER,32)
/* enable channel measurement */
#define BIODASDCMFDISABLE _IO(DASD_IOCTL_LETTER,33)
-/* reset channel measurement block */
-#define BIODASDRESETCMB _IO(DASD_IOCTL_LETTER,34)
-/* read channel measurement data */
-#define BIODASDREADCMB _IOWR(DASD_IOCTL_LETTER,32,__u64)
/* read channel measurement data */
#define BIODASDREADALLCMB _IOWR(DASD_IOCTL_LETTER,33,struct cmbdata)
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index 1630c26e8f45..c042f9578081 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -68,10 +68,12 @@ typedef struct dasd_information2_t {
* 0x00: default features
* 0x01: readonly (ro)
* 0x02: use diag discipline (diag)
+ * 0x04: set the device initially online (internal use only)
*/
-#define DASD_FEATURE_DEFAULT 0
-#define DASD_FEATURE_READONLY 1
-#define DASD_FEATURE_USEDIAG 2
+#define DASD_FEATURE_DEFAULT 0x00
+#define DASD_FEATURE_READONLY 0x01
+#define DASD_FEATURE_USEDIAG 0x02
+#define DASD_FEATURE_INITIAL_ONLINE 0x04
#define DASD_PARTN_BITS 2
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 8e0c7ed73d03..0a518915bf90 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -63,6 +63,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
+ .preempt_count = 1, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index e21443d3ea1d..aa7a243862e1 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -394,11 +394,9 @@
#ifdef __KERNEL__
-/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */
-
#define __syscall_return(type, res) \
do { \
- if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+ if ((unsigned long)(res) >= (unsigned long)(-4095)) {\
errno = -(res); \
res = -1; \
} \
diff --git a/include/asm-sh/hw_irq.h b/include/asm-sh/hw_irq.h
index 1d934fb2c581..fed26616967a 100644
--- a/include/asm-sh/hw_irq.h
+++ b/include/asm-sh/hw_irq.h
@@ -1,9 +1,4 @@
#ifndef __ASM_SH_HW_IRQ_H
#define __ASM_SH_HW_IRQ_H
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-{
- /* Nothing to do */
-}
-
#endif /* __ASM_SH_HW_IRQ_H */
diff --git a/include/asm-sh64/hw_irq.h b/include/asm-sh64/hw_irq.h
index ae718d1f2d6c..ebb39089b0ac 100644
--- a/include/asm-sh64/hw_irq.h
+++ b/include/asm-sh64/hw_irq.h
@@ -11,6 +11,5 @@
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
-static __inline__ void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { /* Nothing to do */ }
#endif /* __ASM_SH64_HW_IRQ_H */
diff --git a/include/asm-sparc/io.h b/include/asm-sparc/io.h
index a42df208d590..cab0b851b8b1 100644
--- a/include/asm-sparc/io.h
+++ b/include/asm-sparc/io.h
@@ -249,6 +249,22 @@ extern void __iomem *ioremap(unsigned long offset, unsigned long size);
#define ioremap_nocache(X,Y) ioremap((X),(Y))
extern void iounmap(volatile void __iomem *addr);
+#define ioread8(X) readb(X)
+#define ioread16(X) readw(X)
+#define ioread32(X) readl(X)
+#define iowrite8(val,X) writeb(val,X)
+#define iowrite16(val,X) writew(val,X)
+#define iowrite32(val,X) writel(val,X)
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
/*
* Bus number may be in res->flags... somewhere.
*/
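These additions give sparc the generic iomap interface, so portable drivers can map a BAR once and use ioread*/iowrite* on the returned cookie. A minimal usage sketch, assuming <linux/pci.h> and a probe-time struct pci_dev; MY_STATUS_REG is a made-up register offset, not from the patch:

	/* Sketch: map BAR 0 and poke a 32-bit register. */
	static int touch_status(struct pci_dev *pdev)
	{
		void __iomem *regs = pci_iomap(pdev, 0, 0);	/* whole BAR 0 */
		u32 status;

		if (!regs)
			return -ENOMEM;
		status = ioread32(regs + MY_STATUS_REG);
		iowrite32(status | 0x1, regs + MY_STATUS_REG);
		pci_iounmap(pdev, regs);
		return 0;
	}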
diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h
index c5e3d26eabd3..f9cf44c07164 100644
--- a/include/asm-sparc/prom.h
+++ b/include/asm-sparc/prom.h
@@ -35,6 +35,8 @@ struct property {
int length;
void *value;
struct property *next;
+ unsigned long _flags;
+ unsigned int unique_id;
};
struct device_node {
@@ -58,8 +60,15 @@ struct device_node {
struct kref kref;
unsigned long _flags;
void *data;
+ unsigned int unique_id;
};
+/* flag descriptions */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+
+#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
+#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+
static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
{
dn->pde = de;
@@ -88,6 +97,7 @@ extern struct property *of_find_property(struct device_node *np,
extern int of_device_is_compatible(struct device_node *device, const char *);
extern void *of_get_property(struct device_node *node, const char *name,
int *lenp);
+extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern int of_getintprop_default(struct device_node *np,
const char *name,
int def);
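The new _flags/unique_id fields and the OF_DYNAMIC flag let runtime-created nodes be told apart from those built out of the firmware device tree, so teardown code knows which ones it may kfree(). A minimal sketch, assuming GFP_KERNEL context and with error handling trimmed:

	/* Sketch: create a node at runtime and mark it dynamic. */
	static struct device_node *alloc_dynamic_node(void)
	{
		struct device_node *dp = kzalloc(sizeof(*dp), GFP_KERNEL);

		if (dp)
			OF_MARK_DYNAMIC(dp);	/* teardown may kfree() this one */
		return dp;
	}

Later, only nodes for which OF_IS_DYNAMIC() is true are candidates for freeing.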
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 3c2b5bc8650b..0f5b89c9323b 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -162,4 +162,47 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
#endif /* PCI */
+
+/* Now for the API extensions over the pci_ one */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d) (1)
+
+static inline int
+dma_get_cache_alignment(void)
+{
+ /* no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe */
+ return (1 << INTERNODE_CACHE_SHIFT);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* could define this in terms of the dma_cache ... operations,
+ * but if you get this on a platform, you should convert the platform
+ * to using the generic device DMA API */
+ BUG();
+}
+
#endif /* _ASM_SPARC64_DMA_MAPPING_H */
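Note that the range variants here sync from the start of the mapping through offset+size rather than just the sub-range, since that is all the underlying pci_* API can express. A usage sketch, assuming dev, buf and handle come from the surrounding driver:

	/* Sketch: sync part of a streaming mapping before the CPU reads it. */
	static void rx_complete(struct device *dev, void *buf, dma_addr_t handle)
	{
		/* device has written bytes 512..1023 of the buffer */
		dma_sync_single_range_for_cpu(dev, handle, 512, 512,
					      DMA_FROM_DEVICE);
		/* CPU may now safely read buf[512..1023] */
	}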
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index f8d57bb5570c..b591d0e8d8f0 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -208,7 +208,55 @@ static void sun_fd_enable_dma(void)
pdma_areasize = pdma_size;
}
-extern irqreturn_t sparc_floppy_irq(int, void *, struct pt_regs *);
+irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
+{
+ if (likely(doing_pdma)) {
+ void __iomem *stat = (void __iomem *) fdc_status;
+ unsigned char *vaddr = pdma_vaddr;
+ unsigned long size = pdma_size;
+ u8 val;
+
+ while (size) {
+ val = readb(stat);
+ if (unlikely(!(val & 0x80))) {
+ pdma_vaddr = vaddr;
+ pdma_size = size;
+ return IRQ_HANDLED;
+ }
+ if (unlikely(!(val & 0x20))) {
+ pdma_vaddr = vaddr;
+ pdma_size = size;
+ doing_pdma = 0;
+ goto main_interrupt;
+ }
+ if (val & 0x40) {
+ /* read */
+ *vaddr++ = readb(stat + 1);
+ } else {
+ unsigned char data = *vaddr++;
+
+ /* write */
+ writeb(data, stat + 1);
+ }
+ size--;
+ }
+
+ pdma_vaddr = vaddr;
+ pdma_size = size;
+
+ /* Send Terminal Count pulse to floppy controller. */
+ val = readb(auxio_register);
+ val |= AUXIO_AUX1_FTCNT;
+ writeb(val, auxio_register);
+ val &= ~AUXIO_AUX1_FTCNT;
+ writeb(val, auxio_register);
+
+ doing_pdma = 0;
+ }
+
+main_interrupt:
+ return floppy_interrupt(irq, dev_cookie, regs);
+}
static int sun_fd_request_irq(void)
{
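The inlined handler drives the controller's programmed-I/O path. Assuming the standard i82077 main status register layout, the bits tested are 0x80 (RQM, controller ready for a byte), 0x20 (non-DMA execution phase still in progress) and 0x40 (DIO, 1 = controller has data for the CPU, i.e. a read). While the execution phase lasts, one byte per loop iteration is shuttled through the FIFO at fdc_status + 1; when it ends, the handler sends a terminal-count pulse via the auxio register (or falls straight through) and hands off to the ordinary floppy_interrupt() path.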
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 4040d127ac3e..11251bdd00cb 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -17,6 +17,8 @@ struct die_args {
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
extern struct atomic_notifier_head sparc64die_chain;
extern void bad_trap(struct pt_regs *, long);
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index e9bb26f770ed..15065af566c2 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define arch_remove_kprobe(p) do {} while (0)
+#define ARCH_INACTIVE_KPROBE_COUNT 0
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h
index 6d1556c0c263..265614d497c4 100644
--- a/include/asm-sparc64/prom.h
+++ b/include/asm-sparc64/prom.h
@@ -35,6 +35,8 @@ struct property {
int length;
void *value;
struct property *next;
+ unsigned long _flags;
+ unsigned int unique_id;
};
struct device_node {
@@ -58,8 +60,15 @@ struct device_node {
struct kref kref;
unsigned long _flags;
void *data;
+ unsigned int unique_id;
};
+/* flag descriptions */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+
+#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
+#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+
static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
{
dn->pde = de;
@@ -88,6 +97,7 @@ extern struct property *of_find_property(struct device_node *np,
extern int of_device_is_compatible(struct device_node *device, const char *);
extern void *of_get_property(struct device_node *node, const char *name,
int *lenp);
+extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern int of_getintprop_default(struct device_node *np,
const char *name,
int def);
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 0e234e201bd6..98a6c613589d 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -1,6 +1,9 @@
#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H
+#include <asm/spitfire.h>
+#define smt_capable() (tlb_type == hypervisor)
+
#include <asm-generic/topology.h>
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/include/asm-um/hw_irq.h b/include/asm-um/hw_irq.h
index 4ee38c0b6a64..1cf84cf5f21a 100644
--- a/include/asm-um/hw_irq.h
+++ b/include/asm-um/hw_irq.h
@@ -4,7 +4,4 @@
#include "asm/irq.h"
#include "asm/archparam.h"
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-{}
-
#endif
diff --git a/include/asm-v850/hw_irq.h b/include/asm-v850/hw_irq.h
index a8aab4342712..043e94bb6bd8 100644
--- a/include/asm-v850/hw_irq.h
+++ b/include/asm-v850/hw_irq.h
@@ -1,8 +1,4 @@
#ifndef __V850_HW_IRQ_H__
#define __V850_HW_IRQ_H__
-static inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i)
-{
-}
-
#endif /* __V850_HW_IRQ_H__ */
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
new file mode 100644
index 000000000000..387c8f66af7d
--- /dev/null
+++ b/include/asm-x86_64/alternative.h
@@ -0,0 +1,146 @@
+#ifndef _X86_64_ALTERNATIVE_H
+#define _X86_64_ALTERNATIVE_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+struct alt_instr {
+ u8 *instr; /* original instruction */
+ u8 *replacement;
+ u8 cpuid; /* cpuid bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction, <= instrlen */
+ u8 pad[5];
+};
+
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+struct module;
+extern void alternatives_smp_module_add(struct module *mod, char *name,
+ void *locks, void *locks_end,
+ void *text, void *text_end);
+extern void alternatives_smp_module_del(struct module *mod);
+extern void alternatives_smp_switch(int smp);
+
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr; it can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature) : "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement make sure to pad to the worst case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature), ##input)
+
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c[feat]\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" : output : [feat] "i" (feature), ##input)
+
+/*
+ * Alternative inline assembly for SMP.
+ *
+ * alternative_smp() takes two versions (SMP first, UP second) and is
+ * for more complex stuff such as spinlocks.
+ *
+ * The LOCK_PREFIX macro defined here replaces the LOCK and
+ * LOCK_PREFIX macros used everywhere in the source tree.
+ *
+ * SMP alternatives use the same data structures as the other
+ * alternatives and the X86_FEATURE_UP flag to indicate the case of a
+ * UP system running an SMP kernel. The existing apply_alternatives()
+ * works fine for patching an SMP kernel for UP.
+ *
+ * The SMP alternative tables can be kept after boot and contain both
+ * UP and SMP versions of the instructions to allow switching back to
+ * SMP at runtime, when hotplugging in a new CPU, which is especially
+ * useful in virtualized environments.
+ *
+ * The very common lock prefix is handled as special case in a
+ * separate table which is a pure address list without replacement ptr
+ * and size information. That keeps the table sizes small.
+ */
+
+#ifdef CONFIG_SMP
+#define alternative_smp(smpinstr, upinstr, args...) \
+ asm volatile ("661:\n\t" smpinstr "\n662:\n" \
+ ".section .smp_altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte 0x66\n" /* X86_FEATURE_UP */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .smp_altinstr_replacement,\"awx\"\n" \
+ "663:\n\t" upinstr "\n" /* replacement */ \
+ "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
+ ".previous" : args)
+
+#define LOCK_PREFIX \
+ ".section .smp_locks,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661f\n" /* address */ \
+ ".previous\n" \
+ "661:\n\tlock; "
+
+#else /* ! CONFIG_SMP */
+#define alternative_smp(smpinstr, upinstr, args...) \
+ asm volatile (upinstr : args)
+#define LOCK_PREFIX ""
+#endif
+
+#endif /* _X86_64_ALTERNATIVE_H */
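For illustration, alternative_input() is used the same way the i386 port already uses it; the sketch below mirrors that idiom and is not code added by this patch. A nop sequence is emitted in the generic kernel and patched into a prefetch when the running CPU advertises SSE:

	/* Sketch: patch 4 nops into a prefetchnta on SSE-capable CPUs.
	 * Input numbering starts at %1 because %0 is the feature bit. */
	static inline void my_prefetch(const void *x)
	{
		alternative_input("nop; nop; nop; nop",
				  "prefetchnta (%1)",
				  X86_FEATURE_XMM,
				  "r" (x));
	}

alternative_smp() is used the same way by the spinlock change further down in this patch (asm-x86_64/spinlock.h).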
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index a731be2204d2..9c96a0a8d1bd 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -49,7 +49,8 @@ static __inline unsigned int apic_read(unsigned long reg)
static __inline__ void apic_wait_icr_idle(void)
{
- while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
+ while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
+ cpu_relax();
}
static inline void ack_APIC_irq(void)
@@ -79,30 +80,23 @@ extern void init_apic_mappings (void);
extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
extern void enable_APIC_timer(void);
extern void clustered_apic_check(void);
-extern void nmi_watchdog_default(void);
-extern int setup_nmi_watchdog(char *);
+extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
+ unsigned char msg_type, unsigned char mask);
-extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT -1
-#define NMI_NONE 0
-#define NMI_IO_APIC 1
-#define NMI_LOCAL_APIC 2
-#define NMI_INVALID 3
+#define K8_APIC_EXT_LVT_BASE 0x500
+#define K8_APIC_EXT_INT_MSG_FIX 0x0
+#define K8_APIC_EXT_INT_MSG_SMI 0x2
+#define K8_APIC_EXT_INT_MSG_NMI 0x4
+#define K8_APIC_EXT_INT_MSG_EXT 0x7
+#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
extern int disable_timer_pin_1;
-extern void setup_threshold_lvt(unsigned long lvt_off);
void smp_send_timer_broadcast_ipi(void);
void switch_APIC_timer_to_ipi(void *cpumask);
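The NMI-watchdog declarations move to asm/nmi.h (see below), and the K8-only setup_threshold_lvt() is generalized into setup_APIC_extened_lvt(). Roughly how the MCE threshold code is expected to call it; THRESHOLD_APIC_VECTOR is an assumption taken from hw_irq.h, not shown in this hunk:

	/* Sketch: route the K8 threshold extended-LVT entry to the
	 * threshold vector as a fixed, unmasked interrupt. */
	setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
			       THRESHOLD_APIC_VECTOR,
			       K8_APIC_EXT_INT_MSG_FIX, 0);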
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index bd3fa67ed835..007e88d6d43f 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -1,7 +1,7 @@
#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__
-#include <asm/types.h>
+#include <asm/alternative.h>
/* atomic_t should be 32 bit signed type */
@@ -52,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t;
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "addl %1,%0"
+ LOCK_PREFIX "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -67,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "subl %1,%0"
+ LOCK_PREFIX "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -86,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "subl %2,%0; sete %1"
+ LOCK_PREFIX "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -101,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "incl %0"
+ LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -115,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v)
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "decl %0"
+ LOCK_PREFIX "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -133,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "decl %0; sete %1"
+ LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -152,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "incl %0; sete %1"
+ LOCK_PREFIX "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -172,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "addl %2,%0; sets %1"
+ LOCK_PREFIX "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -189,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
__asm__ __volatile__(
- LOCK "xaddl %0, %1;"
+ LOCK_PREFIX "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
@@ -237,7 +237,7 @@ typedef struct { volatile long counter; } atomic64_t;
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
__asm__ __volatile__(
- LOCK "addq %1,%0"
+ LOCK_PREFIX "addq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -252,7 +252,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
__asm__ __volatile__(
- LOCK "subq %1,%0"
+ LOCK_PREFIX "subq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -271,7 +271,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "subq %2,%0; sete %1"
+ LOCK_PREFIX "subq %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -286,7 +286,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
static __inline__ void atomic64_inc(atomic64_t *v)
{
__asm__ __volatile__(
- LOCK "incq %0"
+ LOCK_PREFIX "incq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -300,7 +300,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
static __inline__ void atomic64_dec(atomic64_t *v)
{
__asm__ __volatile__(
- LOCK "decq %0"
+ LOCK_PREFIX "decq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -318,7 +318,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "decq %0; sete %1"
+ LOCK_PREFIX "decq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -337,7 +337,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "incq %0; sete %1"
+ LOCK_PREFIX "incq %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -357,7 +357,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "addq %2,%0; sets %1"
+ LOCK_PREFIX "addq %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -374,7 +374,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
__asm__ __volatile__(
- LOCK "xaddq %0, %1;"
+ LOCK_PREFIX "xaddq %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
@@ -418,11 +418,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
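The switch from the hard-coded LOCK string to LOCK_PREFIX means each of these operations now records its lock byte in the .smp_locks table, so a UP boot can patch the prefix out (see the alternative.h commentary above). The call-side API is unchanged; a minimal usage sketch:

	/* Sketch: atomic_add_return() is a locked xadd and returns the
	 * post-add value, e.g. for handing out sequence numbers. */
	static atomic_t seq = ATOMIC_INIT(0);

	static int next_seq(void)
	{
		return atomic_add_return(1, &seq);
	}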
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index e9bf933d25d0..f7ba57b1cc08 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -5,12 +5,7 @@
* Copyright 1992, Linus Torvalds.
*/
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
+#include <asm/alternative.h>
#define ADDR (*(volatile long *) addr)
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
new file mode 100644
index 000000000000..6e1654f30986
--- /dev/null
+++ b/include/asm-x86_64/calgary.h
@@ -0,0 +1,66 @@
+/*
+ * Derived from include/asm-powerpc/iommu.h
+ *
+ * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
+ * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_64_CALGARY_H
+#define _ASM_X86_64_CALGARY_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <asm/types.h>
+
+struct iommu_table {
+ unsigned long it_base; /* mapped address of tce table */
+ unsigned long it_hint; /* Hint for next alloc */
+ unsigned long *it_map; /* A simple allocation bitmap for now */
+ spinlock_t it_lock; /* Protects it_map */
+ unsigned int it_size; /* Size of iommu table in entries */
+ unsigned char it_busno; /* Bus number this table belongs to */
+ void __iomem *bbar;
+ u64 tar_val;
+ struct timer_list watchdog_timer;
+};
+
+#define TCE_TABLE_SIZE_UNSPECIFIED ~0
+#define TCE_TABLE_SIZE_64K 0
+#define TCE_TABLE_SIZE_128K 1
+#define TCE_TABLE_SIZE_256K 2
+#define TCE_TABLE_SIZE_512K 3
+#define TCE_TABLE_SIZE_1M 4
+#define TCE_TABLE_SIZE_2M 5
+#define TCE_TABLE_SIZE_4M 6
+#define TCE_TABLE_SIZE_8M 7
+
+#ifdef CONFIG_CALGARY_IOMMU
+extern int calgary_iommu_init(void);
+extern void detect_calgary(void);
+#else
+static inline int calgary_iommu_init(void) { return 1; }
+static inline void detect_calgary(void) { return; }
+#endif
+
+static inline unsigned int bus_to_phb(unsigned char busno)
+{
+ return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
+}
+
+#endif /* _ASM_X86_64_CALGARY_H */
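bus_to_phb() encodes the Calgary topology in which bus 0 (and every bus number divisible by 15) hangs off PHB 0 while each further PHB owns a pair of bus numbers. A purely illustrative sketch that dumps the mapping:

	/* Sketch: bus 0 -> phb 0, bus 1 -> 1, bus 2 -> 2, bus 3 -> 2,
	 * buses 4-5 -> 3, buses 6-7 -> 4, ... */
	static void __init dump_phb_map(void)
	{
		unsigned char bus;

		for (bus = 0; bus < 8; bus++)
			printk(KERN_DEBUG "bus %d -> phb %d\n",
			       bus, bus_to_phb(bus));
	}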
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 662964b74e34..ee792faaca01 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -46,6 +46,7 @@
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */
+#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
@@ -65,6 +66,8 @@
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
+#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 498f66df36b9..b6da83dcc7a6 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -55,6 +55,13 @@ extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops* dma_ops;
extern int iommu_merge;
+static inline int valid_dma_direction(int dma_direction)
+{
+ return ((dma_direction == DMA_BIDIRECTIONAL) ||
+ (dma_direction == DMA_TO_DEVICE) ||
+ (dma_direction == DMA_FROM_DEVICE));
+}
+
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
if (dma_ops->mapping_error)
@@ -72,6 +79,7 @@ static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
return dma_ops->map_single(hwdev, ptr, size, direction);
}
@@ -79,6 +87,7 @@ static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
dma_ops->unmap_single(dev, addr, size, direction);
}
@@ -91,6 +100,7 @@ static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_for_cpu)
dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
direction);
@@ -101,6 +111,7 @@ static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_for_device)
dma_ops->sync_single_for_device(hwdev, dma_handle, size,
direction);
@@ -111,6 +122,7 @@ static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_range_for_cpu) {
dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
}
@@ -122,6 +134,7 @@ static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_range_for_device)
dma_ops->sync_single_range_for_device(hwdev, dma_handle,
offset, size, direction);
@@ -133,6 +146,7 @@ static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_sg_for_cpu)
dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
@@ -142,6 +156,7 @@ static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_sg_for_device) {
dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
}
@@ -152,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
return dma_ops->map_sg(hwdev, sg, nents, direction);
}
@@ -159,6 +175,7 @@ static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction)
{
+ BUG_ON(!valid_dma_direction(direction));
dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
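With the BUG_ON()s in place, every map/unmap/sync call must pass one of the three real transfer directions; DMA_NONE or a stale integer now oopses loudly at the call site instead of being accepted silently. A sketch, with dev, buf and len as placeholders:

	/* Sketch: only DMA_BIDIRECTIONAL, DMA_TO_DEVICE and DMA_FROM_DEVICE
	 * pass valid_dma_direction(). */
	static void dma_dir_example(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t h = dma_map_single(dev, buf, len, DMA_TO_DEVICE); /* ok */

		dma_unmap_single(dev, h, len, DMA_TO_DEVICE);                /* ok */
		/* dma_map_single(dev, buf, len, DMA_NONE) would hit the BUG_ON() */
	}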
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index c556208d3dd7..a37c16f06289 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -1,4 +1,4 @@
-/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $
+/*
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
deleted file mode 100644
index ada497b0b55b..000000000000
--- a/include/asm-x86_64/gart-mapping.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _X8664_GART_MAPPING_H
-#define _X8664_GART_MAPPING_H 1
-
-#include <linux/types.h>
-#include <asm/types.h>
-
-struct device;
-
-extern void*
-gart_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-extern int
-gart_dma_supported(struct device *hwdev, u64 mask);
-
-#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index 18ff7ee9e774..b39098408b69 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -55,7 +55,7 @@
extern int is_hpet_enabled(void);
extern int hpet_rtc_timer_init(void);
-extern int oem_force_hpet_timer(void);
+extern int apic_is_clustered_box(void);
extern int hpet_use_timer;
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 3de96fd86a70..48a4a5364e85 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -12,8 +12,6 @@
* <tomsoft@informatik.tu-chemnitz.de>
*
* hacked by Andi Kleen for x86-64.
- *
- * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
*/
#ifndef __ASSEMBLY__
@@ -126,18 +124,9 @@ asmlinkage void IRQ_NAME(nr); \
__asm__( \
"\n.p2align\n" \
"IRQ" #nr "_interrupt:\n\t" \
- "push $" #nr "-256 ; " \
+ "push $~(" #nr ") ; " \
"jmp common_interrupt");
-#if defined(CONFIG_X86_IO_APIC)
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
- if (IO_APIC_IRQ(i))
- send_IPI_self(IO_APIC_VECTOR(i));
-}
-#else
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-#endif
-
#define platform_legacy_irq(irq) ((irq) < 16)
#endif
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index b4f4b172b15a..5b52ce507338 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -4,317 +4,15 @@
/*
* This file contains the system call numbers of the ia32 port,
* this is for the kernel only.
+ * Only add syscalls here where some part of the kernel needs to know
+ * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
*/
#define __NR_ia32_restart_syscall 0
#define __NR_ia32_exit 1
-#define __NR_ia32_fork 2
#define __NR_ia32_read 3
#define __NR_ia32_write 4
-#define __NR_ia32_open 5
-#define __NR_ia32_close 6
-#define __NR_ia32_waitpid 7
-#define __NR_ia32_creat 8
-#define __NR_ia32_link 9
-#define __NR_ia32_unlink 10
-#define __NR_ia32_execve 11
-#define __NR_ia32_chdir 12
-#define __NR_ia32_time 13
-#define __NR_ia32_mknod 14
-#define __NR_ia32_chmod 15
-#define __NR_ia32_lchown 16
-#define __NR_ia32_break 17
-#define __NR_ia32_oldstat 18
-#define __NR_ia32_lseek 19
-#define __NR_ia32_getpid 20
-#define __NR_ia32_mount 21
-#define __NR_ia32_umount 22
-#define __NR_ia32_setuid 23
-#define __NR_ia32_getuid 24
-#define __NR_ia32_stime 25
-#define __NR_ia32_ptrace 26
-#define __NR_ia32_alarm 27
-#define __NR_ia32_oldfstat 28
-#define __NR_ia32_pause 29
-#define __NR_ia32_utime 30
-#define __NR_ia32_stty 31
-#define __NR_ia32_gtty 32
-#define __NR_ia32_access 33
-#define __NR_ia32_nice 34
-#define __NR_ia32_ftime 35
-#define __NR_ia32_sync 36
-#define __NR_ia32_kill 37
-#define __NR_ia32_rename 38
-#define __NR_ia32_mkdir 39
-#define __NR_ia32_rmdir 40
-#define __NR_ia32_dup 41
-#define __NR_ia32_pipe 42
-#define __NR_ia32_times 43
-#define __NR_ia32_prof 44
-#define __NR_ia32_brk 45
-#define __NR_ia32_setgid 46
-#define __NR_ia32_getgid 47
-#define __NR_ia32_signal 48
-#define __NR_ia32_geteuid 49
-#define __NR_ia32_getegid 50
-#define __NR_ia32_acct 51
-#define __NR_ia32_umount2 52
-#define __NR_ia32_lock 53
-#define __NR_ia32_ioctl 54
-#define __NR_ia32_fcntl 55
-#define __NR_ia32_mpx 56
-#define __NR_ia32_setpgid 57
-#define __NR_ia32_ulimit 58
-#define __NR_ia32_oldolduname 59
-#define __NR_ia32_umask 60
-#define __NR_ia32_chroot 61
-#define __NR_ia32_ustat 62
-#define __NR_ia32_dup2 63
-#define __NR_ia32_getppid 64
-#define __NR_ia32_getpgrp 65
-#define __NR_ia32_setsid 66
-#define __NR_ia32_sigaction 67
-#define __NR_ia32_sgetmask 68
-#define __NR_ia32_ssetmask 69
-#define __NR_ia32_setreuid 70
-#define __NR_ia32_setregid 71
-#define __NR_ia32_sigsuspend 72
-#define __NR_ia32_sigpending 73
-#define __NR_ia32_sethostname 74
-#define __NR_ia32_setrlimit 75
-#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */
-#define __NR_ia32_getrusage 77
-#define __NR_ia32_gettimeofday 78
-#define __NR_ia32_settimeofday 79
-#define __NR_ia32_getgroups 80
-#define __NR_ia32_setgroups 81
-#define __NR_ia32_select 82
-#define __NR_ia32_symlink 83
-#define __NR_ia32_oldlstat 84
-#define __NR_ia32_readlink 85
-#define __NR_ia32_uselib 86
-#define __NR_ia32_swapon 87
-#define __NR_ia32_reboot 88
-#define __NR_ia32_readdir 89
-#define __NR_ia32_mmap 90
-#define __NR_ia32_munmap 91
-#define __NR_ia32_truncate 92
-#define __NR_ia32_ftruncate 93
-#define __NR_ia32_fchmod 94
-#define __NR_ia32_fchown 95
-#define __NR_ia32_getpriority 96
-#define __NR_ia32_setpriority 97
-#define __NR_ia32_profil 98
-#define __NR_ia32_statfs 99
-#define __NR_ia32_fstatfs 100
-#define __NR_ia32_ioperm 101
-#define __NR_ia32_socketcall 102
-#define __NR_ia32_syslog 103
-#define __NR_ia32_setitimer 104
-#define __NR_ia32_getitimer 105
-#define __NR_ia32_stat 106
-#define __NR_ia32_lstat 107
-#define __NR_ia32_fstat 108
-#define __NR_ia32_olduname 109
-#define __NR_ia32_iopl 110
-#define __NR_ia32_vhangup 111
-#define __NR_ia32_idle 112
-#define __NR_ia32_vm86old 113
-#define __NR_ia32_wait4 114
-#define __NR_ia32_swapoff 115
-#define __NR_ia32_sysinfo 116
-#define __NR_ia32_ipc 117
-#define __NR_ia32_fsync 118
-#define __NR_ia32_sigreturn 119
-#define __NR_ia32_clone 120
-#define __NR_ia32_setdomainname 121
-#define __NR_ia32_uname 122
-#define __NR_ia32_modify_ldt 123
-#define __NR_ia32_adjtimex 124
-#define __NR_ia32_mprotect 125
-#define __NR_ia32_sigprocmask 126
-#define __NR_ia32_create_module 127
-#define __NR_ia32_init_module 128
-#define __NR_ia32_delete_module 129
-#define __NR_ia32_get_kernel_syms 130
-#define __NR_ia32_quotactl 131
-#define __NR_ia32_getpgid 132
-#define __NR_ia32_fchdir 133
-#define __NR_ia32_bdflush 134
-#define __NR_ia32_sysfs 135
-#define __NR_ia32_personality 136
-#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_ia32_setfsuid 138
-#define __NR_ia32_setfsgid 139
-#define __NR_ia32__llseek 140
-#define __NR_ia32_getdents 141
-#define __NR_ia32__newselect 142
-#define __NR_ia32_flock 143
-#define __NR_ia32_msync 144
-#define __NR_ia32_readv 145
-#define __NR_ia32_writev 146
-#define __NR_ia32_getsid 147
-#define __NR_ia32_fdatasync 148
-#define __NR_ia32__sysctl 149
-#define __NR_ia32_mlock 150
-#define __NR_ia32_munlock 151
-#define __NR_ia32_mlockall 152
-#define __NR_ia32_munlockall 153
-#define __NR_ia32_sched_setparam 154
-#define __NR_ia32_sched_getparam 155
-#define __NR_ia32_sched_setscheduler 156
-#define __NR_ia32_sched_getscheduler 157
-#define __NR_ia32_sched_yield 158
-#define __NR_ia32_sched_get_priority_max 159
-#define __NR_ia32_sched_get_priority_min 160
-#define __NR_ia32_sched_rr_get_interval 161
-#define __NR_ia32_nanosleep 162
-#define __NR_ia32_mremap 163
-#define __NR_ia32_setresuid 164
-#define __NR_ia32_getresuid 165
-#define __NR_ia32_vm86 166
-#define __NR_ia32_query_module 167
-#define __NR_ia32_poll 168
-#define __NR_ia32_nfsservctl 169
-#define __NR_ia32_setresgid 170
-#define __NR_ia32_getresgid 171
-#define __NR_ia32_prctl 172
+#define __NR_ia32_sigreturn 119
#define __NR_ia32_rt_sigreturn 173
-#define __NR_ia32_rt_sigaction 174
-#define __NR_ia32_rt_sigprocmask 175
-#define __NR_ia32_rt_sigpending 176
-#define __NR_ia32_rt_sigtimedwait 177
-#define __NR_ia32_rt_sigqueueinfo 178
-#define __NR_ia32_rt_sigsuspend 179
-#define __NR_ia32_pread 180
-#define __NR_ia32_pwrite 181
-#define __NR_ia32_chown 182
-#define __NR_ia32_getcwd 183
-#define __NR_ia32_capget 184
-#define __NR_ia32_capset 185
-#define __NR_ia32_sigaltstack 186
-#define __NR_ia32_sendfile 187
-#define __NR_ia32_getpmsg 188 /* some people actually want streams */
-#define __NR_ia32_putpmsg 189 /* some people actually want streams */
-#define __NR_ia32_vfork 190
-#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_ia32_mmap2 192
-#define __NR_ia32_truncate64 193
-#define __NR_ia32_ftruncate64 194
-#define __NR_ia32_stat64 195
-#define __NR_ia32_lstat64 196
-#define __NR_ia32_fstat64 197
-#define __NR_ia32_lchown32 198
-#define __NR_ia32_getuid32 199
-#define __NR_ia32_getgid32 200
-#define __NR_ia32_geteuid32 201
-#define __NR_ia32_getegid32 202
-#define __NR_ia32_setreuid32 203
-#define __NR_ia32_setregid32 204
-#define __NR_ia32_getgroups32 205
-#define __NR_ia32_setgroups32 206
-#define __NR_ia32_fchown32 207
-#define __NR_ia32_setresuid32 208
-#define __NR_ia32_getresuid32 209
-#define __NR_ia32_setresgid32 210
-#define __NR_ia32_getresgid32 211
-#define __NR_ia32_chown32 212
-#define __NR_ia32_setuid32 213
-#define __NR_ia32_setgid32 214
-#define __NR_ia32_setfsuid32 215
-#define __NR_ia32_setfsgid32 216
-#define __NR_ia32_pivot_root 217
-#define __NR_ia32_mincore 218
-#define __NR_ia32_madvise 219
-#define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */
-#define __NR_ia32_getdents64 220
-#define __NR_ia32_fcntl64 221
-#define __NR_ia32_tuxcall 222
-#define __NR_ia32_security 223
-#define __NR_ia32_gettid 224
-#define __NR_ia32_readahead 225
-#define __NR_ia32_setxattr 226
-#define __NR_ia32_lsetxattr 227
-#define __NR_ia32_fsetxattr 228
-#define __NR_ia32_getxattr 229
-#define __NR_ia32_lgetxattr 230
-#define __NR_ia32_fgetxattr 231
-#define __NR_ia32_listxattr 232
-#define __NR_ia32_llistxattr 233
-#define __NR_ia32_flistxattr 234
-#define __NR_ia32_removexattr 235
-#define __NR_ia32_lremovexattr 236
-#define __NR_ia32_fremovexattr 237
-#define __NR_ia32_tkill 238
-#define __NR_ia32_sendfile64 239
-#define __NR_ia32_futex 240
-#define __NR_ia32_sched_setaffinity 241
-#define __NR_ia32_sched_getaffinity 242
-#define __NR_ia32_set_thread_area 243
-#define __NR_ia32_get_thread_area 244
-#define __NR_ia32_io_setup 245
-#define __NR_ia32_io_destroy 246
-#define __NR_ia32_io_getevents 247
-#define __NR_ia32_io_submit 248
-#define __NR_ia32_io_cancel 249
-#define __NR_ia32_exit_group 252
-#define __NR_ia32_lookup_dcookie 253
-#define __NR_ia32_sys_epoll_create 254
-#define __NR_ia32_sys_epoll_ctl 255
-#define __NR_ia32_sys_epoll_wait 256
-#define __NR_ia32_remap_file_pages 257
-#define __NR_ia32_set_tid_address 258
-#define __NR_ia32_timer_create 259
-#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1)
-#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2)
-#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3)
-#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4)
-#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5)
-#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6)
-#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7)
-#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8)
-#define __NR_ia32_statfs64 268
-#define __NR_ia32_fstatfs64 269
-#define __NR_ia32_tgkill 270
-#define __NR_ia32_utimes 271
-#define __NR_ia32_fadvise64_64 272
-#define __NR_ia32_vserver 273
-#define __NR_ia32_mbind 274
-#define __NR_ia32_get_mempolicy 275
-#define __NR_ia32_set_mempolicy 276
-#define __NR_ia32_mq_open 277
-#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1)
-#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2)
-#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3)
-#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4)
-#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5)
-#define __NR_ia32_kexec 283
-#define __NR_ia32_waitid 284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_ia32_add_key 286
-#define __NR_ia32_request_key 287
-#define __NR_ia32_keyctl 288
-#define __NR_ia32_ioprio_set 289
-#define __NR_ia32_ioprio_get 290
-#define __NR_ia32_inotify_init 291
-#define __NR_ia32_inotify_add_watch 292
-#define __NR_ia32_inotify_rm_watch 293
-#define __NR_ia32_migrate_pages 294
-#define __NR_ia32_openat 295
-#define __NR_ia32_mkdirat 296
-#define __NR_ia32_mknodat 297
-#define __NR_ia32_fchownat 298
-#define __NR_ia32_futimesat 299
-#define __NR_ia32_fstatat64 300
-#define __NR_ia32_unlinkat 301
-#define __NR_ia32_renameat 302
-#define __NR_ia32_linkat 303
-#define __NR_ia32_symlinkat 304
-#define __NR_ia32_readlinkat 305
-#define __NR_ia32_fchmodat 306
-#define __NR_ia32_faccessat 307
-#define __NR_ia32_pselect6 308
-#define __NR_ia32_ppoll 309
-#define __NR_ia32_unshare 310
#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
new file mode 100644
index 000000000000..59c396431569
--- /dev/null
+++ b/include/asm-x86_64/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
+#ifndef X86_64_INTEL_ARCH_PERFMON_H
+#define X86_64_INTEL_ARCH_PERFMON_H 1
+
+#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
+
+#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
+#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
+#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
+
+#endif /* X86_64_INTEL_ARCH_PERFMON_H */
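A hedged sketch of how these definitions are meant to be used, mirroring the perfctr-style NMI watchdog/oprofile code; wrmsr() from <asm/msr.h> is an assumption here, not part of this header. It programs architectural event 0x3c ("unhalted core cycles") on counter 0:

	/* Sketch: count unhalted core cycles in user and kernel mode. */
	static void start_cycle_counter(void)
	{
		unsigned int evntsel = ARCH_PERFMON_EVENTSEL_OS
				     | ARCH_PERFMON_EVENTSEL_USR
				     | ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
				     | ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

		wrmsr(MSR_ARCH_PERFMON_PERFCTR0, 0, 0);		/* clear counter */
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);	/* select event  */
		evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);	/* start counting */
	}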
diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h
new file mode 100644
index 000000000000..699dd6961eda
--- /dev/null
+++ b/include/asm-x86_64/k8.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_K8_H
+#define _ASM_K8_H 1
+
+#include <linux/pci.h>
+
+extern struct pci_device_id k8_nb_ids[];
+
+extern int early_is_k8_nb(u32 value);
+extern struct pci_dev **k8_northbridges;
+extern int num_k8_northbridges;
+extern int cache_k8_northbridges(void);
+extern void k8_flush_garts(void);
+
+#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index cf795631d9b4..cd52c7f33bca 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -15,6 +15,8 @@ struct die_args {
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
extern struct atomic_notifier_head die_chain;
/* Grossly misnamed. */
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 98a1e95ddb98..d36febd9bb18 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -43,6 +43,7 @@ typedef u8 kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
void kretprobe_trampoline(void);
extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index cd17945bf218..e769e6200225 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
* This could be done better if we moved the per cpu data directly
* after GS.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+/* Preemption must be disabled around the cpu-local counters; otherwise we
+   could still access a variable of a previous CPU in a non-atomic way. */
+#define cpu_local_wrap_v(v) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (v); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(v) \
+ ({ preempt_disable(); \
+ v; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
#define __cpu_local_inc(v) cpu_local_inc(v)
#define __cpu_local_dec(v) cpu_local_dec(v)
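Call sites keep the same shape; the difference is that callers no longer have to be non-preemptible themselves. A small sketch with a hypothetical per-CPU counter:

	/* Sketch: bump a per-CPU local_t from preemptible context. */
	static DEFINE_PER_CPU(local_t, my_stat);

	static void count_event(void)
	{
		cpu_local_inc(my_stat);	/* preemption handled by the wrapper */
	}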
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 7229785094e3..d13687dfd691 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,13 +67,22 @@ struct mce_log {
/* Software defined banks */
#define MCE_EXTENDED_BANK 128
#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
-#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */
-#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4
+
+#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
+#define K8_MCE_THRESHOLD_BANK_0 (K8_MCE_THRESHOLD_BASE + 0 * 9)
+#define K8_MCE_THRESHOLD_BANK_1 (K8_MCE_THRESHOLD_BASE + 1 * 9)
+#define K8_MCE_THRESHOLD_BANK_2 (K8_MCE_THRESHOLD_BASE + 2 * 9)
+#define K8_MCE_THRESHOLD_BANK_3 (K8_MCE_THRESHOLD_BASE + 3 * 9)
+#define K8_MCE_THRESHOLD_BANK_4 (K8_MCE_THRESHOLD_BASE + 4 * 9)
+#define K8_MCE_THRESHOLD_BANK_5 (K8_MCE_THRESHOLD_BASE + 5 * 9)
+#define K8_MCE_THRESHOLD_DRAM_ECC (K8_MCE_THRESHOLD_BANK_4 + 0)
#ifdef __KERNEL__
#include <asm/atomic.h>
void mce_log(struct mce *m);
+DECLARE_PER_CPU(struct sys_device, device_mce);
+
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
#else
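Worked example of the intended numbering: a K8 threshold event in bank B, block N reports as MCE_EXTENDED_BANK + 1 + B * 9 + N (the * 9 stride apparently leaves room for the per-bank threshold blocks), so the DRAM ECC threshold (bank 4, block 0) shows up as 128 + 1 + 36 = 165 in struct mce, letting userspace decoders tell threshold interrupts apart from ordinary machine-check banks.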
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 11fbee2bd6c0..06fab6de2a88 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -24,7 +24,7 @@ do { \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
- LOCK " decl (%%rdi) \n" \
+ LOCK_PREFIX " decl (%%rdi) \n" \
" js 2f \n" \
"1: \n" \
\
@@ -74,7 +74,7 @@ do { \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
- LOCK " incl (%%rdi) \n" \
+ LOCK_PREFIX " incl (%%rdi) \n" \
" jle 2f \n" \
"1: \n" \
\
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index d3abfc6a8fd5..efb45c894d76 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -5,26 +5,27 @@
#define ASM_NMI_H
#include <linux/pm.h>
+#include <asm/io.h>
struct pt_regs;
-
+
typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-
-/**
+
+/**
* set_nmi_callback
*
* Set a handler for an NMI. Only one handler may be
* set. Return 1 if the NMI was handled.
*/
void set_nmi_callback(nmi_callback_t callback);
-
-/**
+
+/**
* unset_nmi_callback
*
* Remove the handler previously set.
*/
void unset_nmi_callback(void);
-
+
#ifdef CONFIG_PM
/** Replace the PM callback routine for NMI. */
@@ -56,4 +57,21 @@ extern int unknown_nmi_panic;
extern int check_nmi_watchdog(void);
+extern void setup_apic_nmi_watchdog (void);
+extern int reserve_lapic_nmi(void);
+extern void release_lapic_nmi(void);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
+
+extern void nmi_watchdog_default(void);
+extern int setup_nmi_watchdog(char *);
+
+extern unsigned int nmi_watchdog;
+#define NMI_DEFAULT -1
+#define NMI_NONE 0
+#define NMI_IO_APIC 1
+#define NMI_LOCAL_APIC 2
+#define NMI_INVALID 3
+
#endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 2db0620d5449..49c5e9280598 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -39,8 +39,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/page.h>
-#include <linux/dma-mapping.h> /* for have_iommu */
+extern void pci_iommu_alloc(void);
extern int iommu_setup(char *opt);
/* The PCI address space does equal the physical memory
@@ -52,7 +52,7 @@ extern int iommu_setup(char *opt);
*/
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-#ifdef CONFIG_GART_IOMMU
+#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
/*
* x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 31e83c3bd022..a31ab4e68a9b 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -337,14 +337,8 @@ static inline int pmd_large(pmd_t pte) {
/* to find an entry in a page-table-directory. */
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
-#define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
-static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
-{
- return pud + pud_index(address);
-}
-
/* PMD - Level 2 access */
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3061a38a3b1d..3b3c1217fe61 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -69,7 +69,11 @@ struct cpuinfo_x86 {
cpumask_t llc_shared_map; /* cpus sharing the last level cache */
#endif
__u8 apicid;
+#ifdef CONFIG_SMP
__u8 booted_cores; /* number of cores as seen by OS */
+ __u8 phys_proc_id; /* Physical Processor id. */
+ __u8 cpu_core_id; /* Core id. */
+#endif
} ____cacheline_aligned;
#define X86_VENDOR_INTEL 0
@@ -96,6 +100,7 @@ extern char ignore_irq13;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern unsigned short num_cache_leaves;
/*
* EFLAGS bits
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 8abf2a43c944..038fe1f47e6f 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -37,7 +37,6 @@ extern void ia32_sysenter_target(void);
extern void config_acpi_tables(void);
extern void ia32_syscall(void);
-extern void iommu_hole_init(void);
extern int pmtimer_mark_offset(void);
extern void pmtimer_resume(void);
@@ -75,7 +74,7 @@ extern void main_timer_handler(struct pt_regs *regs);
extern unsigned long end_pfn_map;
-extern void show_trace(unsigned long * rsp);
+extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
extern void show_registers(struct pt_regs *regs);
extern void exception_table_check(void);
@@ -101,13 +100,9 @@ extern int unsynchronized_tsc(void);
extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void gart_parse_options(char *);
-extern void __init no_iommu_init(void);
-
extern unsigned long table_start, table_end;
extern int exception_trace;
-extern int force_iommu, no_iommu;
extern int using_apic_timer;
extern int disable_apic;
extern unsigned cpu_khz;
@@ -116,7 +111,13 @@ extern int skip_ioapic_setup;
extern int acpi_ht;
extern int acpi_disabled;
-#ifdef CONFIG_GART_IOMMU
+extern void no_iommu_init(void);
+extern int force_iommu, no_iommu;
+extern int iommu_detected;
+#ifdef CONFIG_IOMMU
+extern void gart_iommu_init(void);
+extern void gart_parse_options(char *);
+extern void iommu_hole_init(void);
extern int fallback_aper_order;
extern int fallback_aper_force;
extern int iommu_aperture;
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index 9942cc393064..dea0e9459264 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -24,7 +24,7 @@
#define RW_LOCK_BIAS_STR "0x01000000"
#define __build_read_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $1,(%0)\n\t" \
+ asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \
"js 2f\n" \
"1:\n" \
LOCK_SECTION_START("") \
@@ -34,7 +34,7 @@
::"a" (rw) : "memory")
#define __build_read_lock_const(rw, helper) \
- asm volatile(LOCK "subl $1,%0\n\t" \
+ asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \
"js 2f\n" \
"1:\n" \
LOCK_SECTION_START("") \
@@ -54,7 +54,7 @@
} while (0)
#define __build_write_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+ asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
"jnz 2f\n" \
"1:\n" \
LOCK_SECTION_START("") \
@@ -64,7 +64,7 @@
::"a" (rw) : "memory")
#define __build_write_lock_const(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+ asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
"jnz 2f\n" \
"1:\n" \
LOCK_SECTION_START("") \
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index a389aa6fe80f..064df08b9a0f 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -106,7 +106,7 @@ static inline void down(struct semaphore * sem)
__asm__ __volatile__(
"# atomic down operation\n\t"
- LOCK "decl %0\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %0\n\t" /* --sem->count */
"js 2f\n"
"1:\n"
LOCK_SECTION_START("")
@@ -130,7 +130,7 @@ static inline int down_interruptible(struct semaphore * sem)
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- LOCK "decl %1\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- LOCK "decl %1\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
@@ -178,7 +178,7 @@ static inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
- LOCK "incl %0\n\t" /* ++sem->count */
+ LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
"jle 2f\n"
"1:\n"
LOCK_SECTION_START("")
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 7686b9b25aef..6805e1feb300 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info),
extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
-extern u8 phys_proc_id[NR_CPUS];
-extern u8 cpu_core_id[NR_CPUS];
extern u8 cpu_llc_id[NR_CPUS];
#define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5d8a5e3589ff..8d3421996f94 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -31,15 +31,19 @@
"jmp 1b\n" \
LOCK_SECTION_END
+#define __raw_spin_lock_string_up \
+ "\n\tdecl %0"
+
#define __raw_spin_unlock_string \
"movl $1,%0" \
:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(
- __raw_spin_lock_string
- :"=m" (lock->slock) : : "memory");
+ alternative_smp(
+ __raw_spin_lock_string,
+ __raw_spin_lock_string_up,
+ "=m" (lock->slock) : : "memory");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index ee6bf275349e..9505d9f4bead 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -6,7 +6,8 @@
/* Written 2002 by Andi Kleen */
/* Only used for special circumstances. Stolen from i386/string.h */
-static inline void * __inline_memcpy(void * to, const void * from, size_t n)
+static __always_inline void *
+__inline_memcpy(void * to, const void * from, size_t n)
{
unsigned long d0, d1, d2;
__asm__ __volatile__(
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index f48e0dad8b3d..68e559f3631c 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,15 +3,10 @@
#include <linux/kernel.h>
#include <asm/segment.h>
+#include <asm/alternative.h>
#ifdef __KERNEL__
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
#define __STR(x) #x
#define STR(x) __STR(x)
@@ -34,7 +29,7 @@
"thread_return:\n\t" \
"movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
- LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
+ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \
"jc ret_from_fork\n\t" \
RESTORE_CONTEXT \
@@ -69,82 +64,6 @@ extern void load_gs_index(unsigned);
".previous" \
: :"r" (value), "r" (0))
-#ifdef __KERNEL__
-struct alt_instr {
- __u8 *instr; /* original instruction */
- __u8 *replacement;
- __u8 cpuid; /* cpuid bit set for replacement */
- __u8 instrlen; /* length of original instruction */
- __u8 replacementlen; /* length of new instruction, <= instrlen */
- __u8 pad[5];
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Peculiarities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement make sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), ##input)
-
-/* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, newinstr, feature, output, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte %c[feat]\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" : output : [feat] "i" (feature), ##input)
-
/*
* Clear and set 'TS' bit respectively
*/
@@ -366,5 +285,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
#endif
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
new file mode 100644
index 000000000000..ee51d31528d6
--- /dev/null
+++ b/include/asm-x86_64/tce.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
+ *
+ * This file is derived from asm-powerpc/tce.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_64_TCE_H
+#define _ASM_X86_64_TCE_H
+
+extern void* tce_table_kva[];
+extern unsigned int specified_table_size;
+struct iommu_table;
+
+#define TCE_ENTRY_SIZE 8 /* in bytes */
+
+#define TCE_READ_SHIFT 0
+#define TCE_WRITE_SHIFT 1
+#define TCE_HUBID_SHIFT 2 /* unused */
+#define TCE_RSVD_SHIFT 8 /* unused */
+#define TCE_RPN_SHIFT 12
+#define TCE_UNUSED_SHIFT 48 /* unused */
+
+#define TCE_RPN_MASK 0x0000fffffffff000ULL
+
+extern void tce_build(struct iommu_table *tbl, unsigned long index,
+ unsigned int npages, unsigned long uaddr, int direction);
+extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
+extern void* alloc_tce_table(void);
+extern void free_tce_table(void *tbl);
+extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar);
+
+#endif /* _ASM_X86_64_TCE_H */
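
[Illustrative note, not part of the patch] The shift/mask constants above describe the on-chip TCE layout: bit 0 and bit 1 are the read/write enable bits and the page frame number sits at bit 12, bounded by TCE_RPN_MASK. A hedged sketch of how one entry could be assembled from these definitions; the helper name and the direction handling are assumptions, only the constants come from this header:

#include <asm/tce.h>
#include <asm/io.h>		/* virt_to_bus() */
#include <asm/page.h>		/* PAGE_SHIFT */

/* Sketch only: encode one TCE for a kernel virtual address (4K pages). */
static u64 make_tce_entry(unsigned long uaddr, int writable)
{
	u64 rpn = (u64)virt_to_bus((void *)uaddr) >> PAGE_SHIFT;
	u64 tce;

	tce = (rpn << TCE_RPN_SHIFT) & TCE_RPN_MASK;
	tce |= 1ULL << TCE_READ_SHIFT;		/* always readable */
	if (writable)
		tce |= 1ULL << TCE_WRITE_SHIFT;	/* device may DMA-write */
	return tce;
}
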
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 4ac0e0a36934..2029b00351f3 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -73,8 +73,21 @@ static inline struct thread_info *stack_thread_info(void)
}
/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define alloc_thread_info(tsk) \
+ ({ \
+ struct thread_info *ret; \
+ \
+ ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \
+ if (ret) \
+ memset(ret, 0, THREAD_SIZE); \
+ ret; \
+ })
+#else
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
+#endif
+
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#else /* !__ASSEMBLY__ */
@@ -101,7 +114,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_IRET 5 /* force IRET */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
-#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+/* 16 free */
#define TIF_IA32 17 /* 32bit process */
#define TIF_FORK 18 /* ret_from_fork */
#define TIF_ABI_PENDING 19
@@ -115,7 +128,6 @@ static inline struct thread_info *stack_thread_info(void)
#define _TIF_IRET (1<<TIF_IRET)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_IA32 (1<<TIF_IA32)
#define _TIF_FORK (1<<TIF_FORK)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
@@ -137,6 +149,9 @@ static inline struct thread_info *stack_thread_info(void)
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
#define TS_COMPAT 0x0002 /* 32bit syscall active */
+#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
#endif /* __KERNEL__ */
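
[Illustrative note, not part of the patch] TIF_POLLING_NRFLAG is replaced by a TS_POLLING bit in thread_info->status, tested through tsk_is_polling(), so the scheduler can skip the reschedule IPI for an idle CPU that is already spinning on TIF_NEED_RESCHED. A rough sketch of the intended idle-loop usage; the loop itself is an assumption, only TS_POLLING and tsk_is_polling() come from this header:

#include <linux/sched.h>	/* need_resched(), current_thread_info() */

/* Hedged sketch of a polling idle loop using the new status bit. */
static void poll_idle_sketch(void)
{
	current_thread_info()->status |= TS_POLLING;
	while (!need_resched())
		cpu_relax();	/* remote CPU sets TIF_NEED_RESCHED, no IPI needed */
	current_thread_info()->status &= ~TS_POLLING;
}
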
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 80c4e44d011c..6e7a2e976b04 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -7,8 +7,6 @@
#include <asm/mpspec.h>
#include <asm/bitops.h>
-/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
-
extern cpumask_t cpu_online_map;
extern unsigned char cpu_to_node[];
@@ -57,12 +55,12 @@ extern int __node_distance(int, int);
#endif
#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu) \
- (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu])
-#define topology_core_id(cpu) \
- (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
+#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
+#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
+#define smt_capable() (smp_num_siblings > 1)
#endif
#include <asm-generic/topology.h>
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
new file mode 100644
index 000000000000..f3e7124effe3
--- /dev/null
+++ b/include/asm-x86_64/unwind.h
@@ -0,0 +1,106 @@
+#ifndef _ASM_X86_64_UNWIND_H
+#define _ASM_X86_64_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+#include <asm/vsyscall.h>
+
+struct unwind_frame_info
+{
+ struct pt_regs regs;
+ struct task_struct *task;
+};
+
+#define UNW_PC(frame) (frame)->regs.rip
+#define UNW_SP(frame) (frame)->regs.rsp
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) (frame)->regs.rbp
+#define FRAME_RETADDR_OFFSET 8
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
+#define STACK_TOP(tsk) ((tsk)->thread.rsp0)
+#endif
+/* Might need to account for the special exception and interrupt handling
+ stacks here, since normally
+ EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
+ but the construct is needed only for getting across the stack switch to
+ the interrupt stack - thus considering the IRQ stack itself is unnecessary,
+ and the overhead of comparing against all exception handling stacks seems
+ not desirable. */
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(rax), \
+ PTREGS_INFO(rdx), \
+ PTREGS_INFO(rcx), \
+ PTREGS_INFO(rbx), \
+ PTREGS_INFO(rsi), \
+ PTREGS_INFO(rdi), \
+ PTREGS_INFO(rbp), \
+ PTREGS_INFO(rsp), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(rip)
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ /*const*/ struct pt_regs *regs)
+{
+ info->regs = *regs;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ extern const char thread_return[];
+
+ memset(&info->regs, 0, sizeof(info->regs));
+ info->regs.rip = (unsigned long)thread_return;
+ info->regs.cs = __KERNEL_CS;
+ __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp);
+ info->regs.rsp = info->task->thread.rsp;
+ info->regs.ss = __KERNEL_DS;
+}
+
+extern int arch_unwind_init_running(struct unwind_frame_info *,
+ int (*callback)(struct unwind_frame_info *,
+ void *arg),
+ void *arg);
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+#if 0 /* This can only work when selector register saves/restores
+ are properly annotated (and tracked in UNW_REGISTER_INFO). */
+ return user_mode(&info->regs);
+#else
+ return (long)info->regs.rip >= 0
+ || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
+ || (long)info->regs.rsp >= 0;
+#endif
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+
+static inline int arch_unw_user_mode(const void *info)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _ASM_X86_64_UNWIND_H */
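
[Illustrative note, not part of the patch] For a running task this header only declares arch_unwind_init_running(), which prepares an unwind_frame_info and hands it to a caller-supplied callback; the UNW_PC()/UNW_SP() accessors then read the recovered registers. A hedged sketch of how a stack-dump path might drive it; the callback body is an assumption, and the per-frame stepping function lives in the generic linux/unwind.h, not here:

#include <linux/kernel.h>	/* printk() */
#include <asm/unwind.h>

/* Sketch only: report the starting frame of the current call chain. */
static int dump_one_frame(struct unwind_frame_info *info, void *arg)
{
	printk("unwind start: RIP=%016lx RSP=%016lx\n",
	       UNW_PC(info), UNW_SP(info));
	/* A real user would loop here, stepping frames via the generic
	 * unwind() helper until it fails. */
	return 0;
}

static void show_trace_sketch(void)
{
	struct unwind_frame_info info;

	arch_unwind_init_running(&info, dump_one_frame, NULL);
}
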
diff --git a/include/asm-xtensa/hw_irq.h b/include/asm-xtensa/hw_irq.h
index ccf436249eaa..3ddbea759b2b 100644
--- a/include/asm-xtensa/hw_irq.h
+++ b/include/asm-xtensa/hw_irq.h
@@ -11,8 +11,4 @@
#ifndef _XTENSA_HW_IRQ_H
#define _XTENSA_HW_IRQ_H
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-{
-}
-
#endif
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index a3dae1803f45..c37c34275a44 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -37,6 +37,7 @@ extern struct key_type key_type_user;
extern int user_instantiate(struct key *key, const void *data, size_t datalen);
extern int user_update(struct key *key, const void *data, size_t datalen);
extern int user_match(const struct key *key, const void *criterion);
+extern void user_revoke(struct key *key);
extern void user_destroy(struct key *key);
extern void user_describe(const struct key *user, struct seq_file *m);
extern long user_read(const struct key *key,
diff --git a/include/linux/ac97_codec.h b/include/linux/ac97_codec.h
index c35833824e11..2ed2fd855133 100644
--- a/include/linux/ac97_codec.h
+++ b/include/linux/ac97_codec.h
@@ -259,7 +259,7 @@ struct ac97_codec {
int type;
u32 model;
- int modem:1;
+ unsigned int modem:1;
struct ac97_ops *codec_ops;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 90d6df1551ed..88b5dfd8ee12 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -528,12 +528,18 @@ static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; }
#ifdef CONFIG_ACPI_NUMA
int acpi_get_pxm(acpi_handle handle);
+int acpi_get_node(acpi_handle *handle);
#else
static inline int acpi_get_pxm(acpi_handle handle)
{
return 0;
}
+static inline int acpi_get_node(acpi_handle *handle)
+{
+ return 0;
+}
#endif
+extern int acpi_paddr_to_node(u64 start_addr, u64 size);
extern int pnpacpi_disabled;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index d9ed27969855..dcc5de7cc487 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -24,6 +24,9 @@
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
+ * Note that nbits should always be a compile-time evaluable constant.
+ * Otherwise many inlines will generate horrible code.
+ *
* bitmap_zero(dst, nbits) *dst = 0UL
* bitmap_fill(dst, nbits) *dst = ~0UL
* bitmap_copy(dst, src, nbits) *dst = *src
@@ -244,6 +247,8 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
+ if (nbits <= BITS_PER_LONG)
+ return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
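
[Illustrative note, not part of the patch] With the new fast path, a bitmap that fits in one word reduces to a single hweight_long() of the masked word; only multi-word bitmaps fall through to __bitmap_weight(). A small stand-alone example:

#include <linux/bitmap.h>

static int count_busy_slots_example(void)
{
	DECLARE_BITMAP(slots, 16);	/* nbits <= BITS_PER_LONG */

	bitmap_zero(slots, 16);
	set_bit(3, slots);
	set_bit(7, slots);

	/* Compiles down to hweight_long(*slots & BITMAP_LAST_WORD_MASK(16)),
	 * i.e. no call into lib/bitmap.c; returns 2 here. */
	return bitmap_weight(slots, 16);
}
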
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index fb7e9b7ccbe3..737e407d0cd1 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -149,7 +149,6 @@ void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -214,6 +213,7 @@ int nobh_truncate_page(struct address_space *, loff_t);
int nobh_writepage(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
+void buffer_init(void);
/*
* inline definitions
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
new file mode 100644
index 000000000000..d852024ed095
--- /dev/null
+++ b/include/linux/clocksource.h
@@ -0,0 +1,185 @@
+/* linux/include/linux/clocksource.h
+ *
+ * This file contains the structure definitions for clocksources.
+ *
+ * If you are not a clocksource, or timekeeping code, you should
+ * not be including this file!
+ */
+#ifndef _LINUX_CLOCKSOURCE_H
+#define _LINUX_CLOCKSOURCE_H
+
+#include <linux/types.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <asm/div64.h>
+#include <asm/io.h>
+
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
+/**
+ * struct clocksource - hardware abstraction for a free running counter
+ * Provides mostly state-free accessors to the underlying hardware.
+ *
+ * @name: ptr to clocksource name
+ * @list: list head for registration
+ * @rating: rating value for selection (higher is better)
+ * To avoid rating inflation the following
+ * list should give you a guide as to how
+ * to assign your clocksource a rating
+ * 1-99: Unfit for real use
+ * Only available for bootup and testing purposes.
+ * 100-199: Base level usability.
+ * Functional for real use, but not desired.
+ * 200-299: Good.
+ * A correct and usable clocksource.
+ * 300-399: Desired.
+ * A reasonably fast and accurate clocksource.
+ * 400-499: Perfect
+ * The ideal clocksource. A must-use where
+ * available.
+ * @read: returns a cycle value
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ * @update_callback: called when safe to alter clocksource values
+ * @is_continuous: defines if clocksource is free-running.
+ * @cycle_interval: Used internally by timekeeping core, please ignore.
+ * @xtime_interval: Used internally by timekeeping core, please ignore.
+ */
+struct clocksource {
+ char *name;
+ struct list_head list;
+ int rating;
+ cycle_t (*read)(void);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+ int (*update_callback)(void);
+ int is_continuous;
+
+ /* timekeeping specific data, ignore */
+ cycle_t cycle_last, cycle_interval;
+ u64 xtime_nsec, xtime_interval;
+ s64 error;
+};
+
+/* simplify initialization of mask field */
+#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1)
+
+/**
+ * clocksource_khz2mult - calculates mult from khz and shift
+ * @khz: Clocksource frequency in KHz
+ * @shift_constant: Clocksource shift factor
+ *
+ * Helper function that converts a khz counter frequency to a clocksource
+ * multiplier, given the clocksource shift value.
+ */
+static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
+{
+ /* khz = cyc/(Million ns)
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = 1Million/khz * 2^shift
+ * mult = 1000000 * 2^shift / khz
+ * mult = (1000000<<shift) / khz
+ */
+ u64 tmp = ((u64)1000000) << shift_constant;
+
+ tmp += khz/2; /* round for do_div */
+ do_div(tmp, khz);
+
+ return (u32)tmp;
+}
+
+/**
+ * clocksource_hz2mult - calculates mult from hz and shift
+ * @hz: Clocksource frequency in Hz
+ * @shift_constant: Clocksource shift factor
+ *
+ * Helper function that converts a hz counter
+ * frequency to a clocksource multiplier, given the
+ * clocksource shift value.
+ */
+static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
+{
+ /* hz = cyc/(Billion ns)
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = 1Billion/hz * 2^shift
+ * mult = 1000000000 * 2^shift / hz
+ * mult = (1000000000<<shift) / hz
+ */
+ u64 tmp = ((u64)1000000000) << shift_constant;
+
+ tmp += hz/2; /* round for do_div */
+ do_div(tmp, hz);
+
+ return (u32)tmp;
+}
+
+/**
+ * clocksource_read: - Access the clocksource's current cycle value
+ * @cs: pointer to clocksource being read
+ *
+ * Uses the clocksource to return the current cycle_t value
+ */
+static inline cycle_t clocksource_read(struct clocksource *cs)
+{
+ return cs->read();
+}
+
+/**
+ * cyc2ns - converts clocksource cycles to nanoseconds
+ * @cs: Pointer to clocksource
+ * @cycles: Cycles
+ *
+ * Uses the clocksource and ntp adjustment to convert cycle_ts to nanoseconds.
+ *
+ * XXX - This could use some mult_lxl_ll() asm optimization
+ */
+static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
+{
+ u64 ret = (u64)cycles;
+ ret = (ret * cs->mult) >> cs->shift;
+ return ret;
+}
+
+/**
+ * clocksource_calculate_interval - Calculates a clocksource interval struct
+ *
+ * @c: Pointer to clocksource.
+ * @length_nsec: Desired interval length in nanoseconds.
+ *
+ * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
+ * pair and interval request.
+ *
+ * Unless you're the timekeeping code, you should not be using this!
+ */
+static inline void clocksource_calculate_interval(struct clocksource *c,
+ unsigned long length_nsec)
+{
+ u64 tmp;
+
+ /* XXX - All of this could use a whole lot of optimization */
+ tmp = length_nsec;
+ tmp <<= c->shift;
+ tmp += c->mult/2;
+ do_div(tmp, c->mult);
+
+ c->cycle_interval = (cycle_t)tmp;
+ if (c->cycle_interval == 0)
+ c->cycle_interval = 1;
+
+ c->xtime_interval = (u64)c->cycle_interval * c->mult;
+}
+
+
+/* used to install a new clocksource */
+int clocksource_register(struct clocksource*);
+void clocksource_reselect(void);
+struct clocksource* clocksource_get_next(void);
+
+#endif /* _LINUX_CLOCKSOURCE_H */
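
[Illustrative note, not part of the patch] The mult/shift pair turns raw cycles into nanoseconds as ns = (cycles * mult) >> shift, and the helpers above derive mult from a counter frequency. A worked sketch for a hypothetical 1 MHz free-running MMIO counter; the register, rating and name are assumptions:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/io.h>

static void __iomem *example_counter_reg;	/* hypothetical MMIO counter */

static cycle_t example_counter_read(void)
{
	return (cycle_t)readl(example_counter_reg);
}

static struct clocksource clocksource_example = {
	.name		= "example-1mhz",
	.rating		= 200,			/* "Good" on the scale above */
	.read		= example_counter_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 20,
	.is_continuous	= 1,
};

static int __init example_clocksource_init(void)
{
	/* 1 MHz = 1 kHz * 1000: mult = (1000000 << 20) / 1000, so
	 * cyc2ns() yields cycles * 1000 ns, as expected for 1 us ticks. */
	clocksource_example.mult = clocksource_khz2mult(1000, 20);
	return clocksource_register(&clocksource_example);
}
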
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index 7b5c5df5cb69..be512cc98791 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -27,8 +27,8 @@ extern struct inode_operations coda_dir_inode_operations;
extern struct inode_operations coda_file_inode_operations;
extern struct inode_operations coda_ioctl_inode_operations;
-extern struct address_space_operations coda_file_aops;
-extern struct address_space_operations coda_symlink_aops;
+extern const struct address_space_operations coda_file_aops;
+extern const struct address_space_operations coda_symlink_aops;
extern const struct file_operations coda_dir_operations;
extern const struct file_operations coda_file_operations;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index dda1697ec753..9760753e662b 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -226,5 +226,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
+extern int compat_printk(const char *fmt, ...);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 89ab677cb993..917d62e41480 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -673,6 +673,11 @@ COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
+/* Siemens Gigaset */
+COMPATIBLE_IOCTL(GIGASET_REDIR)
+COMPATIBLE_IOCTL(GIGASET_CONFIG)
+COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
+COMPATIBLE_IOCTL(GIGASET_VERSION)
/* Misc. */
COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
diff --git a/include/linux/console.h b/include/linux/console.h
index d0f8a8009490..3bdf2155e565 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -63,9 +63,11 @@ extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
extern const struct consw prom_con; /* SPARC PROM console */
+int con_is_bound(const struct consw *csw);
+int register_con_driver(const struct consw *csw, int first, int last);
+int unregister_con_driver(const struct consw *csw);
int take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
-
/* scroll */
#define SM_UP (1)
#define SM_DOWN (2)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 08d50c53aab4..a3caf6866bae 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -31,17 +31,23 @@ struct cpu {
struct sys_device sysdev;
};
-extern int register_cpu(struct cpu *, int, struct node *);
+extern int register_cpu(struct cpu *cpu, int num);
extern struct sys_device *get_cpu_sysdev(unsigned cpu);
#ifdef CONFIG_HOTPLUG_CPU
-extern void unregister_cpu(struct cpu *, struct node *);
+extern void unregister_cpu(struct cpu *cpu);
#endif
struct notifier_block;
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
extern int register_cpu_notifier(struct notifier_block *nb);
+#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu_notifier(struct notifier_block *nb);
+#else
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+#endif
extern int current_in_cpu_hotplug(void);
int cpu_up(unsigned int cpu);
@@ -73,6 +79,8 @@ extern int lock_cpu_hotplug_interruptible(void);
{ .notifier_call = fn, .priority = pri }; \
register_cpu_notifier(&fn##_nb); \
}
+#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#else
@@ -80,6 +88,8 @@ int cpu_down(unsigned int cpu);
#define unlock_cpu_hotplug() do { } while (0)
#define lock_cpu_hotplug_interruptible() 0
#define hotcpu_notifier(fn, pri)
+#define register_hotcpu_notifier(nb)
+#define unregister_hotcpu_notifier(nb)
/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
static inline int cpu_is_offline(int cpu) { return 0; }
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 5a0470e36111..7f946241b879 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -66,7 +66,7 @@ struct crypto_tfm;
struct cipher_desc {
struct crypto_tfm *tfm;
- void (*crfn)(void *ctx, u8 *dst, const u8 *src);
+ void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
const u8 *src, unsigned int nbytes);
void *info;
@@ -79,10 +79,10 @@ struct cipher_desc {
struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
- int (*cia_setkey)(void *ctx, const u8 *key,
+ int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags);
- void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
- void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
+ void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
@@ -100,20 +100,19 @@ struct cipher_alg {
struct digest_alg {
unsigned int dia_digestsize;
- void (*dia_init)(void *ctx);
- void (*dia_update)(void *ctx, const u8 *data, unsigned int len);
- void (*dia_final)(void *ctx, u8 *out);
- int (*dia_setkey)(void *ctx, const u8 *key,
+ void (*dia_init)(struct crypto_tfm *tfm);
+ void (*dia_update)(struct crypto_tfm *tfm, const u8 *data,
+ unsigned int len);
+ void (*dia_final)(struct crypto_tfm *tfm, u8 *out);
+ int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags);
};
struct compress_alg {
- int (*coa_init)(void *ctx);
- void (*coa_exit)(void *ctx);
- int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen);
+ int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen);
};
#define cra_cipher cra_u.cipher
@@ -129,14 +128,17 @@ struct crypto_alg {
int cra_priority;
- const char cra_name[CRYPTO_MAX_ALG_NAME];
- const char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+ char cra_name[CRYPTO_MAX_ALG_NAME];
+ char cra_driver_name[CRYPTO_MAX_ALG_NAME];
union {
struct cipher_alg cipher;
struct digest_alg digest;
struct compress_alg compress;
} cra_u;
+
+ int (*cra_init)(struct crypto_tfm *tfm);
+ void (*cra_exit)(struct crypto_tfm *tfm);
struct module *cra_module;
};
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index aee10b2ea4c6..e3d1c33d1558 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -8,9 +8,12 @@
#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H
+#ifdef __KERNEL__
+
struct dm_target;
struct dm_table;
struct dm_dev;
+struct mapped_device;
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -78,7 +81,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d);
struct target_type {
const char *name;
struct module *module;
- unsigned version[3];
+ unsigned version[3];
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
@@ -128,4 +131,108 @@ struct dm_target {
int dm_register_target(struct target_type *t);
int dm_unregister_target(struct target_type *t);
-#endif /* _LINUX_DEVICE_MAPPER_H */
+
+/*-----------------------------------------------------------------
+ * Functions for creating and manipulating mapped devices.
+ * Drop the reference with dm_put when you finish with the object.
+ *---------------------------------------------------------------*/
+
+/*
+ * DM_ANY_MINOR chooses the next available minor number.
+ */
+#define DM_ANY_MINOR (-1)
+int dm_create(int minor, struct mapped_device **md);
+
+/*
+ * Reference counting for md.
+ */
+struct mapped_device *dm_get_md(dev_t dev);
+void dm_get(struct mapped_device *md);
+void dm_put(struct mapped_device *md);
+
+/*
+ * An arbitrary pointer may be stored alongside a mapped device.
+ */
+void dm_set_mdptr(struct mapped_device *md, void *ptr);
+void *dm_get_mdptr(struct mapped_device *md);
+
+/*
+ * A device can still be used while suspended, but I/O is deferred.
+ */
+int dm_suspend(struct mapped_device *md, int with_lockfs);
+int dm_resume(struct mapped_device *md);
+
+/*
+ * Event functions.
+ */
+uint32_t dm_get_event_nr(struct mapped_device *md);
+int dm_wait_event(struct mapped_device *md, int event_nr);
+
+/*
+ * Info functions.
+ */
+const char *dm_device_name(struct mapped_device *md);
+struct gendisk *dm_disk(struct mapped_device *md);
+int dm_suspended(struct mapped_device *md);
+
+/*
+ * Geometry functions.
+ */
+int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
+int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
+
+
+/*-----------------------------------------------------------------
+ * Functions for manipulating device-mapper tables.
+ *---------------------------------------------------------------*/
+
+/*
+ * First create an empty table.
+ */
+int dm_table_create(struct dm_table **result, int mode,
+ unsigned num_targets, struct mapped_device *md);
+
+/*
+ * Then call this once for each target.
+ */
+int dm_table_add_target(struct dm_table *t, const char *type,
+ sector_t start, sector_t len, char *params);
+
+/*
+ * Finally call this to make the table ready for use.
+ */
+int dm_table_complete(struct dm_table *t);
+
+/*
+ * Table reference counting.
+ */
+struct dm_table *dm_get_table(struct mapped_device *md);
+void dm_table_get(struct dm_table *t);
+void dm_table_put(struct dm_table *t);
+
+/*
+ * Queries
+ */
+sector_t dm_table_get_size(struct dm_table *t);
+unsigned int dm_table_get_num_targets(struct dm_table *t);
+int dm_table_get_mode(struct dm_table *t);
+struct mapped_device *dm_table_get_md(struct dm_table *t);
+
+/*
+ * Trigger an event.
+ */
+void dm_table_event(struct dm_table *t);
+
+/*
+ * The device must be suspended before calling this method.
+ */
+int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+
+/*
+ * Prepare a table for a device that will error all I/O.
+ * To make it active, call dm_suspend(), dm_swap_table() then dm_resume().
+ */
+int dm_create_error_table(struct dm_table **result, struct mapped_device *md);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DEVICE_MAPPER_H */
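
[Illustrative note, not part of the patch] The newly exported in-kernel API follows the order spelled out in the comments: create the mapped_device, build a table (create, add targets, complete), then suspend, swap the table in and resume. A hedged sketch of that sequence for one hypothetical "linear" target; error unwinding and table reference counting are simplified (see drivers/md/dm-ioctl.c for the real sequence), and the target parameters are made up:

#include <linux/device-mapper.h>
#include <linux/fs.h>		/* FMODE_READ, FMODE_WRITE */

static int dm_build_example(struct mapped_device **out_md)
{
	struct mapped_device *md;
	struct dm_table *t;
	char params[] = "/dev/sda 0";	/* illustrative backing device + offset */
	int r;

	r = dm_create(DM_ANY_MINOR, &md);
	if (r)
		return r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (r)
		goto out;

	/* one "linear" target covering sectors 0..1023 */
	r = dm_table_add_target(t, "linear", 0, 1024, params);
	if (!r)
		r = dm_table_complete(t);
	if (r)
		goto out;

	/* the device must be suspended before the table swap */
	dm_suspend(md, 0);
	r = dm_swap_table(md, t);
	dm_resume(md);
	if (r)
		goto out;

	*out_md = md;
	return 0;
out:
	dm_put(md);
	return r;
}
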
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index c67c6786612a..9623bb625090 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -285,9 +285,9 @@ typedef char ioctl_struct[308];
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 6
+#define DM_VERSION_MINOR 7
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2006-02-17)"
+#define DM_VERSION_EXTRA "-ioctl (2006-06-24)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -314,7 +314,7 @@ typedef char ioctl_struct[308];
#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */
/*
- * Set this to improve performance when you aren't going to use open_count.
+ * This flag is now ignored.
*/
#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 78b236ca04f8..272010a6078a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -20,7 +20,7 @@
*/
#ifndef DMAENGINE_H
#define DMAENGINE_H
-#include <linux/config.h>
+
#ifdef CONFIG_DMA_ENGINE
#include <linux/device.h>
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index fbfa6b52e2fb..278ef4495819 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -38,7 +38,7 @@ struct statfs;
extern struct inode_operations efs_dir_inode_operations;
extern const struct file_operations efs_dir_operations;
-extern struct address_space_operations efs_symlink_aops;
+extern const struct address_space_operations efs_symlink_aops;
extern void efs_read_inode(struct inode *);
extern efs_block_t efs_map_block(struct inode *, efs_block_t);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f1281687e549..07a08e92bc73 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -504,23 +504,19 @@ struct fb_cursor_user {
#define FB_EVENT_MODE_DELETE 0x04
/* A driver registered itself */
#define FB_EVENT_FB_REGISTERED 0x05
+/* A driver unregistered itself */
+#define FB_EVENT_FB_UNREGISTERED 0x06
/* CONSOLE-SPECIFIC: get console to framebuffer mapping */
-#define FB_EVENT_GET_CONSOLE_MAP 0x06
+#define FB_EVENT_GET_CONSOLE_MAP 0x07
/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
-#define FB_EVENT_SET_CONSOLE_MAP 0x07
+#define FB_EVENT_SET_CONSOLE_MAP 0x08
/* A display blank is requested */
-#define FB_EVENT_BLANK 0x08
+#define FB_EVENT_BLANK 0x09
/* Private modelist is to be replaced */
-#define FB_EVENT_NEW_MODELIST 0x09
+#define FB_EVENT_NEW_MODELIST 0x0A
/* The resolution of the passed in fb_info about to change and
all vc's should be changed */
-#define FB_EVENT_MODE_CHANGE_ALL 0x0A
-/* CONSOLE-SPECIFIC: set console rotation */
-#define FB_EVENT_SET_CON_ROTATE 0x0B
-/* CONSOLE-SPECIFIC: get console rotation */
-#define FB_EVENT_GET_CON_ROTATE 0x0C
-/* CONSOLE-SPECIFIC: rotate all consoles */
-#define FB_EVENT_SET_CON_ROTATE_ALL 0x0D
+#define FB_EVENT_MODE_CHANGE_ALL 0x0B
struct fb_event {
struct fb_info *info;
@@ -892,7 +888,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix);
extern int fb_get_options(char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern int fb_con_duit(struct fb_info *info, int event, void *data);
extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2d8b348c1192..e04a5cfe874f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -392,7 +392,7 @@ struct address_space {
unsigned int truncate_count; /* Cover race condition with truncate */
unsigned long nrpages; /* number of total pages */
pgoff_t writeback_index;/* writeback starts here */
- struct address_space_operations *a_ops; /* methods */
+ const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits/gfp mask */
struct backing_dev_info *backing_dev_info; /* device readahead, etc */
spinlock_t private_lock; /* for use by the address_space */
@@ -1405,7 +1405,7 @@ extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, unsigned);
extern const struct file_operations def_blk_fops;
-extern struct address_space_operations def_blk_aops;
+extern const struct address_space_operations def_blk_aops;
extern const struct file_operations def_chr_fops;
extern const struct file_operations bad_sock_fops;
extern const struct file_operations def_fifo_fops;
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 966a5b3da439..34c3a215f2cd 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -12,6 +12,9 @@
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
/*
* Support for robust futexes: the kernel cleans up held futexes at
@@ -90,18 +93,21 @@ struct robust_list_head {
*/
#define ROBUST_LIST_LIMIT 2048
-long do_futex(unsigned long uaddr, int op, int val,
- unsigned long timeout, unsigned long uaddr2, int val2,
- int val3);
+long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
+extern void exit_pi_state_list(struct task_struct *curr);
#else
static inline void exit_robust_list(struct task_struct *curr)
{
}
+static inline void exit_pi_state_list(struct task_struct *curr)
+{
+}
#endif
#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
new file mode 100644
index 000000000000..21ea7610e177
--- /dev/null
+++ b/include/linux/hw_random.h
@@ -0,0 +1,50 @@
+/*
+ Hardware Random Number Generator
+
+ Please read Documentation/hw_random.txt for details on use.
+
+ ----------------------------------------------------------
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ */
+
+#ifndef LINUX_HWRANDOM_H_
+#define LINUX_HWRANDOM_H_
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+/**
+ * struct hwrng - Hardware Random Number Generator driver
+ * @name: Unique RNG name.
+ * @init: Initialization callback (can be NULL).
+ * @cleanup: Cleanup callback (can be NULL).
+ * @data_present: Callback to determine if data is available
+ * on the RNG. If NULL, it is assumed that
+ * there is always data available.
+ * @data_read: Read data from the RNG device.
+ * Returns the number of lower random bytes in "data".
+ * Must not be NULL.
+ * @priv: Private data, for use by the RNG driver.
+ */
+struct hwrng {
+ const char *name;
+ int (*init)(struct hwrng *rng);
+ void (*cleanup)(struct hwrng *rng);
+ int (*data_present)(struct hwrng *rng);
+ int (*data_read)(struct hwrng *rng, u32 *data);
+ unsigned long priv;
+
+ /* internal. */
+ struct list_head list;
+};
+
+/** Register a new Hardware Random Number Generator driver. */
+extern int hwrng_register(struct hwrng *rng);
+/** Unregister a Hardware Random Number Generator driver. */
+extern void hwrng_unregister(struct hwrng *rng);
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_HWRANDOM_H_ */
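
[Illustrative note, not part of the patch] A driver fills in struct hwrng with at least a name and a data_read callback and registers it; init, cleanup and data_present may stay NULL. A minimal sketch for a fictional MMIO RNG; the register offsets and "data ready" bit are assumptions:

#include <linux/hw_random.h>
#include <linux/init.h>
#include <asm/io.h>

static void __iomem *example_rng_base;		/* hypothetical MMIO window */

static int example_rng_data_present(struct hwrng *rng)
{
	return readl(example_rng_base + 0x04) & 1;	/* assumed "data ready" bit */
}

static int example_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = readl(example_rng_base + 0x00);
	return 4;	/* all four bytes of *data are random */
}

static struct hwrng example_rng = {
	.name		= "example-rng",
	.data_present	= example_rng_data_present,
	.data_read	= example_rng_data_read,
};

static int __init example_rng_init(void)
{
	return hwrng_register(&example_rng);
}

static void __exit example_rng_exit(void)
{
	hwrng_unregister(&example_rng);
}
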
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ef7bef207f48..0c100168c0cf 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -793,6 +793,7 @@ typedef struct hwif_s {
unsigned auto_poll : 1; /* supports nop auto-poll */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
unsigned no_io_32bit : 1; /* 1 = can not do 32-bit IO ops */
+ unsigned err_stops_fifo : 1; /* 1=data FIFO is cleared by an error */
struct device gendev;
struct completion gendev_rel_comp; /* To deal with device release() */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index d37c8d808b0f..f559a719dbe8 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -78,6 +78,7 @@ void *idr_find(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
+void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
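
[Illustrative note, not part of the patch] idr_replace() swaps the pointer stored under an existing id without changing the id itself; the lib/idr.c side (not shown here) returns the previous pointer, with errors ERR_PTR-encoded. That makes it handy when a placeholder was inserted first just to reserve the id. A hedged usage sketch:

#include <linux/idr.h>
#include <linux/err.h>

static int idr_replace_example(struct idr *idp, void *placeholder, void *real_obj)
{
	void *old;
	int id, err;

	if (!idr_pre_get(idp, GFP_KERNEL))
		return -ENOMEM;
	err = idr_get_new(idp, placeholder, &id);	/* reserve an id early */
	if (err)
		return err;

	/* ... later, once real_obj is fully initialised ... */
	old = idr_replace(idp, real_obj, id);
	return IS_ERR(old) ? PTR_ERR(old) : id;
}
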
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 41ecbb847f32..3a256957fb56 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -87,6 +87,7 @@ extern struct group_info init_groups;
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
+ .normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
@@ -119,10 +120,11 @@ extern struct group_info init_groups;
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \
- .proc_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
+ .pi_lock = SPIN_LOCK_UNLOCKED, \
+ INIT_RT_MUTEXES(tsk) \
}
diff --git a/include/linux/input.h b/include/linux/input.h
index b32c2b6e53f6..56f1e0e1e598 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -232,7 +232,8 @@ struct input_absinfo {
#define KEY_PAUSE 119
#define KEY_KPCOMMA 121
-#define KEY_HANGUEL 122
+#define KEY_HANGEUL 122
+#define KEY_HANGUEL KEY_HANGEUL
#define KEY_HANJA 123
#define KEY_YEN 124
#define KEY_LEFTMETA 125
@@ -1005,6 +1006,7 @@ static inline void init_input_dev(struct input_dev *dev)
}
struct input_dev *input_allocate_device(void);
+void input_free_device(struct input_dev *dev);
static inline struct input_dev *input_get_device(struct input_dev *dev)
{
@@ -1016,12 +1018,6 @@ static inline void input_put_device(struct input_dev *dev)
class_device_put(&dev->cdev);
}
-static inline void input_free_device(struct input_dev *dev)
-{
- if (dev)
- input_put_device(dev);
-}
-
int input_register_device(struct input_dev *);
void input_unregister_device(struct input_dev *);
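
[Illustrative note, not part of the patch] input_free_device() becomes a real exported function next to input_allocate_device(); the old inline that merely dropped a reference is gone. The intended pairing: free with input_free_device() only if the device was never registered, and use input_unregister_device() otherwise. Roughly, with an illustrative single-button device:

#include <linux/input.h>
#include <linux/init.h>

static struct input_dev *example_btn_dev;

static int __init example_input_init(void)
{
	int err;

	example_btn_dev = input_allocate_device();
	if (!example_btn_dev)
		return -ENOMEM;

	example_btn_dev->name = "Example Button";
	set_bit(EV_KEY, example_btn_dev->evbit);
	set_bit(BTN_0, example_btn_dev->keybit);

	err = input_register_device(example_btn_dev);
	if (err) {
		/* never registered: free directly */
		input_free_device(example_btn_dev);
		return err;
	}
	return 0;
}

static void __exit example_input_exit(void)
{
	/* registered: unregistering drops the final reference */
	input_unregister_device(example_btn_dev);
}
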
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 70741e170114..db2a63a11633 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -36,6 +36,20 @@ extern void free_irq(unsigned int, void *);
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+
+/* IRQ wakeup (PM) control: */
+extern int set_irq_wake(unsigned int irq, unsigned int on);
+
+static inline int enable_irq_wake(unsigned int irq)
+{
+ return set_irq_wake(irq, 1);
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+ return set_irq_wake(irq, 0);
+}
+
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
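
[Illustrative note, not part of the patch] enable_irq_wake()/disable_irq_wake() wrap the new set_irq_wake() call, which ends up in the irq_chip ->set_wake() hook; a driver typically flips it in its suspend/resume path so its interrupt can wake the system. A hedged sketch; the device structure and its fields are assumptions:

#include <linux/interrupt.h>

struct example_dev {
	unsigned int irq;
	int wake_enabled;
};

static int example_suspend(struct example_dev *edev)
{
	/* set_irq_wake() may fail if the chip has no ->set_wake() */
	edev->wake_enabled = (enable_irq_wake(edev->irq) == 0);
	return 0;
}

static int example_resume(struct example_dev *edev)
{
	if (edev->wake_enabled)
		disable_irq_wake(edev->irq);
	return 0;
}
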
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index cd6bd001ba4e..87a9fc039b47 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -9,13 +9,15 @@
#define _LINUX_IOPORT_H
#include <linux/compiler.h>
+#include <linux/types.h>
/*
* Resources are tree-like, allowing
* nesting etc..
*/
struct resource {
+ resource_size_t start;
+ resource_size_t end;
const char *name;
- unsigned long start, end;
unsigned long flags;
struct resource *parent, *sibling, *child;
};
@@ -96,31 +98,37 @@ extern struct resource * ____request_resource(struct resource *root, struct reso
extern int release_resource(struct resource *new);
extern __deprecated_for_modules int insert_resource(struct resource *parent, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
- unsigned long size,
- unsigned long min, unsigned long max,
- unsigned long align,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
void (*alignf)(void *, struct resource *,
- unsigned long, unsigned long),
+ resource_size_t, resource_size_t),
void *alignf_data);
-int adjust_resource(struct resource *res, unsigned long start,
- unsigned long size);
+int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+
+/* get registered SYSTEM_RAM resources in specified area */
+extern int find_next_system_ram(struct resource *res);
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
-extern struct resource * __request_region(struct resource *, unsigned long start, unsigned long n, const char *name);
+extern struct resource * __request_region(struct resource *,
+ resource_size_t start,
+ resource_size_t n, const char *name);
/* Compatibility cruft */
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
-extern int __check_region(struct resource *, unsigned long, unsigned long);
-extern void __release_region(struct resource *, unsigned long, unsigned long);
+extern int __check_region(struct resource *, resource_size_t, resource_size_t);
+extern void __release_region(struct resource *, resource_size_t,
+ resource_size_t);
-static inline int __deprecated check_region(unsigned long s, unsigned long n)
+static inline int __deprecated check_region(resource_size_t s,
+ resource_size_t n)
{
return __check_region(&ioport_resource, s, n);
}
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 5653b2f23b6a..d09fbeabf1dc 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -210,11 +210,7 @@ struct kernel_ipmi_msg
#include <linux/list.h>
#include <linux/module.h>
#include <linux/device.h>
-
-#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
-extern struct proc_dir_entry *proc_ipmi_root;
-#endif /* CONFIG_PROC_FS */
/* Opaque type for a IPMI message user. One of these is needed to
send and receive messages. */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 676e00dfb21a..0832149cdb18 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1,5 +1,5 @@
-#ifndef __irq_h
-#define __irq_h
+#ifndef _LINUX_IRQ_H
+#define _LINUX_IRQ_H
/*
* Please do not include this file in generic code. There is currently
@@ -11,7 +11,7 @@
#include <linux/smp.h>
-#if !defined(CONFIG_S390)
+#ifndef CONFIG_S390
#include <linux/linkage.h>
#include <linux/cache.h>
@@ -33,75 +33,160 @@
#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL 64 /* IRQ level triggered */
#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
-#if defined(ARCH_HAS_IRQ_PER_CPU)
+#ifdef CONFIG_IRQ_PER_CPU
# define IRQ_PER_CPU 256 /* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
# define CHECK_IRQ_PER_CPU(var) 0
#endif
+#define IRQ_NOPROBE 512 /* IRQ is not valid for probing */
+#define IRQ_NOREQUEST 1024 /* IRQ cannot be requested */
+#define IRQ_NOAUTOEN 2048 /* IRQ will not be enabled on request irq */
+#define IRQ_DELAYED_DISABLE \
+ 4096 /* IRQ disable (masking) happens delayed. */
+
/*
- * Interrupt controller descriptor. This is all we need
- * to describe about the low-level hardware.
+ * IRQ types, see also include/linux/interrupt.h
*/
-struct hw_interrupt_type {
- const char * typename;
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
- void (*ack)(unsigned int irq);
- void (*end)(unsigned int irq);
- void (*set_affinity)(unsigned int irq, cpumask_t dest);
+#define IRQ_TYPE_NONE 0x0000 /* Default, unspecified type */
+#define IRQ_TYPE_EDGE_RISING 0x0001 /* Edge rising type */
+#define IRQ_TYPE_EDGE_FALLING 0x0002 /* Edge falling type */
+#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
+#define IRQ_TYPE_LEVEL_HIGH 0x0004 /* Level high type */
+#define IRQ_TYPE_LEVEL_LOW 0x0008 /* Level low type */
+#define IRQ_TYPE_SENSE_MASK 0x000f /* Mask of the above */
+#define IRQ_TYPE_SIMPLE 0x0010 /* Simple type */
+#define IRQ_TYPE_PERCPU 0x0020 /* Per CPU type */
+#define IRQ_TYPE_PROBE 0x0040 /* Probing in progress */
+
+struct proc_dir_entry;
+
+/**
+ * struct irq_chip - hardware interrupt chip descriptor
+ *
+ * @name: name for /proc/interrupts
+ * @startup: start up the interrupt (defaults to ->enable if NULL)
+ * @shutdown: shut down the interrupt (defaults to ->disable if NULL)
+ * @enable: enable the interrupt (defaults to chip->unmask if NULL)
+ * @disable: disable the interrupt (defaults to chip->mask if NULL)
+ * @ack: start of a new interrupt
+ * @mask: mask an interrupt source
+ * @mask_ack: ack and mask an interrupt source
+ * @unmask: unmask an interrupt source
+ * @eoi: end of interrupt - chip level
+ * @end: end of interrupt - flow level
+ * @set_affinity: set the CPU affinity on SMP machines
+ * @retrigger: resend an IRQ to the CPU
+ * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
+ * @set_wake: enable/disable power-management wake-on of an IRQ
+ *
+ * @release: release function solely used by UML
+ * @typename: obsoleted by name, kept as migration helper
+ */
+struct irq_chip {
+ const char *name;
+ unsigned int (*startup)(unsigned int irq);
+ void (*shutdown)(unsigned int irq);
+ void (*enable)(unsigned int irq);
+ void (*disable)(unsigned int irq);
+
+ void (*ack)(unsigned int irq);
+ void (*mask)(unsigned int irq);
+ void (*mask_ack)(unsigned int irq);
+ void (*unmask)(unsigned int irq);
+ void (*eoi)(unsigned int irq);
+
+ void (*end)(unsigned int irq);
+ void (*set_affinity)(unsigned int irq, cpumask_t dest);
+ int (*retrigger)(unsigned int irq);
+ int (*set_type)(unsigned int irq, unsigned int flow_type);
+ int (*set_wake)(unsigned int irq, unsigned int on);
+
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
- void (*release)(unsigned int irq, void *dev_id);
+ void (*release)(unsigned int irq, void *dev_id);
#endif
+ /*
+ * For compatibility, ->typename is copied into ->name.
+ * Will disappear.
+ */
+ const char *typename;
};
-typedef struct hw_interrupt_type hw_irq_controller;
-
-/*
- * This is the "IRQ descriptor", which contains various information
- * about the irq, including what kind of hardware handling it has,
- * whether it is disabled etc etc.
+/**
+ * struct irq_desc - interrupt descriptor
+ *
+ * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
+ * @chip: low level interrupt hardware access
+ * @handler_data: per-IRQ data for the irq_chip methods
+ * @chip_data: platform-specific per-chip private data for the chip
+ * methods, to allow shared chip implementations
+ * @action: the irq action chain
+ * @status: status information
+ * @depth: disable-depth, for nested irq_disable() calls
+ * @irq_count: stats field to detect stalled irqs
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
+ * @lock: locking for SMP
+ * @affinity: IRQ affinity on SMP
+ * @cpu: cpu index useful for balancing
+ * @pending_mask: pending rebalanced interrupts
+ * @move_irq: need to re-target IRQ destination
+ * @dir: /proc/irq/ procfs entry
+ * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
*
* Pad this out to 32 bytes for cache and indexing reasons.
*/
-typedef struct irq_desc {
- hw_irq_controller *handler;
- void *handler_data;
- struct irqaction *action; /* IRQ action list */
- unsigned int status; /* IRQ status */
- unsigned int depth; /* nested irq disables */
- unsigned int irq_count; /* For detecting broken interrupts */
- unsigned int irqs_unhandled;
- spinlock_t lock;
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
- unsigned int move_irq; /* Flag need to re-target intr dest*/
+struct irq_desc {
+ void fastcall (*handle_irq)(unsigned int irq,
+ struct irq_desc *desc,
+ struct pt_regs *regs);
+ struct irq_chip *chip;
+ void *handler_data;
+ void *chip_data;
+ struct irqaction *action; /* IRQ action list */
+ unsigned int status; /* IRQ status */
+
+ unsigned int depth; /* nested irq disables */
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned int irqs_unhandled;
+ spinlock_t lock;
+#ifdef CONFIG_SMP
+ cpumask_t affinity;
+ unsigned int cpu;
+#endif
+#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+ cpumask_t pending_mask;
+ unsigned int move_irq; /* need to re-target IRQ dest */
#endif
-} ____cacheline_aligned irq_desc_t;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *dir;
+#endif
+} ____cacheline_aligned;
-extern irq_desc_t irq_desc [NR_IRQS];
+extern struct irq_desc irq_desc[NR_IRQS];
-/* Return a pointer to the irq descriptor for IRQ. */
-static inline irq_desc_t *
-irq_descp (int irq)
-{
- return irq_desc + irq;
-}
+/*
+ * Migration helpers for obsolete names, they will go away:
+ */
+#define hw_interrupt_type irq_chip
+typedef struct irq_chip hw_irq_controller;
+#define no_irq_type no_irq_chip
+typedef struct irq_desc irq_desc_t;
-#include <asm/hw_irq.h> /* the arch dependent stuff */
+/*
+ * Pick up the arch-dependent methods:
+ */
+#include <asm/hw_irq.h>
-extern int setup_irq(unsigned int irq, struct irqaction * new);
+extern int setup_irq(unsigned int irq, struct irqaction *new);
#ifdef CONFIG_GENERIC_HARDIRQS
-extern cpumask_t irq_affinity[NR_IRQS];
#ifdef CONFIG_SMP
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
- irq_affinity[irq] = mask;
+ irq_desc[irq].affinity = mask;
}
#else
static inline void set_native_irq_info(int irq, cpumask_t mask)
@@ -111,8 +196,7 @@ static inline void set_native_irq_info(int irq, cpumask_t mask)
#ifdef CONFIG_SMP
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-extern cpumask_t pending_irq_cpumask[NR_IRQS];
+#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
void set_pending_irq(unsigned int irq, cpumask_t mask);
void move_native_irq(int irq);
@@ -133,7 +217,7 @@ static inline void set_irq_info(int irq, cpumask_t mask)
{
}
-#else // CONFIG_PCI_MSI
+#else /* CONFIG_PCI_MSI */
static inline void move_irq(int irq)
{
@@ -144,26 +228,36 @@ static inline void set_irq_info(int irq, cpumask_t mask)
{
set_native_irq_info(irq, mask);
}
-#endif // CONFIG_PCI_MSI
-#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE
+#endif /* CONFIG_PCI_MSI */
+
+#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */
+
+static inline void move_irq(int irq)
+{
+}
+
+static inline void move_native_irq(int irq)
+{
+}
+
+static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
+{
+}
-#define move_irq(x)
-#define move_native_irq(x)
-#define set_pending_irq(x,y)
static inline void set_irq_info(int irq, cpumask_t mask)
{
set_native_irq_info(irq, mask);
}
-#endif // CONFIG_GENERIC_PENDING_IRQ
+#endif /* CONFIG_GENERIC_PENDING_IRQ */
-#else // CONFIG_SMP
+#else /* CONFIG_SMP */
#define move_irq(x)
#define move_native_irq(x)
-#endif // CONFIG_SMP
+#endif /* CONFIG_SMP */
#ifdef CONFIG_IRQBALANCE
extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
@@ -173,32 +267,138 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
}
#endif
+#ifdef CONFIG_AUTO_IRQ_AFFINITY
+extern int select_smp_affinity(unsigned int irq);
+#else
+static inline int select_smp_affinity(unsigned int irq)
+{
+ return 1;
+}
+#endif
+
extern int no_irq_affinity;
-extern int noirqdebug_setup(char *str);
-extern fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
- struct irqaction *action);
+/* Handle irq action chains: */
+extern int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+ struct irqaction *action);
+
+/*
+ * Built-in IRQ handlers for various IRQ types,
+ * callable via desc->chip->handle_irq()
+ */
+extern void fastcall
+handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs);
+extern void fastcall
+handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc,
+ struct pt_regs *regs);
+extern void fastcall
+handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs);
+extern void fastcall
+handle_simple_irq(unsigned int irq, struct irq_desc *desc,
+ struct pt_regs *regs);
+extern void fastcall
+handle_percpu_irq(unsigned int irq, struct irq_desc *desc,
+ struct pt_regs *regs);
+extern void fastcall
+handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs);
+
+/*
+ * Get a descriptive string for the highlevel handler, for
+ * /proc/interrupts output:
+ */
+extern const char *
+handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
+ struct pt_regs *));
+
+/*
+ * Monolithic do_IRQ implementation.
+ * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
+ */
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
-extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
- int action_ret, struct pt_regs *regs);
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+/*
+ * Architectures call this to let the generic IRQ layer
+ * handle an interrupt. If the descriptor is attached to an
+ * irqchip-style controller then we call the ->handle_irq() handler,
+ * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ */
+static inline void generic_handle_irq(unsigned int irq, struct pt_regs *regs)
+{
+ struct irq_desc *desc = irq_desc + irq;
+
+ if (likely(desc->handle_irq))
+ desc->handle_irq(irq, desc, regs);
+ else
+ __do_IRQ(irq, regs);
+}
+
+/* Handling of unhandled and spurious interrupts: */
+extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ int action_ret, struct pt_regs *regs);
+
+/* Resending of interrupts :*/
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+
+/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
-#ifdef CONFIG_AUTO_IRQ_AFFINITY
-extern int select_smp_affinity(unsigned int irq);
-#else
-static inline int
-select_smp_affinity(unsigned int irq)
+/* Enable/disable irq debugging output: */
+extern int noirqdebug_setup(char *str);
+
+/* Checks whether the interrupt can be requested by request_irq(): */
+extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+
+/* Dummy irq-chip implementation: */
+extern struct irq_chip no_irq_chip;
+
+extern void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+ void fastcall (*handle)(unsigned int,
+ struct irq_desc *,
+ struct pt_regs *));
+extern void
+__set_irq_handler(unsigned int irq,
+ void fastcall (*handle)(unsigned int, struct irq_desc *,
+ struct pt_regs *),
+ int is_chained);
+
+/*
+ * Set a highlevel flow handler for a given IRQ:
+ */
+static inline void
+set_irq_handler(unsigned int irq,
+ void fastcall (*handle)(unsigned int, struct irq_desc *,
+ struct pt_regs *))
{
- return 1;
+ __set_irq_handler(irq, handle, 0);
}
-#endif
-#endif
+/*
+ * Set a highlevel chained flow handler for a given IRQ.
+ * (a chained handler is automatically enabled and set to
+ * IRQ_NOREQUEST and IRQ_NOPROBE)
+ */
+static inline void
+set_irq_chained_handler(unsigned int irq,
+ void fastcall (*handle)(unsigned int, struct irq_desc *,
+ struct pt_regs *))
+{
+ __set_irq_handler(irq, handle, 1);
+}
-extern hw_irq_controller no_irq_type; /* needed in every arch ? */
+/* Set/get chip/data for an IRQ: */
-#endif
+extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
+extern int set_irq_data(unsigned int irq, void *data);
+extern int set_irq_chip_data(unsigned int irq, void *data);
+extern int set_irq_type(unsigned int irq, unsigned int type);
+
+#define get_irq_chip(irq) (irq_desc[irq].chip)
+#define get_irq_chip_data(irq) (irq_desc[irq].chip_data)
+#define get_irq_data(irq) (irq_desc[irq].handler_data)
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
+
+#endif /* !CONFIG_S390 */
-#endif /* __irq_h */
+#endif /* _LINUX_IRQ_H */
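
The irq.h changes above introduce the irqchip flow-handler model: each irq_desc carries a handle_irq() flow handler (handle_level_irq(), handle_edge_irq(), ...), generic_handle_irq() dispatches to it and falls back to __do_IRQ() for old-style controllers, and set_irq_chained_handler() marks cascade inputs as IRQ_NOREQUEST/IRQ_NOPROBE. A minimal sketch of how an interrupt-controller driver might wire itself up; the foo_* names and IRQ numbers are hypothetical and not part of this patch:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static struct irq_chip foo_irq_chip;	/* hypothetical chip; ack/mask/unmask ops omitted */

/* Demultiplex the cascade input and hand the decoded IRQ back to the core. */
static void fastcall foo_demux_handler(unsigned int irq, struct irq_desc *desc,
				       struct pt_regs *regs)
{
	unsigned int child = 42;	/* read the real child IRQ number from hardware */

	/* Calls desc->handle_irq() if one is installed, else __do_IRQ(). */
	generic_handle_irq(child, regs);
}

static void __init foo_init_irqs(void)
{
	int i;

	/* Level-triggered child interrupts use the built-in flow handler. */
	for (i = 100; i < 108; i++)
		set_irq_chip_and_handler(i, &foo_irq_chip, handle_level_irq);

	/* The cascade input is chained: __set_irq_handler(..., 1) enables it
	 * and sets IRQ_NOREQUEST/IRQ_NOPROBE automatically. */
	set_irq_chained_handler(9, foo_demux_handler);
}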
diff --git a/include/linux/isdn/tpam.h b/include/linux/isdn/tpam.h
deleted file mode 100644
index d18dd0dc570d..000000000000
--- a/include/linux/isdn/tpam.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* $Id: tpam.h,v 1.1.2.1 2001/06/08 08:23:46 kai Exp $
- *
- * Turbo PAM ISDN driver for Linux. (Kernel Driver)
- *
- * Copyright 2001 Stelian Pop <stelian.pop@fr.alcove.com>, Alcôve
- *
- * For all support questions please contact: <support@auvertech.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef _TPAM_H_
-#define _TPAM_H_
-
-#include <linux/types.h>
-
-/* IOCTL commands */
-#define TPAM_CMD_DSPLOAD 0x0001
-#define TPAM_CMD_DSPSAVE 0x0002
-#define TPAM_CMD_DSPRUN 0x0003
-#define TPAM_CMD_LOOPMODEON 0x0004
-#define TPAM_CMD_LOOPMODEOFF 0x0005
-
-/* addresses of debug information zones on board */
-#define TPAM_TRAPAUDIT_REGISTER 0x005493e4
-#define TPAM_NCOAUDIT_REGISTER 0x00500000
-#define TPAM_MSGAUDIT_REGISTER 0x008E30F0
-
-/* length of debug information zones on board */
-#define TPAM_TRAPAUDIT_LENGTH 10000
-#define TPAM_NCOAUDIT_LENGTH 300000
-#define TPAM_NCOAUDIT_COUNT 30
-#define TPAM_MSGAUDIT_LENGTH 60000
-
-/* IOCTL load/save parameter */
-typedef struct tpam_dsp_ioctl {
- __u32 address; /* address to load/save data */
- __u32 data_len; /* size of data to be loaded/saved */
- __u8 data[0]; /* data */
-} tpam_dsp_ioctl;
-
-#endif /* _TPAM_H_ */
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index c6f70660b371..c9c760700bc3 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -186,6 +186,7 @@ struct jffs2_raw_xref
jint32_t hdr_crc;
jint32_t ino; /* inode number */
jint32_t xid; /* XATTR identifier number */
+ jint32_t xseqno; /* xref sequential number */
jint32_t node_crc;
} __attribute__((packed));
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 4eb851ece080..efe0ee4cc80b 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -155,10 +155,8 @@ static inline void con_schedule_flip(struct tty_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&t->buf.lock, flags);
- if (t->buf.tail != NULL) {
- t->buf.tail->active = 0;
+ if (t->buf.tail != NULL)
t->buf.tail->commit = t->buf.tail->used;
- }
spin_unlock_irqrestore(&t->buf.lock, flags);
schedule_work(&t->buf.work);
}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3c5e4c2e517d..5c1ec1f84eab 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -32,6 +32,7 @@ extern const char linux_banner[];
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
@@ -336,6 +337,12 @@ struct sysinfo {
/* Force a compilation error if condition is true */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or wherever else comma expressions
+ aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
+
/* Trap pasters of __FUNCTION__ at compile-time */
#define __FUNCTION__ (__func__)
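
The two kernel.h additions are small compile-time helpers: FIELD_SIZEOF() yields the size of a struct member without needing an instance, and BUILD_BUG_ON_ZERO() is a BUILD_BUG_ON() variant that evaluates to (size_t)0 so it can sit inside an expression or initializer. A brief illustration (the pkt_header struct is made up):

#include <linux/kernel.h>
#include <linux/types.h>

struct pkt_header {
	__u32 magic;
	__u16 len;
};

/* Sized from the member type alone: 4 bytes here. */
static char magic_buf[FIELD_SIZEOF(struct pkt_header, magic)];

/* The assertion lives inside the initializer; if the condition were true,
 * the char[] inside BUILD_BUG_ON_ZERO() would get a negative size and the
 * build would fail. */
static const size_t hdr_len =
	sizeof(struct pkt_header) + BUILD_BUG_ON_ZERO(sizeof(struct pkt_header) > 8);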
diff --git a/include/linux/key.h b/include/linux/key.h
index e81ebf910d0b..169f05e4863e 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -177,7 +177,8 @@ struct key {
/*
* kernel managed key type definition
*/
-typedef int (*request_key_actor_t)(struct key *key, struct key *authkey, const char *op);
+typedef int (*request_key_actor_t)(struct key *key, struct key *authkey,
+ const char *op, void *aux);
struct key_type {
/* name of the type */
@@ -248,7 +249,14 @@ extern struct key *key_alloc(struct key_type *type,
const char *desc,
uid_t uid, gid_t gid,
struct task_struct *ctx,
- key_perm_t perm, int not_in_quota);
+ key_perm_t perm,
+ unsigned long flags);
+
+
+#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
+#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
+#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+
extern int key_payload_reserve(struct key *key, size_t datalen);
extern int key_instantiate_and_link(struct key *key,
const void *data,
@@ -278,6 +286,11 @@ extern struct key *request_key(struct key_type *type,
const char *description,
const char *callout_info);
+extern struct key *request_key_with_auxdata(struct key_type *type,
+ const char *description,
+ const char *callout_info,
+ void *aux);
+
extern int key_validate(struct key *key);
extern key_ref_t key_create_or_update(key_ref_t keyring,
@@ -285,7 +298,7 @@ extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *description,
const void *payload,
size_t plen,
- int not_in_quota);
+ unsigned long flags);
extern int key_update(key_ref_t key,
const void *payload,
@@ -299,7 +312,7 @@ extern int key_unlink(struct key *keyring,
extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
struct task_struct *ctx,
- int not_in_quota,
+ unsigned long flags,
struct key *dest);
extern int keyring_clear(struct key *keyring);
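
The key.h changes replace the old not_in_quota integer with an unsigned long flags word (KEY_ALLOC_IN_QUOTA, KEY_ALLOC_QUOTA_OVERRUN, KEY_ALLOC_NOT_IN_QUOTA), add an aux pointer to the request_key upcall actor, and export request_key_with_auxdata(). A hedged sketch of a caller using the new forms; my_key_type, the description and the callout string are hypothetical:

#include <linux/key.h>
#include <linux/sched.h>

extern struct key_type my_key_type;	/* hypothetical key type */

static struct key *my_make_keyring(void)
{
	/* Quota-exempt keyring owned by the current task. */
	return keyring_alloc("my_keyring", current->uid, current->gid,
			     current, KEY_ALLOC_NOT_IN_QUOTA, NULL);
}

static struct key *my_request(void *aux)
{
	/* aux is passed through to the type's request_key_actor_t, which now
	 * has the (key, authkey, op, aux) signature. */
	return request_key_with_auxdata(&my_key_type, "my:description",
					"callout info", aux);
}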
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 20b1cf527c60..f4284bf89758 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -30,6 +30,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <asm/scatterlist.h>
#include <asm/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>
@@ -887,6 +888,9 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
return tag == ATA_MAX_QUEUE - 1;
}
+/*
+ * device helpers
+ */
static inline unsigned int ata_class_enabled(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
@@ -917,6 +921,17 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
return ata_class_absent(dev->class);
}
+/*
+ * port helpers
+ */
+static inline int ata_port_max_devices(const struct ata_port *ap)
+{
+ if (ap->flags & ATA_FLAG_SLAVE_POSS)
+ return 2;
+ return 1;
+}
+
+
static inline u8 ata_chk_status(struct ata_port *ap)
{
return ap->ops->check_status(ap);
diff --git a/include/linux/license.h b/include/linux/license.h
new file mode 100644
index 000000000000..decdbf43cb5c
--- /dev/null
+++ b/include/linux/license.h
@@ -0,0 +1,14 @@
+#ifndef __LICENSE_H
+#define __LICENSE_H
+
+static inline int license_is_gpl_compatible(const char *license)
+{
+ return (strcmp(license, "GPL") == 0
+ || strcmp(license, "GPL v2") == 0
+ || strcmp(license, "GPL and additional rights") == 0
+ || strcmp(license, "Dual BSD/GPL") == 0
+ || strcmp(license, "Dual MIT/GPL") == 0
+ || strcmp(license, "Dual MPL/GPL") == 0);
+}
+
+#endif
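
license_is_gpl_compatible() centralises the string comparison used to decide whether a module may bind to EXPORT_SYMBOL_GPL symbols. A trivial usage sketch (the check_license() wrapper is illustrative, not from this patch); note that the header relies on strcmp(), so <linux/string.h> must already be in scope:

#include <linux/string.h>
#include <linux/license.h>

static int check_license(const char *license, int *gplok)
{
	/* Non-GPL-compatible modules may not use GPL-only exports. */
	*gplok = license_is_gpl_compatible(license);
	return *gplok;
}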
diff --git a/include/linux/list.h b/include/linux/list.h
index 37ca31b21bb7..6b74adf5297f 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -4,18 +4,11 @@
#ifdef __KERNEL__
#include <linux/stddef.h>
+#include <linux/poison.h>
#include <linux/prefetch.h>
#include <asm/system.h>
/*
- * These are non-NULL pointers that will result in page faults
- * under normal circumstances, used to verify that nobody uses
- * non-initialized list entries.
- */
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
-
-/*
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
diff --git a/include/linux/loop.h b/include/linux/loop.h
index bf3d2345ce99..e76c7611d6cc 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -59,7 +59,7 @@ struct loop_device {
struct bio *lo_bio;
struct bio *lo_biotail;
int lo_state;
- struct task_struct *lo_thread;
+ struct completion lo_done;
struct completion lo_bh_done;
struct mutex lo_ctl_mutex;
int lo_pending;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 911206386171..218501cfaeb9 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -63,6 +63,76 @@ extern int online_pages(unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
+
+#ifdef CONFIG_NUMA
+extern int memory_add_physaddr_to_nid(u64 start);
+#else
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
+/*
+ * To support node hot-add, we have to allocate a new pgdat.
+ *
+ * If an arch has a generic style NODE_DATA(),
+ * node_data[nid] = kzalloc() works well, but this depends on the architecture.
+ *
+ * In general, generic_alloc_nodedata() is used.
+ * arch_free_nodedata() is currently only needed for the error path of node hot-add.
+ */
+extern pg_data_t *arch_alloc_nodedata(int nid);
+extern void arch_free_nodedata(pg_data_t *pgdat);
+extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+
+#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
+#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+
+#ifdef CONFIG_NUMA
+/*
+ * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate a pgdat.
+ * XXX: kmalloc_node() cannot yet be used to get the new node's memory, because
+ * the pgdat for the new node is not allocated/initialized itself at this point.
+ * Using the new node's own memory will need further work.
+ */
+#define generic_alloc_nodedata(nid) \
+({ \
+ kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
+})
+/*
+ * This definition is just for the error path of node hot-add.
+ * For node hot-remove, it will have to be replaced.
+ */
+#define generic_free_nodedata(pgdat) kfree(pgdat)
+
+extern pg_data_t *node_data[];
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ node_data[nid] = pgdat;
+}
+
+#else /* !CONFIG_NUMA */
+
+/* never called */
+static inline pg_data_t *generic_alloc_nodedata(int nid)
+{
+ BUG();
+ return NULL;
+}
+static inline void generic_free_nodedata(pg_data_t *pgdat)
+{
+}
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -99,7 +169,8 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
return -ENOSYS;
}
-extern int add_memory(u64 start, u64 size);
+extern int add_memory(int nid, u64 start, u64 size);
+extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
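
With these memory_hotplug.h changes, add_memory() becomes node-aware: callers pass a node id, and for a brand-new node the pgdat is allocated through arch_alloc_nodedata()/arch_refresh_nodedata() (the kzalloc()-based generic versions unless the arch selects CONFIG_HAVE_ARCH_NODEDATA_EXTENSION). A rough sketch of a probe-style caller, assuming a physical range handed in from firmware; this is illustrative, not the mm/memory_hotplug.c implementation:

#include <linux/types.h>
#include <linux/memory_hotplug.h>

static int my_probe_memory(u64 start, u64 size)
{
	/* Map the physical address to a node; returns 0 when !CONFIG_NUMA. */
	int nid = memory_add_physaddr_to_nid(start);

	/* add_memory() now takes the node id and will set up the pgdat for a
	 * previously offline node itself. */
	return add_memory(nid, start, size);
}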
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a929ea197e48..c41a1299b8cf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1030,13 +1030,20 @@ static inline void vm_stat_account(struct mm_struct *mm,
}
#endif /* CONFIG_PROC_FS */
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+ mutex_debug_check_no_locks_freed(from, len);
+ rt_mutex_debug_check_no_locks_freed(from, len);
+}
+
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!PageHighMem(page) && !enable)
- mutex_debug_check_no_locks_freed(page_address(page),
- numpages * PAGE_SIZE);
+ debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
}
#endif
@@ -1065,5 +1072,7 @@ void drop_slab(void);
extern int randomize_va_space;
#endif
+const char *arch_vma_name(struct vm_area_struct *vma);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 2d366098eab5..9e9dc7c24d95 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -203,6 +203,15 @@ void *__symbol_get_gpl(const char *symbol);
#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
__EXPORT_SYMBOL(sym, "_gpl_future")
+
+#ifdef CONFIG_UNUSED_SYMBOLS
+#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
+#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
+#else
+#define EXPORT_UNUSED_SYMBOL(sym)
+#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+#endif
+
#endif
struct module_ref
@@ -261,6 +270,15 @@ struct module
unsigned int num_gpl_syms;
const unsigned long *gpl_crcs;
+ /* unused exported symbols. */
+ const struct kernel_symbol *unused_syms;
+ unsigned int num_unused_syms;
+ const unsigned long *unused_crcs;
+ /* GPL-only, unused exported symbols. */
+ const struct kernel_symbol *unused_gpl_syms;
+ unsigned int num_unused_gpl_syms;
+ const unsigned long *unused_gpl_crcs;
+
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
unsigned int num_gpl_future_syms;
@@ -285,6 +303,9 @@ struct module
/* The size of the executable code in each section. */
unsigned long init_text_size, core_text_size;
+ /* The handle returned from unwind_add_table. */
+ void *unwind_info;
+
/* Arch-specific module values */
struct mod_arch_specific arch;
@@ -453,6 +474,8 @@ void module_remove_driver(struct device_driver *);
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_GPL_FUTURE(sym)
+#define EXPORT_UNUSED_SYMBOL(sym)
+#define EXPORT_UNUSED_SYMBOL_GPL(sym)
/* Given an address, look for it in the exception tables. */
static inline const struct exception_table_entry *
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bc747e5d7138..03cd7551a7a1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -699,7 +699,6 @@ extern int dev_hard_start_xmit(struct sk_buff *skb,
extern void dev_init(void);
-extern int netdev_nit;
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ca5a8733000f..1efe60c5c00c 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -31,6 +31,7 @@ struct netpoll_info {
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
+ struct sk_buff_head arp_tx; /* list of arp requests to reply to */
};
void netpoll_poll(struct netpoll *np);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0a1740b2532e..d90b1bb37563 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -335,7 +335,7 @@ extern struct inode_operations nfs_file_inode_operations;
extern struct inode_operations nfs3_file_inode_operations;
#endif /* CONFIG_NFS_V3 */
extern const struct file_operations nfs_file_operations;
-extern struct address_space_operations nfs_file_aops;
+extern const struct address_space_operations nfs_file_aops;
static inline struct rpc_cred *nfs_file_cred(struct file *file)
{
diff --git a/include/linux/node.h b/include/linux/node.h
index 254dc3de650b..81dcec84cd8f 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -26,8 +26,25 @@ struct node {
struct sys_device sysdev;
};
+extern struct node node_devices[];
+
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
+extern int register_one_node(int nid);
+extern void unregister_one_node(int nid);
+#ifdef CONFIG_NUMA
+extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
+extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
+#else
+static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ return 0;
+}
+static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ return 0;
+}
+#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
new file mode 100644
index 000000000000..135742cfada5
--- /dev/null
+++ b/include/linux/nsc_gpio.h
@@ -0,0 +1,42 @@
+/**
+ nsc_gpio.c
+
+ National Semiconductor GPIO common access methods.
+
+ struct nsc_gpio_ops abstracts the low-level access
+ operations for the GPIO units on two NSC chip families: the Geode
+ integrated CPU, and the PC-8736[03456] integrated PC-peripheral
+ chips.
+
+ The GPIO units on these chips have the same pin architecture, but
+ the access methods differ. Thus, scx200_gpio and pc8736x_gpio
+ implement their own versions of these routines, and use the common
+ file-operations routines implemented in the nsc_gpio module.
+
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+
+ NB: this work was tested on the Geode SC-1100 and PC-87366 chips.
+ NSC sold the Geode line to AMD, and the PC-8736x line to Winbond.
+*/
+
+struct nsc_gpio_ops {
+ struct module* owner;
+ u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits);
+ void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor);
+ int (*gpio_get) (unsigned iminor);
+ void (*gpio_set) (unsigned iminor, int state);
+ void (*gpio_set_high)(unsigned iminor);
+ void (*gpio_set_low) (unsigned iminor);
+ void (*gpio_change) (unsigned iminor);
+ int (*gpio_current) (unsigned iminor);
+ struct device* dev; /* for dev_dbg() support, set in init */
+};
+
+extern ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos);
+
+extern ssize_t nsc_gpio_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+
+extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index);
+
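
struct nsc_gpio_ops is the vtable through which scx200_gpio and pc8736x_gpio expose their pin accessors to the shared nsc_gpio file-operation helpers. A hedged sketch of how a driver might publish its ops; the my_gpio_* functions are hypothetical stand-ins for the real chip accessors:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/nsc_gpio.h>

static u32  my_gpio_config(unsigned iminor, u32 mask, u32 bits) { return 0; }
static int  my_gpio_get(unsigned iminor)		{ return 0; }
static void my_gpio_set(unsigned iminor, int state)	{ }
static void my_gpio_set_high(unsigned iminor)		{ }
static void my_gpio_set_low(unsigned iminor)		{ }
static void my_gpio_change(unsigned iminor)		{ }
static int  my_gpio_current(unsigned iminor)		{ return 0; }

static struct nsc_gpio_ops my_gpio_ops = {
	.owner		= THIS_MODULE,
	.gpio_config	= my_gpio_config,
	.gpio_dump	= nsc_gpio_dump,	/* shared dump helper */
	.gpio_get	= my_gpio_get,
	.gpio_set	= my_gpio_set,
	.gpio_set_high	= my_gpio_set_high,
	.gpio_set_low	= my_gpio_set_low,
	.gpio_change	= my_gpio_change,
	.gpio_current	= my_gpio_current,
	/* .dev is filled in at probe time so dev_dbg() has a device */
};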
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 62a8c22f5f60..983fca251b25 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -404,8 +404,8 @@ int pcibios_enable_device(struct pci_dev *, int mask);
char *pcibios_setup (char *str);
/* Used only when drivers/pci/setup.c is used */
-void pcibios_align_resource(void *, struct resource *,
- unsigned long, unsigned long);
+void pcibios_align_resource(void *, struct resource *, resource_size_t,
+ resource_size_t);
void pcibios_update_irq(struct pci_dev *, int irq);
/* Generic PCI functions used internally */
@@ -532,10 +532,10 @@ void pci_release_region(struct pci_dev *, int);
/* drivers/pci/bus.c */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
- unsigned long size, unsigned long align,
- unsigned long min, unsigned int type_mask,
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned int type_mask,
void (*alignf)(void *, struct resource *,
- unsigned long, unsigned long),
+ resource_size_t, resource_size_t),
void *alignf_data);
void pci_enable_bridges(struct pci_bus *bus);
@@ -730,7 +730,8 @@ static inline char *pci_name(struct pci_dev *pdev)
*/
#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, u64 *start, u64 *end)
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
{
*start = rsrc->start;
*end = rsrc->end;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c2fd2d19938b..9ae6b1a75366 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1202,6 +1202,7 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
@@ -2170,7 +2171,6 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
-#define PCI_DEVICE_ID_INTEL_GD31244 0x3200
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
diff --git a/include/linux/plist.h b/include/linux/plist.h
new file mode 100644
index 000000000000..b95818a037ad
--- /dev/null
+++ b/include/linux/plist.h
@@ -0,0 +1,248 @@
+/*
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This is a priority-sorted list of nodes; each node has a
+ * priority from INT_MIN (highest) to INT_MAX (lowest).
+ *
+ * Addition is O(K), removal is O(1), change of priority of a node is
+ * O(K) and K is the number of RT priority levels used in the system.
+ * (1 <= K <= 99)
+ *
+ * This list is really a list of lists:
+ *
+ * - The tier 1 list is the prio_list, different priority nodes.
+ *
+ * - The tier 2 list is the node_list, serialized nodes.
+ *
+ * Simple ASCII art explanation:
+ *
+ * |HEAD |
+ * | |
+ * |prio_list.prev|<------------------------------------|
+ * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-|
+ * |10 | |10| |21| |21| |21| |40| (prio)
+ * | | | | | | | | | | | |
+ * | | | | | | | | | | | |
+ * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
+ * |node_list.prev|<------------------------------------|
+ *
+ * The nodes on the prio_list are sorted by priority to simplify
+ * the insertion of new nodes. There are no nodes with duplicate
+ * priorities on that list.
+ *
+ * The nodes on the node_list are ordered by priority and can contain
+ * entries which have the same priority. Those entries are ordered
+ * FIFO.
+ *
+ * Addition means: look for the prio_list node in the prio_list
+ * for the priority of the node and insert it before the node_list
+ * entry of the next prio_list node. If it is the first node of
+ * that priority, add it to the prio_list in the right position and
+ * insert it into the serialized node_list.
+ *
+ * Removal means: remove it from the node_list, and remove it from
+ * the prio_list if the node_list list_head is non-empty. When
+ * removing from the prio_list it must be checked whether other
+ * entries of the same priority are on the list. If there
+ * is another entry of the same priority then that entry has to
+ * replace the removed entry on the prio_list. If the entry which
+ * is removed is the only entry of this priority then a simple
+ * removal from both lists is sufficient.
+ *
+ * INT_MIN is the highest priority, INT_MAX the lowest; 0 sits in
+ * the middle.
+ *
+ * No locking is done, up to the caller.
+ *
+ */
+#ifndef _LINUX_PLIST_H_
+#define _LINUX_PLIST_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+struct plist_head {
+ struct list_head prio_list;
+ struct list_head node_list;
+#ifdef CONFIG_DEBUG_PI_LIST
+ spinlock_t *lock;
+#endif
+};
+
+struct plist_node {
+ int prio;
+ struct plist_head plist;
+};
+
+#ifdef CONFIG_DEBUG_PI_LIST
+# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+#else
+# define PLIST_HEAD_LOCK_INIT(_lock)
+#endif
+
+/**
+ * #PLIST_HEAD_INIT - static struct plist_head initializer
+ *
+ * @head: struct plist_head variable name
+ * @_lock: list spinlock, needed only when CONFIG_DEBUG_PI_LIST is enabled
+ */
+#define PLIST_HEAD_INIT(head, _lock) \
+{ \
+ .prio_list = LIST_HEAD_INIT((head).prio_list), \
+ .node_list = LIST_HEAD_INIT((head).node_list), \
+ PLIST_HEAD_LOCK_INIT(&(_lock)) \
+}
+
+/**
+ * #PLIST_NODE_INIT - static struct plist_node initializer
+ *
+ * @node: struct plist_node variable name
+ * @__prio: initial node priority
+ */
+#define PLIST_NODE_INIT(node, __prio) \
+{ \
+ .prio = (__prio), \
+ .plist = PLIST_HEAD_INIT((node).plist, NULL), \
+}
+
+/**
+ * plist_head_init - dynamic struct plist_head initializer
+ *
+ * @head: &struct plist_head pointer
+ * @lock: list spinlock, remembered for debugging (CONFIG_DEBUG_PI_LIST)
+ */
+static inline void
+plist_head_init(struct plist_head *head, spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->lock = lock;
+#endif
+}
+
+/**
+ * plist_node_init - Dynamic struct plist_node initializer
+ *
+ * @node: &struct plist_node pointer
+ * @prio: initial node priority
+ */
+static inline void plist_node_init(struct plist_node *node, int prio)
+{
+ node->prio = prio;
+ plist_head_init(&node->plist, NULL);
+}
+
+extern void plist_add(struct plist_node *node, struct plist_head *head);
+extern void plist_del(struct plist_node *node, struct plist_head *head);
+
+/**
+ * plist_for_each - iterate over the plist
+ *
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define plist_for_each(pos, head) \
+ list_for_each_entry(pos, &(head)->node_list, plist.node_list)
+
+/**
+ * plist_for_each_safe - iterate over a plist safe against removal of
+ * list entry
+ *
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ */
+#define plist_for_each_safe(pos, n, head) \
+ list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list)
+
+/**
+ * plist_for_each_entry - iterate over list of given type
+ *
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @mem: the name of the list_struct within the struct.
+ */
+#define plist_for_each_entry(pos, head, mem) \
+ list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list)
+
+/**
+ * plist_for_each_entry_safe - iterate over list of given type safe against
+ * removal of list entry
+ *
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @m: the name of the list_struct within the struct.
+ */
+#define plist_for_each_entry_safe(pos, n, head, m) \
+ list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list)
+
+/**
+ * plist_head_empty - return !0 if a plist_head is empty
+ *
+ * @head: &struct plist_head pointer
+ */
+static inline int plist_head_empty(const struct plist_head *head)
+{
+ return list_empty(&head->node_list);
+}
+
+/**
+ * plist_node_empty - return !0 if plist_node is not on a list
+ *
+ * @node: &struct plist_node pointer
+ */
+static inline int plist_node_empty(const struct plist_node *node)
+{
+ return plist_head_empty(&node->plist);
+}
+
+/* All functions below assume the plist_head is not empty. */
+
+/**
+ * plist_first_entry - get the struct for the first entry
+ *
+ * @ptr: the &struct plist_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#ifdef CONFIG_DEBUG_PI_LIST
+# define plist_first_entry(head, type, member) \
+({ \
+ WARN_ON(plist_head_empty(head)); \
+ container_of(plist_first(head), type, member); \
+})
+#else
+# define plist_first_entry(head, type, member) \
+ container_of(plist_first(head), type, member)
+#endif
+
+/**
+ * plist_first - return the first node (and thus, highest priority)
+ *
+ * @head: the &struct plist_head pointer
+ *
+ * Assumes the plist is _not_ empty.
+ */
+static inline struct plist_node* plist_first(const struct plist_head *head)
+{
+ return list_entry(head->node_list.next,
+ struct plist_node, plist.node_list);
+}
+
+#endif
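
A short usage sketch of the plist API defined above: a statically initialized head, node insertion, and retrieval of the highest-priority entry. The waiter struct, wq_lock and the function names are made up for illustration:

#include <linux/plist.h>
#include <linux/spinlock.h>

struct waiter {
	struct plist_node	node;
	int			id;
};

static DEFINE_SPINLOCK(wq_lock);
static struct plist_head wait_queue = PLIST_HEAD_INIT(wait_queue, wq_lock);

static void enqueue_waiter(struct waiter *w, int prio)
{
	plist_node_init(&w->node, prio);	/* lower value == higher priority */
	spin_lock(&wq_lock);
	plist_add(&w->node, &wait_queue);	/* O(K) insert, FIFO within a priority */
	spin_unlock(&wq_lock);
}

static struct waiter *pick_top_waiter(void)
{
	struct waiter *top = NULL;

	spin_lock(&wq_lock);
	if (!plist_head_empty(&wait_queue))
		top = plist_first_entry(&wait_queue, struct waiter, node);
	spin_unlock(&wq_lock);
	return top;
}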
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 93b0959eb40f..ab8a8dd8d64c 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -389,7 +389,8 @@ int pnp_start_dev(struct pnp_dev *dev);
int pnp_stop_dev(struct pnp_dev *dev);
int pnp_activate_dev(struct pnp_dev *dev);
int pnp_disable_dev(struct pnp_dev *dev);
-void pnp_resource_change(struct resource *resource, unsigned long start, unsigned long size);
+void pnp_resource_change(struct resource *resource, resource_size_t start,
+ resource_size_t size);
/* protocol helpers */
int pnp_is_active(struct pnp_dev * dev);
@@ -434,7 +435,9 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline void pnp_resource_change(struct resource *resource, unsigned long start, unsigned long size) { }
+static inline void pnp_resource_change(struct resource *resource,
+ resource_size_t start,
+ resource_size_t size) { }
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev * dev) { return 0; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
new file mode 100644
index 000000000000..a5347c02432e
--- /dev/null
+++ b/include/linux/poison.h
@@ -0,0 +1,58 @@
+#ifndef _LINUX_POISON_H
+#define _LINUX_POISON_H
+
+/********** include/linux/list.h **********/
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100)
+#define LIST_POISON2 ((void *) 0x00200200)
+
+/********** mm/slab.c **********/
+/*
+ * Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
+#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
+
+/* ...and for poisoning */
+#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
+#define POISON_FREE 0x6b /* for use-after-free poisoning */
+#define POISON_END 0xa5 /* end-byte of poisoning */
+
+/********** arch/$ARCH/mm/init.c **********/
+#define POISON_FREE_INITMEM 0xcc
+
+/********** arch/x86_64/mm/init.c **********/
+#define POISON_FREE_INITDATA 0xba
+
+/********** arch/ia64/hp/common/sba_iommu.c **********/
+/*
+ * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
+ * value of "SBAIOMMU POISON\0" for spill-over poisoning.
+ */
+
+/********** fs/jbd/journal.c **********/
+#define JBD_POISON_FREE 0x5b
+
+/********** drivers/base/dmapool.c **********/
+#define POOL_POISON_FREED 0xa7 /* !inuse */
+#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
+
+/********** drivers/atm/ **********/
+#define ATM_POISON_FREE 0x12
+
+/********** kernel/mutexes **********/
+#define MUTEX_DEBUG_INIT 0x11
+#define MUTEX_DEBUG_FREE 0x22
+
+/********** security/ **********/
+#define KEY_DESTROY 0xbd
+
+/********** sound/oss/ **********/
+#define OSS_POISON_FREE 0xAB
+
+#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 5810d28fbed9..17e75783e3a5 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -99,9 +99,8 @@ extern void proc_misc_init(void);
struct mm_struct;
+void proc_flush_task(struct task_struct *task);
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
-struct dentry *proc_pid_unhash(struct task_struct *p);
-void proc_pid_flush(struct dentry *proc_dentry);
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
unsigned long task_vsize(struct mm_struct *);
int task_statm(struct mm_struct *, int *, int *, int *, int *);
@@ -211,8 +210,7 @@ static inline void proc_net_remove(const char *name)
#define proc_net_create(name, mode, info) ({ (void)(mode), NULL; })
static inline void proc_net_remove(const char *name) {}
-static inline struct dentry *proc_pid_unhash(struct task_struct *p) { return NULL; }
-static inline void proc_pid_flush(struct dentry *proc_dentry) { }
+static inline void proc_flush_task(struct task_struct *task) { }
static inline struct proc_dir_entry *create_proc_entry(const char *name,
mode_t mode, struct proc_dir_entry *parent) { return NULL; }
@@ -248,8 +246,8 @@ extern void kclist_add(struct kcore_list *, void *, size_t);
#endif
struct proc_inode {
- struct task_struct *task;
- int type;
+ struct pid *pid;
+ int fd;
union {
int (*proc_get_link)(struct inode *, struct dentry **, struct vfsmount **);
int (*proc_read)(struct task_struct *task, char *page);
@@ -268,4 +266,10 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode)
return PROC_I(inode)->pde;
}
+struct proc_maps_private {
+ struct pid *pid;
+ struct task_struct *task;
+ struct vm_area_struct *tail_vma;
+};
+
#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ee918bc6e18c..8b2749a259dc 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
-extern void __ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index 899437802aea..63df898fe2e9 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -140,6 +140,7 @@ typedef __u16 bitmap_counter_t;
enum bitmap_state {
BITMAP_ACTIVE = 0x001, /* the bitmap is in use */
BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
+ BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
BITMAP_HOSTENDIAN = 0x8000,
};
@@ -244,15 +245,9 @@ struct bitmap {
unsigned long daemon_lastrun; /* jiffies of last run */
unsigned long daemon_sleep; /* how many seconds between updates? */
- /*
- * bitmap_writeback_daemon waits for file-pages that have been written,
- * as there is no way to get a call-back when a page write completes.
- */
- mdk_thread_t *writeback_daemon;
- spinlock_t write_lock;
+ atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait;
- struct list_head complete_pages;
- mempool_t *write_pool;
+
};
/* the bitmap API */
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
index 7eaf290e10e7..ba15469daf11 100644
--- a/include/linux/raid/linear.h
+++ b/include/linux/raid/linear.h
@@ -13,8 +13,10 @@ typedef struct dev_info dev_info_t;
struct linear_private_data
{
+ struct linear_private_data *prev; /* earlier version */
dev_info_t **hash_table;
sector_t hash_spacing;
+ sector_t array_size;
int preshift; /* shift before dividing by hash_spacing */
dev_info_t disks[0];
};
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 66b44e5e0d6e..eb3e547c8fee 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -85,8 +85,6 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev);
extern void md_unplug_mddev(mddev_t *mddev);
-extern void md_print_devices (void);
-
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
@@ -97,7 +95,5 @@ extern void md_new_event(mddev_t *mddev);
extern void md_update_sb(mddev_t * mddev);
-#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
-
#endif
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index e2df61f5b09a..c1e0ac55bab5 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -40,7 +40,8 @@ typedef struct mdk_rdev_s mdk_rdev_t;
* options passed in raidrun:
*/
-#define MAX_CHUNK_SIZE (4096*1024)
+/* Currently this must fit in an 'int' */
+#define MAX_CHUNK_SIZE (1<<30)
/*
* MD's 'extended' device
@@ -57,6 +58,7 @@ struct mdk_rdev_s
struct page *sb_page;
int sb_loaded;
+ __u64 sb_events;
sector_t data_offset; /* start of data in array */
sector_t sb_offset;
int sb_size; /* bytes in the superblock */
@@ -87,6 +89,10 @@ struct mdk_rdev_s
* array and could again if we did a partial
* resync from the bitmap
*/
+ sector_t recovery_offset;/* If this device has been partially
+ * recovered, this is where we were
+ * up to.
+ */
atomic_t nr_pending; /* number of pending requests.
* only maintained for arrays that
@@ -182,6 +188,8 @@ struct mddev_s
#define MD_RECOVERY_REQUESTED 6
#define MD_RECOVERY_CHECK 7
#define MD_RECOVERY_RESHAPE 8
+#define MD_RECOVERY_FROZEN 9
+
unsigned long recovery;
int in_sync; /* know to not need resync */
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index f1fbae7e390e..b6ebc69bae54 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -265,9 +265,12 @@ struct mdp_superblock_1 {
/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET 1
+#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and
+ * must be honoured
+ */
#define MD_FEATURE_RESHAPE_ACTIVE 4
-#define MD_FEATURE_ALL 5
+#define MD_FEATURE_ALL (1|2|4)
#endif
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
index b1103298a8c2..c41e56a7c090 100644
--- a/include/linux/raid/raid10.h
+++ b/include/linux/raid/raid10.h
@@ -24,11 +24,16 @@ struct r10_private_data_s {
 int far_copies; /* number of copies laid out
* at large strides across drives
*/
+ int far_offset; /* far_copies are offset by 1 stripe
+ * instead of many
+ */
int copies; /* near_copies * far_copies.
* must be <= raid_disks
*/
sector_t stride; /* distance between far copies.
- * This is size / far_copies
+ * This is size / far_copies unless
+ * far_offset, in which case it is
+ * 1 stripe.
*/
int chunk_shift; /* shift from chunks to sectors */
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 914af667044f..20ed4c997636 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -212,6 +212,7 @@ struct raid5_private_data {
mddev_t *mddev;
struct disk_info *spare;
int chunk_size, level, algorithm;
+ int max_degraded;
int raid_disks, working_disks, failed_disks;
int max_nr_stripes;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6312758393b6..48dfe00070c7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -258,6 +258,7 @@ extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 5676c4210e2c..daa2d83cefe8 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1973,7 +1973,7 @@ void reiserfs_unmap_buffer(struct buffer_head *);
/* file.c */
extern struct inode_operations reiserfs_file_inode_operations;
extern const struct file_operations reiserfs_file_operations;
-extern struct address_space_operations reiserfs_address_space_operations;
+extern const struct address_space_operations reiserfs_address_space_operations;
/* fix_nodes.c */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
new file mode 100644
index 000000000000..fa4a3b82ba70
--- /dev/null
+++ b/include/linux/rtmutex.h
@@ -0,0 +1,117 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains the public data structure and API definitions.
+ */
+
+#ifndef __LINUX_RT_MUTEX_H
+#define __LINUX_RT_MUTEX_H
+
+#include <linux/linkage.h>
+#include <linux/plist.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * The rt_mutex structure
+ *
+ * @wait_lock: spinlock to protect the structure
+ * @wait_list: plist head to enqueue waiters in priority order
+ * @owner: the mutex owner
+ */
+struct rt_mutex {
+ spinlock_t wait_lock;
+ struct plist_head wait_list;
+ struct task_struct *owner;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
+ struct list_head held_list_entry;
+ unsigned long acquire_ip;
+ const char *name, *file;
+ int line;
+ void *magic;
+#endif
+};
+
+struct rt_mutex_waiter;
+struct hrtimer_sleeper;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ extern int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len);
+ extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+#else
+ static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+ {
+ return 0;
+ }
+# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__)
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+#else
+# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
+# define rt_mutex_debug_task_free(t) do { } while (0)
+#endif
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { .wait_lock = SPIN_LOCK_UNLOCKED \
+ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ , .owner = NULL \
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+
+#define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+
+/**
+ * rt_mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+ return lock->owner != NULL;
+}
+
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock);
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock);
+
+extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+extern void rt_mutex_unlock(struct rt_mutex *lock);
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define INIT_RT_MUTEX_DEBUG(tsk) \
+ .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
+ .held_list_lock = SPIN_LOCK_UNLOCKED
+#else
+# define INIT_RT_MUTEX_DEBUG(tsk)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
+ INIT_RT_MUTEX_DEBUG(tsk)
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
+#endif
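
The rt_mutex is used like a regular mutex, but a blocked higher-priority waiter boosts the owner's priority (priority inheritance). A minimal, illustrative usage sketch; my_lock and the functions around it are not part of this patch:

#include <linux/rtmutex.h>
#include <linux/errno.h>

static DEFINE_RT_MUTEX(my_lock);

static void my_critical_section(void)
{
	rt_mutex_lock(&my_lock);	/* may boost the current owner via PI */
	/* ... touch shared state ... */
	rt_mutex_unlock(&my_lock);
}

static int my_try(void)
{
	if (!rt_mutex_trylock(&my_lock))
		return -EBUSY;		/* owned elsewhere, do not block */
	rt_mutex_unlock(&my_lock);
	return 0;
}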
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d11d9310db0..821f0481ebe1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
+#include <linux/rtmutex.h>
#include <linux/time.h>
#include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
#include <asm/processor.h>
struct exec_domain;
+struct futex_pi_state;
/*
* List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
+extern unsigned long weighted_cpuload(const int cpu);
/*
@@ -494,8 +497,11 @@ struct signal_struct {
#define MAX_PRIO (MAX_RT_PRIO + 40)
-#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p) rt_prio((p)->prio)
#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
+#define has_rt_policy(p) \
+ unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
/*
* Some day this will be a full-fledged user tracking system..
@@ -558,9 +564,9 @@ enum idle_type
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#ifdef CONFIG_SMP
#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
+#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 4 /* Balance on exec */
@@ -569,6 +575,11 @@ enum idle_type
#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
+
+#define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \
+ ? SD_POWERSAVINGS_BALANCE : 0)
+
struct sched_group {
struct sched_group *next; /* Must be a circular list */
@@ -638,7 +649,7 @@ struct sched_domain {
#endif
};
-extern void partition_sched_domains(cpumask_t *partition1,
+extern int partition_sched_domains(cpumask_t *partition1,
cpumask_t *partition2);
/*
@@ -713,10 +724,13 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
#endif
- int prio, static_prio;
+#endif
+ int load_weight; /* for niceness load balancing purposes */
+ int prio, static_prio, normal_prio;
struct list_head run_list;
prio_array_t *array;
@@ -842,8 +856,20 @@ struct task_struct {
u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
spinlock_t alloc_lock;
-/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
- spinlock_t proc_lock;
+
+ /* Protection of the PI data structures: */
+ spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task */
+ struct plist_head pi_waiters;
+ /* Deadlock detection and priority inheritance handling */
+ struct rt_mutex_waiter *pi_blocked_on;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+ spinlock_t held_list_lock;
+ struct list_head held_list_head;
+# endif
+#endif
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
@@ -856,7 +882,6 @@ struct task_struct {
/* VM state */
struct reclaim_state *reclaim_state;
- struct dentry *proc_dentry;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
@@ -891,6 +916,8 @@ struct task_struct {
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
@@ -958,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1012,6 +1040,19 @@ static inline void idle_task_exit(void) {}
#endif
extern void sched_idle_next(void);
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(task_t *p);
+extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
+#else
+static inline int rt_mutex_getprio(task_t *p)
+{
+ return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p) do { } while (0)
+#endif
+
extern void set_user_nice(task_t *p, long nice);
extern int task_prio(const task_t *p);
extern int task_nice(const task_t *p);
@@ -1411,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+#include <linux/sysdev.h>
+extern int sched_mc_power_savings, sched_smt_power_savings;
+extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
extern void normalize_rt_tasks(void);
#ifdef CONFIG_PM
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
index a22f9e173ad2..693c0557e70b 100644
--- a/include/linux/scx200.h
+++ b/include/linux/scx200.h
@@ -49,10 +49,3 @@ extern unsigned scx200_cb_base;
#define SCx200_REV 0x3d /* Revision Register */
#define SCx200_CBA 0x3e /* Configuration Base Address Register */
#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */
-
-/*
- Local variables:
- compile-command: "make -C ../.. bzImage modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h
index 30cdd648ba79..90dd069cc145 100644
--- a/include/linux/scx200_gpio.h
+++ b/include/linux/scx200_gpio.h
@@ -1,6 +1,6 @@
#include <linux/spinlock.h>
-u32 scx200_gpio_configure(int index, u32 set, u32 clear);
+u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear);
extern unsigned scx200_gpio_base;
extern long scx200_gpio_shadow[2];
@@ -17,7 +17,7 @@ extern long scx200_gpio_shadow[2];
/* returns the value of the GPIO pin */
-static inline int scx200_gpio_get(int index) {
+static inline int scx200_gpio_get(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR + 0x04;
__SCx200_GPIO_INDEX;
@@ -29,7 +29,7 @@ static inline int scx200_gpio_get(int index) {
driven if the GPIO is configured as an output, it might not be the
state of the GPIO right now if the GPIO is configured as an input) */
-static inline int scx200_gpio_current(int index) {
+static inline int scx200_gpio_current(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_INDEX;
@@ -38,7 +38,7 @@ static inline int scx200_gpio_current(int index) {
/* drive the GPIO signal high */
-static inline void scx200_gpio_set_high(int index) {
+static inline void scx200_gpio_set_high(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -49,7 +49,7 @@ static inline void scx200_gpio_set_high(int index) {
/* drive the GPIO signal low */
-static inline void scx200_gpio_set_low(int index) {
+static inline void scx200_gpio_set_low(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -60,7 +60,7 @@ static inline void scx200_gpio_set_low(int index) {
/* drive the GPIO signal to state */
-static inline void scx200_gpio_set(int index, int state) {
+static inline void scx200_gpio_set(unsigned index, int state) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -73,7 +73,7 @@ static inline void scx200_gpio_set(int index, int state) {
}
/* toggle the GPIO signal */
-static inline void scx200_gpio_change(int index) {
+static inline void scx200_gpio_change(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -87,10 +87,3 @@ static inline void scx200_gpio_change(int index) {
#undef __SCx200_GPIO_SHADOW
#undef __SCx200_GPIO_INDEX
#undef __SCx200_GPIO_OUT
-
-/*
- Local variables:
- compile-command: "make -C ../.. bzImage modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/include/linux/security.h b/include/linux/security.h
index d2c17bd91a29..51805806f974 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -862,6 +862,7 @@ struct swap_info_struct;
* Permit allocation of a key and assign security data. Note that key does
* not have a serial number assigned at this point.
* @key points to the key.
+ * @flags contains the allocation flags.
* Return 0 if permission is granted, -ve error otherwise.
* @key_free:
* Notification of destruction; free security data.
@@ -1324,7 +1325,7 @@ struct security_operations {
/* key management security hooks */
#ifdef CONFIG_KEYS
- int (*key_alloc)(struct key *key, struct task_struct *tsk);
+ int (*key_alloc)(struct key *key, struct task_struct *tsk, unsigned long flags);
void (*key_free)(struct key *key);
int (*key_permission)(key_ref_t key_ref,
struct task_struct *context,
@@ -3040,9 +3041,10 @@ static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid
#ifdef CONFIG_KEYS
#ifdef CONFIG_SECURITY
static inline int security_key_alloc(struct key *key,
- struct task_struct *tsk)
+ struct task_struct *tsk,
+ unsigned long flags)
{
- return security_ops->key_alloc(key, tsk);
+ return security_ops->key_alloc(key, tsk, flags);
}
static inline void security_key_free(struct key *key)
@@ -3060,7 +3062,8 @@ static inline int security_key_permission(key_ref_t key_ref,
#else
static inline int security_key_alloc(struct key *key,
- struct task_struct *tsk)
+ struct task_struct *tsk,
+ unsigned long flags)
{
return 0;
}
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e928c0dcc297..c8bb68099eb9 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -642,10 +642,14 @@ struct spi_board_info {
u16 bus_num;
u16 chip_select;
+ /* mode becomes spi_device.mode, and is essential for chips
+ * where the default of SPI_CS_HIGH = 0 is wrong.
+ */
+ u8 mode;
+
/* ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
* needed to behave without being bound to a driver:
- * - chipselect polarity
* - quirks like clock rate mattering when not selected
*/
};
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 9b8bcf125c18..6e112cc5cdda 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -126,7 +126,7 @@ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
/* Just increments the mechanism's reference count and returns its input: */
struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
-/* For every succesful gss_mech_get or gss_mech_get_by_* call there must be a
+/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
* corresponding call to gss_mech_put. */
void gss_mech_put(struct gss_api_mech *);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index e82cb10fb3ea..96e31aa64cc7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -71,7 +71,6 @@ struct saved_context;
void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt);
unsigned long get_safe_page(gfp_t gfp_mask);
-int swsusp_add_arch_pages(unsigned long start, unsigned long end);
/*
* XXX: We try to keep some more pages free so that I/O operations succeed
diff --git a/include/linux/swap.h b/include/linux/swap.h
index dc3f3aa0c83e..c41e2d6d1acc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -199,6 +199,8 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
+extern int kswapd_run(int nid);
+
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 33785b79d548..008f04c56737 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -174,9 +174,9 @@ asmlinkage long sys_waitid(int which, pid_t pid,
int options, struct rusage __user *ru);
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
asmlinkage long sys_set_tid_address(int __user *tidptr);
-asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
struct timespec __user *utime, u32 __user *uaddr2,
- int val3);
+ u32 val3);
asmlinkage long sys_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6a60770984e9..46e4d8f2771f 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -148,9 +148,12 @@ enum
KERN_SPIN_RETRY=70, /* int: number of spinlock retries */
KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
+ KERN_COMPAT_LOG=73, /* int: print compat layer messages */
+ KERN_MAX_LOCK_DEPTH=74, /* int: max recursion depth for rt-mutex deadlock detection */
};
+
/* CTL_VM names: */
enum
{
@@ -187,6 +190,7 @@ enum
VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
+ VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
};
diff --git a/include/linux/time.h b/include/linux/time.h
index 0cd696cee998..c05f8bb9a323 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -28,10 +28,13 @@ struct timezone {
#ifdef __KERNEL__
/* Parameters used to convert the timespec values: */
-#define MSEC_PER_SEC 1000L
-#define USEC_PER_SEC 1000000L
-#define NSEC_PER_SEC 1000000000L
-#define NSEC_PER_USEC 1000L
+#define MSEC_PER_SEC 1000L
+#define USEC_PER_MSEC 1000L
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+#define FSEC_PER_SEC 1000000000000000L
static inline int timespec_equal(struct timespec *a, struct timespec *b)
{
@@ -77,6 +80,8 @@ extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
+void timekeeping_init(void);
+
static inline unsigned long get_seconds(void)
{
return xtime.tv_sec;
@@ -100,6 +105,7 @@ extern int do_getitimer(int which, struct itimerval *value);
extern void getnstimeofday(struct timespec *tv);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+extern int timekeeping_is_continuous(void);
/**
* timespec_to_ns - Convert timespec to nanoseconds
@@ -142,6 +148,20 @@ extern struct timespec ns_to_timespec(const s64 nsec);
*/
extern struct timeval ns_to_timeval(const s64 nsec);
+/**
+ * timespec_add_ns - Adds nanoseconds to a timespec
+ * @a: pointer to timespec to be incremented
+ * @ns: unsigned nanoseconds value to be added
+ */
+static inline void timespec_add_ns(struct timespec *a, u64 ns)
+{
+ ns += a->tv_nsec;
+ while (unlikely(ns >= NSEC_PER_SEC)) {
+ ns -= NSEC_PER_SEC;
+ a->tv_sec++;
+ }
+ a->tv_nsec = ns;
+}
#endif /* __KERNEL__ */
#define NFDBITS __NFDBITS
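A short worked example of the new timespec_add_ns() helper; the values are
illustrative only:

        struct timespec ts = { .tv_sec = 0, .tv_nsec = 999999000 };

        /* adding 2 us carries tv_nsec over once: ts becomes 1 s + 1000 ns */
        timespec_add_ns(&ts, 2 * NSEC_PER_USEC);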
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 34d3ccff7bbb..19bb6538b49e 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -303,6 +303,8 @@ time_interpolator_reset(void)
#endif /* !CONFIG_TIME_INTERPOLATION */
+#define TICK_LENGTH_SHIFT 32
+
/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
extern u64 current_tick_length(void);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a305ae2e44b6..ec1eca85290a 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -134,7 +134,8 @@
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE, \
+ | SD_WAKE_AFFINE \
+ | BALANCE_FOR_POWER, \
.last_balance = jiffies, \
.balance_interval = 1, \
.nr_balance_failed = 0, \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index cb35ca50a0a6..b3b807e4b050 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -57,7 +57,6 @@ struct tty_buffer {
unsigned char *flag_buf_ptr;
int used;
int size;
- int active;
int commit;
int read;
/* Data points here */
@@ -259,7 +258,6 @@ struct tty_struct {
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
#define TTY_PUSH 6 /* n_tty private */
#define TTY_CLOSING 7 /* ->close() in progress */
-#define TTY_DONT_FLIP 8 /* Defer buffer flip */
#define TTY_LDISC 9 /* Line discipline attached */
#define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */
#define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 31548303ee37..eb677cf56106 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -12,7 +12,7 @@ static inline int tty_insert_flip_char(struct tty_struct *tty,
unsigned char ch, char flag)
{
struct tty_buffer *tb = tty->buf.tail;
- if (tb && tb->active && tb->used < tb->size) {
+ if (tb && tb->used < tb->size) {
tb->flag_buf_ptr[tb->used] = flag;
tb->char_buf_ptr[tb->used++] = ch;
return 1;
diff --git a/include/linux/types.h b/include/linux/types.h
index a5e46e783ffa..3f235660a3cd 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -177,8 +177,15 @@ typedef __u64 __bitwise __be64;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
+
+#ifdef CONFIG_RESOURCES_64BIT
+typedef u64 resource_size_t;
+#else
+typedef u32 resource_size_t;
#endif
+#endif /* __KERNEL__ */
+
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
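Since the width of resource_size_t now depends on CONFIG_RESOURCES_64BIT, code
that prints such values should cast explicitly; a hedged fragment (res is an
assumed struct resource pointer, not something defined by this patch):

        resource_size_t start = res->start;

        /* works for both the 32-bit and the 64-bit configuration */
        printk(KERN_DEBUG "resource starts at %llx\n",
               (unsigned long long)start);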
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index 914f911325be..e39b7cc43390 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -966,7 +966,7 @@ extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
extern struct inode_operations ufs_file_inode_operations;
extern const struct file_operations ufs_file_operations;
-extern struct address_space_operations ufs_aops;
+extern const struct address_space_operations ufs_aops;
/* ialloc.c */
extern void ufs_free_inode (struct inode *inode);
diff --git a/include/linux/unwind.h b/include/linux/unwind.h
new file mode 100644
index 000000000000..ce48e2cd37a2
--- /dev/null
+++ b/include/linux/unwind.h
@@ -0,0 +1,127 @@
+#ifndef _LINUX_UNWIND_H
+#define _LINUX_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/config.h>
+
+struct module;
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <asm/unwind.h>
+
+#ifndef ARCH_UNWIND_SECTION_NAME
+#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
+#endif
+
+/*
+ * Initialize unwind support.
+ */
+extern void unwind_init(void);
+
+#ifdef CONFIG_MODULES
+
+extern void *unwind_add_table(struct module *,
+ const void *table_start,
+ unsigned long table_size);
+
+extern void unwind_remove_table(void *handle, int init_only);
+
+#endif
+
+extern int unwind_init_frame_info(struct unwind_frame_info *,
+ struct task_struct *,
+ /*const*/ struct pt_regs *);
+
+/*
+ * Prepare to unwind a blocked task.
+ */
+extern int unwind_init_blocked(struct unwind_frame_info *,
+ struct task_struct *);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern int unwind_init_running(struct unwind_frame_info *,
+ asmlinkage int (*callback)(struct unwind_frame_info *,
+ void *arg),
+ void *arg);
+
+/*
+ * Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error.
+ */
+extern int unwind(struct unwind_frame_info *);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unwind_to_user(struct unwind_frame_info *);
+
+#else
+
+struct unwind_frame_info {};
+
+static inline void unwind_init(void) {}
+
+#ifdef CONFIG_MODULES
+
+static inline void *unwind_add_table(struct module *mod,
+ const void *table_start,
+ unsigned long table_size)
+{
+ return NULL;
+}
+
+#endif
+
+static inline void unwind_remove_table(void *handle, int init_only)
+{
+}
+
+static inline int unwind_init_frame_info(struct unwind_frame_info *info,
+ struct task_struct *tsk,
+ const struct pt_regs *regs)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind_init_blocked(struct unwind_frame_info *info,
+ struct task_struct *tsk)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind_init_running(struct unwind_frame_info *info,
+ asmlinkage int (*cb)(struct unwind_frame_info *,
+ void *arg),
+ void *arg)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind(struct unwind_frame_info *info)
+{
+ return -ENOSYS;
+}
+
+static inline int unwind_to_user(struct unwind_frame_info *info)
+{
+ return -ENOSYS;
+}
+
+#endif
+
+#endif /* _LINUX_UNWIND_H */
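A minimal sketch of how a caller might use the API declared above to walk a
blocked task's stack; the function name is hypothetical, and on kernels without
CONFIG_STACK_UNWIND the stub unwind_init_blocked() simply returns -ENOSYS:

        #include <linux/unwind.h>

        static void example_walk_blocked(struct task_struct *tsk)
        {
                struct unwind_frame_info info;

                if (unwind_init_blocked(&info, tsk) != 0)
                        return;

                /* step frame by frame until unwind() reports an error or the end */
                while (unwind(&info) == 0)
                        ;       /* frame registers are exposed via asm/unwind.h */
        }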
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4f428547ec09..a62673dad76e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -245,6 +245,7 @@ struct v4l2_pix_format
#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */
#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */
+#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:1:1 16x16 macroblocks */
/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */
@@ -821,6 +822,11 @@ enum v4l2_mpeg_stream_type {
#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
+enum v4l2_mpeg_stream_vbi_fmt {
+ V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
+ V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
+};
/* MPEG audio */
#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 1192ed8f4fe8..011bcfeb9f09 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -28,6 +28,9 @@ struct watchdog_info {
#define WDIOC_KEEPALIVE _IOR(WATCHDOG_IOCTL_BASE, 5, int)
#define WDIOC_SETTIMEOUT _IOWR(WATCHDOG_IOCTL_BASE, 6, int)
#define WDIOC_GETTIMEOUT _IOR(WATCHDOG_IOCTL_BASE, 7, int)
+#define WDIOC_SETPRETIMEOUT _IOWR(WATCHDOG_IOCTL_BASE, 8, int)
+#define WDIOC_GETPRETIMEOUT _IOR(WATCHDOG_IOCTL_BASE, 9, int)
+#define WDIOC_GETTIMELEFT _IOR(WATCHDOG_IOCTL_BASE, 10, int)
#define WDIOF_UNKNOWN -1 /* Unknown flag error */
#define WDIOS_UNKNOWN -1 /* Unknown status error */
@@ -38,9 +41,10 @@ struct watchdog_info {
#define WDIOF_EXTERN2 0x0008 /* External relay 2 */
#define WDIOF_POWERUNDER 0x0010 /* Power bad/power fault */
#define WDIOF_CARDRESET 0x0020 /* Card previously reset the CPU */
-#define WDIOF_POWEROVER 0x0040 /* Power over voltage */
-#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */
-#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */
+#define WDIOF_POWEROVER 0x0040 /* Power over voltage */
+#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */
+#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */
+#define WDIOF_PRETIMEOUT 0x0200 /* Pretimeout (in seconds), get/set */
#define WDIOF_KEEPALIVEPING 0x8000 /* Keep alive ping reply */
#define WDIOS_DISABLECARD 0x0001 /* Turn off the watchdog timer */
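A small userspace sketch of the new pretimeout/time-left ioctls; it assumes a
kernel carrying this patch and a driver that advertises WDIOF_PRETIMEOUT:

        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/watchdog.h>

        int main(void)
        {
                int fd = open("/dev/watchdog", O_WRONLY);
                int pretimeout = 10, timeleft;

                if (fd < 0)
                        return 1;

                /* ask to be warned 10 seconds before the watchdog would reset the box */
                ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);

                /* query how many seconds remain until the reset */
                if (ioctl(fd, WDIOC_GETTIMELEFT, &timeleft) == 0)
                        printf("%d seconds left\n", timeleft);

                write(fd, "V", 1);      /* magic close, if supported */
                close(fd);
                return 0;
        }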
diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h
index 51fb06b4c394..d91d88f93c8b 100644
--- a/include/media/cx2341x.h
+++ b/include/media/cx2341x.h
@@ -25,8 +25,13 @@ enum cx2341x_port {
CX2341X_PORT_SERIAL = 2
};
+enum cx2341x_cap {
+ CX2341X_CAP_HAS_SLICED_VBI = 1 << 0,
+};
+
struct cx2341x_mpeg_params {
/* misc */
+ u32 capabilities;
enum cx2341x_port port;
u16 width;
u16 height;
@@ -34,6 +39,7 @@ struct cx2341x_mpeg_params {
/* stream */
enum v4l2_mpeg_stream_type stream_type;
+ enum v4l2_mpeg_stream_vbi_fmt stream_vbi_fmt;
/* audio */
enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq;
@@ -83,9 +89,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params,
struct v4l2_queryctrl *qctrl);
const char **cx2341x_ctrl_get_menu(u32 id);
int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
- struct v4l2_ext_controls *ctrls, int cmd);
+ struct v4l2_ext_controls *ctrls, unsigned int cmd);
void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p);
-void cx2341x_log_status(struct cx2341x_mpeg_params *p, int cardid);
+void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix);
/* Firmware names */
#define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw"
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
index 098607cd4b78..e07136d74c2f 100644
--- a/include/net/tipc/tipc_bearer.h
+++ b/include/net/tipc/tipc_bearer.h
@@ -49,10 +49,18 @@
#define TIPC_MEDIA_TYPE_ETH 1
+/*
+ * Destination address structure used by TIPC bearers when sending messages
+ *
+ * IMPORTANT: The fields of this structure MUST be stored using the byte
+ * order indicated below, as the structure is exchanged between nodes
+ * as part of a link setup process.
+ */
+
struct tipc_media_addr {
- __u32 type;
+ __u32 type; /* bearer type (network byte order) */
union {
- __u8 eth_addr[6]; /* Ethernet bearer */
+ __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
#if 0
/* Prototypes for other possible bearer types */
diff --git a/init/Kconfig b/init/Kconfig
index e0358f3946a1..f70f2fd273c2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1,3 +1,11 @@
+config DEFCONFIG_LIST
+ string
+ option defconfig_list
+ default "/lib/modules/$UNAME_RELEASE/.config"
+ default "/etc/kernel-config"
+ default "/boot/config-$UNAME_RELEASE"
+ default "arch/$ARCH/defconfig"
+
menu "Code maturity level options"
config EXPERIMENTAL
@@ -236,16 +244,6 @@ config UID16
help
This enables the legacy 16-bit UID syscall wrappers.
-config VM86
- depends X86
- default y
- bool "Enable VM86 support" if EMBEDDED
- help
- This option is required by programs like DOSEMU to run 16-bit legacy
- code on X86 processors. It also may be needed by software like
- XFree86 to initialize some video cards via BIOS. Disabling this
- option saves about 6k.
-
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size (Look out for broken compilers!)"
default y
@@ -341,9 +339,14 @@ config BASE_FULL
kernel data structures. This saves memory on small machines,
but may reduce performance.
+config RT_MUTEXES
+ boolean
+ select PLIST
+
config FUTEX
bool "Enable futex support" if EMBEDDED
default y
+ select RT_MUTEXES
help
Disabling this option will cause the kernel to be built without
support for "fast userspace mutexes". The resulting kernel may not
diff --git a/init/initramfs.c b/init/initramfs.c
index f81cfa40a719..d28c1094d7e5 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -30,6 +30,7 @@ static void __init free(void *where)
static __initdata struct hash {
int ino, minor, major;
+ mode_t mode;
struct hash *next;
char name[N_ALIGN(PATH_MAX)];
} *head[32];
@@ -41,7 +42,8 @@ static inline int hash(int major, int minor, int ino)
return tmp & 31;
}
-static char __init *find_link(int major, int minor, int ino, char *name)
+static char __init *find_link(int major, int minor, int ino,
+ mode_t mode, char *name)
{
struct hash **p, *q;
for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
@@ -51,14 +53,17 @@ static char __init *find_link(int major, int minor, int ino, char *name)
continue;
if ((*p)->major != major)
continue;
+ if (((*p)->mode ^ mode) & S_IFMT)
+ continue;
return (*p)->name;
}
q = (struct hash *)malloc(sizeof(struct hash));
if (!q)
panic("can't allocate link hash entry");
- q->ino = ino;
- q->minor = minor;
q->major = major;
+ q->minor = minor;
+ q->ino = ino;
+ q->mode = mode;
strcpy(q->name, name);
q->next = NULL;
*p = q;
@@ -229,13 +234,25 @@ static int __init do_reset(void)
static int __init maybe_link(void)
{
if (nlink >= 2) {
- char *old = find_link(major, minor, ino, collected);
+ char *old = find_link(major, minor, ino, mode, collected);
if (old)
return (sys_link(old, collected) < 0) ? -1 : 1;
}
return 0;
}
+static void __init clean_path(char *path, mode_t mode)
+{
+ struct stat st;
+
+ if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
+ if (S_ISDIR(st.st_mode))
+ sys_rmdir(path);
+ else
+ sys_unlink(path);
+ }
+}
+
static __initdata int wfd;
static int __init do_name(void)
@@ -248,9 +265,15 @@ static int __init do_name(void)
}
if (dry_run)
return 0;
+ clean_path(collected, mode);
if (S_ISREG(mode)) {
- if (maybe_link() >= 0) {
- wfd = sys_open(collected, O_WRONLY|O_CREAT, mode);
+ int ml = maybe_link();
+ if (ml >= 0) {
+ int openflags = O_WRONLY|O_CREAT;
+ if (ml != 1)
+ openflags |= O_TRUNC;
+ wfd = sys_open(collected, openflags, mode);
+
if (wfd >= 0) {
sys_fchown(wfd, uid, gid);
sys_fchmod(wfd, mode);
@@ -291,6 +314,7 @@ static int __init do_copy(void)
static int __init do_symlink(void)
{
collected[N_ALIGN(name_len) + body_len] = '\0';
+ clean_path(collected, 0);
sys_symlink(collected + N_ALIGN(name_len), collected);
sys_lchown(collected, uid, gid);
state = SkipIt;
diff --git a/init/main.c b/init/main.c
index f715b9b89753..0d57f6ccb63a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -47,6 +47,8 @@
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
+#include <linux/unwind.h>
+#include <linux/buffer_head.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -79,7 +81,6 @@ extern void mca_init(void);
extern void sbus_init(void);
extern void sysctl_init(void);
extern void signals_init(void);
-extern void buffer_init(void);
extern void pidhash_init(void);
extern void pidmap_init(void);
extern void prio_tree_init(void);
@@ -482,6 +483,7 @@ asmlinkage void __init start_kernel(void)
__stop___param - __start___param,
&unknown_bootoption);
sort_main_extable();
+ unwind_init();
trap_init();
rcu_init();
init_IRQ();
@@ -490,6 +492,7 @@ asmlinkage void __init start_kernel(void)
hrtimers_init();
softirq_init();
time_init();
+ timekeeping_init();
/*
* HACK ALERT! This is early. We're enabling the console before
diff --git a/kernel/Makefile b/kernel/Makefile
index f6ef00f4f90f..82fb182f6f61 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,17 +10,22 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o
+obj-y += time/
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_FUTEX) += futex.o
ifeq ($(CONFIG_COMPAT),y)
obj-$(CONFIG_FUTEX) += futex_compat.o
endif
+obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
+obj-$(CONFIG_STACK_UNWIND) += unwind.o
obj-$(CONFIG_PM) += power/
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 368c4f03fe0e..126ca43d5d2b 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -521,6 +521,7 @@ static void do_acct_process(struct file *file)
/**
* acct_init_pacct - initialize a new pacct_struct
+ * @pacct: per-process accounting info struct to initialize
*/
void acct_init_pacct(struct pacct_struct *pacct)
{
@@ -576,7 +577,7 @@ void acct_collect(long exitcode, int group_dead)
*
* handles process accounting for an exiting task
*/
-void acct_process()
+void acct_process(void)
{
struct file *file = NULL;
diff --git a/kernel/audit.c b/kernel/audit.c
index 7dfac7031bd7..82443fb433ef 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -818,7 +818,7 @@ err:
*/
unsigned int audit_serial(void)
{
- static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(serial_lock);
static unsigned int serial = 0;
unsigned long flags;
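The SPIN_LOCK_UNLOCKED -> DEFINE_SPINLOCK() change above is the preferred way
to declare statically initialized locks; a generic sketch of the idiom, with an
illustrative lock and counter rather than the audit code itself:

        static DEFINE_SPINLOCK(example_lock);   /* replaces "= SPIN_LOCK_UNLOCKED" */

        static unsigned int example_next_serial(void)
        {
                static unsigned int serial;
                unsigned int ret;

                spin_lock(&example_lock);
                ret = ++serial;
                spin_unlock(&example_lock);

                return ret;
        }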
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 9ebd96fda295..dc5e3f01efe7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -658,8 +658,7 @@ static void audit_log_task_context(struct audit_buffer *ab)
return;
error_path:
- if (ctx)
- kfree(ctx);
+ kfree(ctx);
audit_panic("error in audit_log_task_context");
return;
}
@@ -1367,7 +1366,7 @@ int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
* @mqdes: MQ descriptor
* @msg_len: Message length
* @msg_prio: Message priority
- * @abs_timeout: Message timeout in absolute time
+ * @u_abs_timeout: Message timeout in absolute time
*
* Returns 0 for success or NULL context or < 0 on error.
*/
@@ -1409,8 +1408,8 @@ int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
* __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive
* @mqdes: MQ descriptor
* @msg_len: Message length
- * @msg_prio: Message priority
- * @abs_timeout: Message timeout in absolute time
+ * @u_msg_prio: Message priority
+ * @u_abs_timeout: Message timeout in absolute time
*
* Returns 0 for success or NULL context or < 0 on error.
*/
@@ -1558,7 +1557,6 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
* @uid: msgq user id
* @gid: msgq group id
* @mode: msgq mode (permissions)
- * @ipcp: in-kernel IPC permissions
*
* Returns 0 for success or NULL context or < 0 on error.
*/
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fe2b8d0bfe4c..70fbf2e83766 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -13,12 +13,12 @@
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
/* This protects CPUs going up and down... */
-static DECLARE_MUTEX(cpucontrol);
+static DEFINE_MUTEX(cpucontrol);
-static BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *lock_cpu_hotplug_owner;
@@ -30,9 +30,9 @@ static int __lock_cpu_hotplug(int interruptible)
if (lock_cpu_hotplug_owner != current) {
if (interruptible)
- ret = down_interruptible(&cpucontrol);
+ ret = mutex_lock_interruptible(&cpucontrol);
else
- down(&cpucontrol);
+ mutex_lock(&cpucontrol);
}
/*
@@ -56,7 +56,7 @@ void unlock_cpu_hotplug(void)
{
if (--lock_cpu_hotplug_depth == 0) {
lock_cpu_hotplug_owner = NULL;
- up(&cpucontrol);
+ mutex_unlock(&cpucontrol);
}
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
@@ -69,10 +69,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
-int register_cpu_notifier(struct notifier_block *nb)
+int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&cpu_chain, nb);
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
@@ -81,7 +84,6 @@ void unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_cpu_notifier);
-#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
struct task_struct *p;
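The cpucontrol change above follows the usual semaphore-to-mutex conversion;
a generic before/after sketch with an illustrative lock name:

        /* before: a binary semaphore used as a sleeping lock */
        static DECLARE_MUTEX(example_sem);              /* down()/up() */

        /* after: a real mutex, with an interruptible variant where needed */
        static DEFINE_MUTEX(example_mutex);

        static int example_critical_section(void)
        {
                if (mutex_lock_interruptible(&example_mutex))
                        return -EINTR;
                /* ... work protected by the mutex ... */
                mutex_unlock(&example_mutex);
                return 0;
        }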
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b602f73fb38d..1535af3a912d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2442,31 +2442,43 @@ void __cpuset_memory_pressure_bump(void)
*/
static int proc_cpuset_show(struct seq_file *m, void *v)
{
+ struct pid *pid;
struct task_struct *tsk;
char *buf;
- int retval = 0;
+ int retval;
+ retval = -ENOMEM;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
- return -ENOMEM;
+ goto out;
+
+ retval = -ESRCH;
+ pid = m->private;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk)
+ goto out_free;
- tsk = m->private;
+ retval = -EINVAL;
mutex_lock(&manage_mutex);
+
retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
if (retval < 0)
- goto out;
+ goto out_unlock;
seq_puts(m, buf);
seq_putc(m, '\n');
-out:
+out_unlock:
mutex_unlock(&manage_mutex);
+ put_task_struct(tsk);
+out_free:
kfree(buf);
+out:
return retval;
}
static int cpuset_open(struct inode *inode, struct file *file)
{
- struct task_struct *tsk = PROC_I(inode)->task;
- return single_open(file, proc_cpuset_show, tsk);
+ struct pid *pid = PROC_I(inode)->pid;
+ return single_open(file, proc_cpuset_show, pid);
}
struct file_operations proc_cpuset_operations = {
diff --git a/kernel/exit.c b/kernel/exit.c
index e76bd02e930e..ab06b9f88f64 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -137,12 +137,8 @@ void release_task(struct task_struct * p)
{
int zap_leader;
task_t *leader;
- struct dentry *proc_dentry;
-
repeat:
atomic_dec(&p->user->processes);
- spin_lock(&p->proc_lock);
- proc_dentry = proc_pid_unhash(p);
write_lock_irq(&tasklist_lock);
ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
@@ -171,8 +167,7 @@ repeat:
sched_exit(p);
write_unlock_irq(&tasklist_lock);
- spin_unlock(&p->proc_lock);
- proc_pid_flush(proc_dentry);
+ proc_flush_task(p);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
@@ -931,9 +926,18 @@ fastcall NORET_TYPE void do_exit(long code)
tsk->mempolicy = NULL;
#endif
/*
+ * This must happen late, after the PID is not
+ * hashed anymore:
+ */
+ if (unlikely(!list_empty(&tsk->pi_state_list)))
+ exit_pi_state_list(tsk);
+ if (unlikely(current->pi_state_cache))
+ kfree(current->pi_state_cache);
+ /*
* If DEBUG_MUTEXES is on, make sure we are holding no locks:
*/
mutex_debug_check_no_locks_held(tsk);
+ rt_mutex_debug_check_no_locks_held(tsk);
if (tsk->io_context)
exit_io_context();
diff --git a/kernel/fork.c b/kernel/fork.c
index dfd10cb370c3..628198a4f28a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -104,6 +104,7 @@ static kmem_cache_t *mm_cachep;
void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ rt_mutex_debug_task_free(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@@ -913,6 +914,19 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
return current->pid;
}
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+ spin_lock_init(&p->pi_lock);
+ plist_head_init(&p->pi_waiters, &p->pi_lock);
+ p->pi_blocked_on = NULL;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+ spin_lock_init(&p->held_list_lock);
+ INIT_LIST_HEAD(&p->held_list_head);
+# endif
+#endif
+}
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
@@ -993,13 +1007,10 @@ static task_t *copy_process(unsigned long clone_flags,
if (put_user(p->pid, parent_tidptr))
goto bad_fork_cleanup;
- p->proc_dentry = NULL;
-
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
- spin_lock_init(&p->proc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
@@ -1037,6 +1048,8 @@ static task_t *copy_process(unsigned long clone_flags,
mpol_fix_fork_child_flag(p);
#endif
+ rt_mutex_init_task(p);
+
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
@@ -1079,6 +1092,9 @@ static task_t *copy_process(unsigned long clone_flags,
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
+ INIT_LIST_HEAD(&p->pi_state_list);
+ p->pi_state_cache = NULL;
+
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -1159,18 +1175,6 @@ static task_t *copy_process(unsigned long clone_flags,
}
if (clone_flags & CLONE_THREAD) {
- /*
- * Important: if an exit-all has been started then
- * do not create this new thread - the whole thread
- * group is supposed to exit anyway.
- */
- if (current->signal->flags & SIGNAL_GROUP_EXIT) {
- spin_unlock(&current->sighand->siglock);
- write_unlock_irq(&tasklist_lock);
- retval = -EAGAIN;
- goto bad_fork_cleanup_namespace;
- }
-
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
diff --git a/kernel/futex.c b/kernel/futex.c
index e1a380c77a5a..6c91f938005d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -12,6 +12,10 @@
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
+ * PI-futex support started by Ingo Molnar and Thomas Gleixner
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
@@ -46,6 +50,8 @@
#include <linux/signal.h>
#include <asm/futex.h>
+#include "rtmutex_common.h"
+
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
/*
@@ -63,7 +69,7 @@ union futex_key {
int offset;
} shared;
struct {
- unsigned long uaddr;
+ unsigned long address;
struct mm_struct *mm;
int offset;
} private;
@@ -75,6 +81,27 @@ union futex_key {
};
/*
+ * Priority Inheritance state:
+ */
+struct futex_pi_state {
+ /*
+ * list of 'owned' pi_state instances - these have to be
+ * cleaned up in do_exit() if the task exits prematurely:
+ */
+ struct list_head list;
+
+ /*
+ * The PI object:
+ */
+ struct rt_mutex pi_mutex;
+
+ struct task_struct *owner;
+ atomic_t refcount;
+
+ union futex_key key;
+};
+
+/*
* We use this hashed waitqueue instead of a normal wait_queue_t, so
* we can wake only the relevant ones (hashed queues may be shared).
*
@@ -87,15 +114,19 @@ struct futex_q {
struct list_head list;
wait_queue_head_t waiters;
- /* Which hash list lock to use. */
+ /* Which hash list lock to use: */
spinlock_t *lock_ptr;
- /* Key which the futex is hashed on. */
+ /* Key which the futex is hashed on: */
union futex_key key;
- /* For fd, sigio sent using these. */
+ /* For fd, sigio sent using these: */
int fd;
struct file *filp;
+
+ /* Optional priority inheritance state: */
+ struct futex_pi_state *pi_state;
+ struct task_struct *task;
};
/*
@@ -144,8 +175,9 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
*
* Should be called with &current->mm->mmap_sem but NOT any spinlocks.
*/
-static int get_futex_key(unsigned long uaddr, union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, union futex_key *key)
{
+ unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct page *page;
@@ -154,16 +186,16 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
/*
* The futex address must be "naturally" aligned.
*/
- key->both.offset = uaddr % PAGE_SIZE;
+ key->both.offset = address % PAGE_SIZE;
if (unlikely((key->both.offset % sizeof(u32)) != 0))
return -EINVAL;
- uaddr -= key->both.offset;
+ address -= key->both.offset;
/*
* The futex is hashed differently depending on whether
* it's in a shared or private mapping. So check vma first.
*/
- vma = find_extend_vma(mm, uaddr);
+ vma = find_extend_vma(mm, address);
if (unlikely(!vma))
return -EFAULT;
@@ -184,7 +216,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
*/
if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
key->private.mm = mm;
- key->private.uaddr = uaddr;
+ key->private.address = address;
return 0;
}
@@ -194,7 +226,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
key->shared.inode = vma->vm_file->f_dentry->d_inode;
key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT)
+ key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
+ vma->vm_pgoff);
return 0;
}
@@ -205,7 +237,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
* from swap. But that's a lot of code to duplicate here
* for a rare case, so we simply fetch the page.
*/
- err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
+ err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
if (err >= 0) {
key->shared.pgoff =
page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -246,18 +278,244 @@ static void drop_key_refs(union futex_key *key)
}
}
-static inline int get_futex_value_locked(int *dest, int __user *from)
+static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
inc_preempt_count();
- ret = __copy_from_user_inatomic(dest, from, sizeof(int));
+ ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
dec_preempt_count();
return ret ? -EFAULT : 0;
}
/*
+ * Fault handling. Called with current->mm->mmap_sem held.
+ */
+static int futex_handle_fault(unsigned long address, int attempt)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+
+ if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
+ vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
+ return -EFAULT;
+
+ switch (handle_mm_fault(mm, vma, address, 1)) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ default:
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * PI code:
+ */
+static int refill_pi_state_cache(void)
+{
+ struct futex_pi_state *pi_state;
+
+ if (likely(current->pi_state_cache))
+ return 0;
+
+ pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
+
+ if (!pi_state)
+ return -ENOMEM;
+
+ memset(pi_state, 0, sizeof(*pi_state));
+ INIT_LIST_HEAD(&pi_state->list);
+ /* pi_mutex gets initialized later */
+ pi_state->owner = NULL;
+ atomic_set(&pi_state->refcount, 1);
+
+ current->pi_state_cache = pi_state;
+
+ return 0;
+}
+
+static struct futex_pi_state * alloc_pi_state(void)
+{
+ struct futex_pi_state *pi_state = current->pi_state_cache;
+
+ WARN_ON(!pi_state);
+ current->pi_state_cache = NULL;
+
+ return pi_state;
+}
+
+static void free_pi_state(struct futex_pi_state *pi_state)
+{
+ if (!atomic_dec_and_test(&pi_state->refcount))
+ return;
+
+ /*
+ * If pi_state->owner is NULL, the owner is most probably dying
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+ spin_lock_irq(&pi_state->owner->pi_lock);
+ list_del_init(&pi_state->list);
+ spin_unlock_irq(&pi_state->owner->pi_lock);
+
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
+ }
+
+ if (current->pi_state_cache)
+ kfree(pi_state);
+ else {
+ /*
+ * pi_state->list is already empty.
+ * clear pi_state->owner.
+ * refcount is at 0 - put it back to 1.
+ */
+ pi_state->owner = NULL;
+ atomic_set(&pi_state->refcount, 1);
+ current->pi_state_cache = pi_state;
+ }
+}
+
+/*
+ * Look up the task based on what TID userspace gave us.
+ * We don't trust it.
+ */
+static struct task_struct * futex_find_get_task(pid_t pid)
+{
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+ if ((current->euid != p->euid) && (current->euid != p->uid)) {
+ p = NULL;
+ goto out_unlock;
+ }
+ if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
+ p = NULL;
+ goto out_unlock;
+ }
+ get_task_struct(p);
+out_unlock:
+ read_unlock(&tasklist_lock);
+
+ return p;
+}
+
+/*
+ * This task is holding PI mutexes at exit time => bad.
+ * Kernel cleans up PI-state, but userspace is likely hosed.
+ * (Robust-futex cleanup is separate and might save the day for userspace.)
+ */
+void exit_pi_state_list(struct task_struct *curr)
+{
+ struct futex_hash_bucket *hb;
+ struct list_head *next, *head = &curr->pi_state_list;
+ struct futex_pi_state *pi_state;
+ union futex_key key;
+
+ /*
+ * We are a ZOMBIE and nobody can enqueue itself on
+ * pi_state_list anymore, but we have to be careful
+ * versus waiters unqueueing themselves
+ */
+ spin_lock_irq(&curr->pi_lock);
+ while (!list_empty(head)) {
+
+ next = head->next;
+ pi_state = list_entry(next, struct futex_pi_state, list);
+ key = pi_state->key;
+ spin_unlock_irq(&curr->pi_lock);
+
+ hb = hash_futex(&key);
+ spin_lock(&hb->lock);
+
+ spin_lock_irq(&curr->pi_lock);
+ if (head->next != next) {
+ spin_unlock(&hb->lock);
+ continue;
+ }
+
+ list_del_init(&pi_state->list);
+
+ WARN_ON(pi_state->owner != curr);
+
+ pi_state->owner = NULL;
+ spin_unlock_irq(&curr->pi_lock);
+
+ rt_mutex_unlock(&pi_state->pi_mutex);
+
+ spin_unlock(&hb->lock);
+
+ spin_lock_irq(&curr->pi_lock);
+ }
+ spin_unlock_irq(&curr->pi_lock);
+}
+
+static int
+lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
+{
+ struct futex_pi_state *pi_state = NULL;
+ struct futex_q *this, *next;
+ struct list_head *head;
+ struct task_struct *p;
+ pid_t pid;
+
+ head = &hb->chain;
+
+ list_for_each_entry_safe(this, next, head, list) {
+ if (match_futex (&this->key, &me->key)) {
+ /*
+ * Another waiter already exists - bump up
+ * the refcount and return its pi_state:
+ */
+ pi_state = this->pi_state;
+ atomic_inc(&pi_state->refcount);
+ me->pi_state = pi_state;
+
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and
+ * attach the new pi_state to it:
+ */
+ pid = uval & FUTEX_TID_MASK;
+ p = futex_find_get_task(pid);
+ if (!p)
+ return -ESRCH;
+
+ pi_state = alloc_pi_state();
+
+ /*
+ * Initialize the pi_mutex in locked state and make 'p'
+ * the owner of it:
+ */
+ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
+
+ /* Store the key for possible exit cleanups: */
+ pi_state->key = me->key;
+
+ spin_lock_irq(&p->pi_lock);
+ list_add(&pi_state->list, &p->pi_state_list);
+ pi_state->owner = p;
+ spin_unlock_irq(&p->pi_lock);
+
+ put_task_struct(p);
+
+ me->pi_state = pi_state;
+
+ return 0;
+}
+
+/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed.
*/
@@ -284,16 +542,80 @@ static void wake_futex(struct futex_q *q)
q->lock_ptr = NULL;
}
+static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+{
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 curval, newval;
+
+ if (!pi_state)
+ return -EINVAL;
+
+ new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
+
+ /*
+ * This happens when we have stolen the lock and the original
+ * pending owner did not enqueue itself back on the rt_mutex.
+ * Thats not a tragedy. We know that way, that a lock waiter
+ * is on the fly. We make the futex_q waiter the pending owner.
+ */
+ if (!new_owner)
+ new_owner = this->task;
+
+ /*
+ * We pass it to the next owner. (The WAITERS bit is always
+ * kept enabled while there is PI state around. We must also
+ * preserve the owner died bit.)
+ */
+ newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+ dec_preempt_count();
+
+ if (curval == -EFAULT)
+ return -EFAULT;
+ if (curval != uval)
+ return -EINVAL;
+
+ list_del_init(&pi_state->owner->pi_state_list);
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+ rt_mutex_unlock(&pi_state->pi_mutex);
+
+ return 0;
+}
+
+static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
+{
+ u32 oldval;
+
+ /*
+ * There is no waiter, so we unlock the futex. The owner died
+ * bit has not to be preserved here. We are the owner:
+ */
+ inc_preempt_count();
+ oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
+ dec_preempt_count();
+
+ if (oldval == -EFAULT)
+ return oldval;
+ if (oldval != uval)
+ return -EAGAIN;
+
+ return 0;
+}
+
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
-static int futex_wake(unsigned long uaddr, int nr_wake)
+static int futex_wake(u32 __user *uaddr, int nr_wake)
{
- union futex_key key;
- struct futex_hash_bucket *bh;
- struct list_head *head;
+ struct futex_hash_bucket *hb;
struct futex_q *this, *next;
+ struct list_head *head;
+ union futex_key key;
int ret;
down_read(&current->mm->mmap_sem);
@@ -302,19 +624,21 @@ static int futex_wake(unsigned long uaddr, int nr_wake)
if (unlikely(ret != 0))
goto out;
- bh = hash_futex(&key);
- spin_lock(&bh->lock);
- head = &bh->chain;
+ hb = hash_futex(&key);
+ spin_lock(&hb->lock);
+ head = &hb->chain;
list_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key)) {
+ if (this->pi_state)
+ return -EINVAL;
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
- spin_unlock(&bh->lock);
+ spin_unlock(&hb->lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
@@ -324,10 +648,12 @@ out:
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
-static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op)
+static int
+futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
+ int nr_wake, int nr_wake2, int op)
{
union futex_key key1, key2;
- struct futex_hash_bucket *bh1, *bh2;
+ struct futex_hash_bucket *hb1, *hb2;
struct list_head *head;
struct futex_q *this, *next;
int ret, op_ret, attempt = 0;
@@ -342,27 +668,29 @@ retryfull:
if (unlikely(ret != 0))
goto out;
- bh1 = hash_futex(&key1);
- bh2 = hash_futex(&key2);
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
retry:
- if (bh1 < bh2)
- spin_lock(&bh1->lock);
- spin_lock(&bh2->lock);
- if (bh1 > bh2)
- spin_lock(&bh1->lock);
+ if (hb1 < hb2)
+ spin_lock(&hb1->lock);
+ spin_lock(&hb2->lock);
+ if (hb1 > hb2)
+ spin_lock(&hb1->lock);
- op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
+ op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
- int dummy;
+ u32 dummy;
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
#ifndef CONFIG_MMU
- /* we don't get EFAULT from MMU faults if we don't have an MMU,
- * but we might get them from range checking */
+ /*
+ * we don't get EFAULT from MMU faults if we don't have an MMU,
+ * but we might get them from range checking
+ */
ret = op_ret;
goto out;
#endif
@@ -372,47 +700,34 @@ retry:
goto out;
}
- /* futex_atomic_op_inuser needs to both read and write
+ /*
+ * futex_atomic_op_inuser needs to both read and write
* *(int __user *)uaddr2, but we can't modify it
* non-atomically. Therefore, if get_user below is not
* enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem. */
+ * still holding the mmap_sem.
+ */
if (attempt++) {
- struct vm_area_struct * vma;
- struct mm_struct *mm = current->mm;
-
- ret = -EFAULT;
- if (attempt >= 2 ||
- !(vma = find_vma(mm, uaddr2)) ||
- vma->vm_start > uaddr2 ||
- !(vma->vm_flags & VM_WRITE))
- goto out;
-
- switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- default:
+ if (futex_handle_fault((unsigned long)uaddr2,
+ attempt))
goto out;
- }
goto retry;
}
- /* If we would have faulted, release mmap_sem,
- * fault it in and start all over again. */
+ /*
+ * If we would have faulted, release mmap_sem,
+ * fault it in and start all over again.
+ */
up_read(&current->mm->mmap_sem);
- ret = get_user(dummy, (int __user *)uaddr2);
+ ret = get_user(dummy, uaddr2);
if (ret)
return ret;
goto retryfull;
}
- head = &bh1->chain;
+ head = &hb1->chain;
list_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key1)) {
@@ -423,7 +738,7 @@ retry:
}
if (op_ret > 0) {
- head = &bh2->chain;
+ head = &hb2->chain;
op_ret = 0;
list_for_each_entry_safe(this, next, head, list) {
@@ -436,9 +751,9 @@ retry:
ret += op_ret;
}
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
@@ -448,11 +763,11 @@ out:
* Requeue all waiters hashed on one physical page to another
* physical page.
*/
-static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
- int nr_wake, int nr_requeue, int *valp)
+static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
+ int nr_wake, int nr_requeue, u32 *cmpval)
{
union futex_key key1, key2;
- struct futex_hash_bucket *bh1, *bh2;
+ struct futex_hash_bucket *hb1, *hb2;
struct list_head *head1;
struct futex_q *this, *next;
int ret, drop_count = 0;
@@ -467,68 +782,72 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
if (unlikely(ret != 0))
goto out;
- bh1 = hash_futex(&key1);
- bh2 = hash_futex(&key2);
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
- if (bh1 < bh2)
- spin_lock(&bh1->lock);
- spin_lock(&bh2->lock);
- if (bh1 > bh2)
- spin_lock(&bh1->lock);
+ if (hb1 < hb2)
+ spin_lock(&hb1->lock);
+ spin_lock(&hb2->lock);
+ if (hb1 > hb2)
+ spin_lock(&hb1->lock);
- if (likely(valp != NULL)) {
- int curval;
+ if (likely(cmpval != NULL)) {
+ u32 curval;
- ret = get_futex_value_locked(&curval, (int __user *)uaddr1);
+ ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
- /* If we would have faulted, release mmap_sem, fault
+ /*
+ * If we would have faulted, release mmap_sem, fault
* it in and start all over again.
*/
up_read(&current->mm->mmap_sem);
- ret = get_user(curval, (int __user *)uaddr1);
+ ret = get_user(curval, uaddr1);
if (!ret)
goto retry;
return ret;
}
- if (curval != *valp) {
+ if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
- head1 = &bh1->chain;
+ head1 = &hb1->chain;
list_for_each_entry_safe(this, next, head1, list) {
if (!match_futex (&this->key, &key1))
continue;
if (++ret <= nr_wake) {
wake_futex(this);
} else {
- list_move_tail(&this->list, &bh2->chain);
- this->lock_ptr = &bh2->lock;
+ /*
+ * If key1 and key2 hash to the same bucket, no need to
+ * requeue.
+ */
+ if (likely(head1 != &hb2->chain)) {
+ list_move_tail(&this->list, &hb2->chain);
+ this->lock_ptr = &hb2->lock;
+ }
this->key = key2;
get_key_refs(&key2);
drop_count++;
if (ret - nr_wake >= nr_requeue)
break;
- /* Make sure to stop if key1 == key2 */
- if (head1 == &bh2->chain && head1 != &next->list)
- head1 = &this->list;
}
}
out_unlock:
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
/* drop_key_refs() must be called outside the spinlocks. */
while (--drop_count >= 0)
@@ -543,7 +862,7 @@ out:
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
- struct futex_hash_bucket *bh;
+ struct futex_hash_bucket *hb;
q->fd = fd;
q->filp = filp;
@@ -551,23 +870,24 @@ queue_lock(struct futex_q *q, int fd, struct file *filp)
init_waitqueue_head(&q->waiters);
get_key_refs(&q->key);
- bh = hash_futex(&q->key);
- q->lock_ptr = &bh->lock;
+ hb = hash_futex(&q->key);
+ q->lock_ptr = &hb->lock;
- spin_lock(&bh->lock);
- return bh;
+ spin_lock(&hb->lock);
+ return hb;
}
-static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh)
+static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
- list_add_tail(&q->list, &bh->chain);
- spin_unlock(&bh->lock);
+ list_add_tail(&q->list, &hb->chain);
+ q->task = current;
+ spin_unlock(&hb->lock);
}
static inline void
-queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
+queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
- spin_unlock(&bh->lock);
+ spin_unlock(&hb->lock);
drop_key_refs(&q->key);
}
@@ -579,16 +899,17 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
- struct futex_hash_bucket *bh;
- bh = queue_lock(q, fd, filp);
- __queue_me(q, bh);
+ struct futex_hash_bucket *hb;
+
+ hb = queue_lock(q, fd, filp);
+ __queue_me(q, hb);
}
/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
- int ret = 0;
spinlock_t *lock_ptr;
+ int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
@@ -614,6 +935,9 @@ static int unqueue_me(struct futex_q *q)
}
WARN_ON(list_empty(&q->list));
list_del(&q->list);
+
+ BUG_ON(q->pi_state);
+
spin_unlock(lock_ptr);
ret = 1;
}
@@ -622,21 +946,42 @@ static int unqueue_me(struct futex_q *q)
return ret;
}
-static int futex_wait(unsigned long uaddr, int val, unsigned long time)
+/*
+ * PI futexes cannot be requeued and must remove themselves from the
+ * hash bucket. The hash bucket lock is held on entry and dropped here.
+ */
+static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb)
{
- DECLARE_WAITQUEUE(wait, current);
- int ret, curval;
+ WARN_ON(list_empty(&q->list));
+ list_del(&q->list);
+
+ BUG_ON(!q->pi_state);
+ free_pi_state(q->pi_state);
+ q->pi_state = NULL;
+
+ spin_unlock(&hb->lock);
+
+ drop_key_refs(&q->key);
+}
+
+static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
+{
+ struct task_struct *curr = current;
+ DECLARE_WAITQUEUE(wait, curr);
+ struct futex_hash_bucket *hb;
struct futex_q q;
- struct futex_hash_bucket *bh;
+ u32 uval;
+ int ret;
+ q.pi_state = NULL;
retry:
- down_read(&current->mm->mmap_sem);
+ down_read(&curr->mm->mmap_sem);
ret = get_futex_key(uaddr, &q.key);
if (unlikely(ret != 0))
goto out_release_sem;
- bh = queue_lock(&q, -1, NULL);
+ hb = queue_lock(&q, -1, NULL);
/*
* Access the page AFTER the futex is queued.
@@ -658,37 +1003,35 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
* We hold the mmap semaphore, so the mapping cannot have changed
* since we looked it up in get_futex_key.
*/
-
- ret = get_futex_value_locked(&curval, (int __user *)uaddr);
+ ret = get_futex_value_locked(&uval, uaddr);
if (unlikely(ret)) {
- queue_unlock(&q, bh);
+ queue_unlock(&q, hb);
- /* If we would have faulted, release mmap_sem, fault it in and
+ /*
+ * If we would have faulted, release mmap_sem, fault it in and
* start all over again.
*/
- up_read(&current->mm->mmap_sem);
+ up_read(&curr->mm->mmap_sem);
- ret = get_user(curval, (int __user *)uaddr);
+ ret = get_user(uval, uaddr);
if (!ret)
goto retry;
return ret;
}
- if (curval != val) {
- ret = -EWOULDBLOCK;
- queue_unlock(&q, bh);
- goto out_release_sem;
- }
+ ret = -EWOULDBLOCK;
+ if (uval != val)
+ goto out_unlock_release_sem;
/* Only actually queue if *uaddr contained val. */
- __queue_me(&q, bh);
+ __queue_me(&q, hb);
/*
* Now the futex is queued and we have checked the data, we
* don't want to hold mmap_sem while we sleep.
- */
- up_read(&current->mm->mmap_sem);
+ */
+ up_read(&curr->mm->mmap_sem);
/*
* There might have been scheduling since the queue_me(), as we
@@ -720,12 +1063,421 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
return 0;
if (time == 0)
return -ETIMEDOUT;
- /* We expect signal_pending(current), but another thread may
- * have handled it for us already. */
+ /*
+ * We expect signal_pending(current), but another thread may
+ * have handled it for us already.
+ */
return -EINTR;
+ out_unlock_release_sem:
+ queue_unlock(&q, hb);
+
out_release_sem:
+ up_read(&curr->mm->mmap_sem);
+ return ret;
+}
+
+/*
+ * Userspace tried a 0 -> TID atomic transition of the futex value
+ * and failed. The kernel side here does the whole locking operation:
+ * if there are waiters then it will block, it does PI, etc. (Due to
+ * races the kernel might see a 0 value of the futex too.)
+ */
+static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
+ struct hrtimer_sleeper *to)
+{
+ struct task_struct *curr = current;
+ struct futex_hash_bucket *hb;
+ u32 uval, newval, curval;
+ struct futex_q q;
+ int ret, attempt = 0;
+
+ if (refill_pi_state_cache())
+ return -ENOMEM;
+
+ q.pi_state = NULL;
+ retry:
+ down_read(&curr->mm->mmap_sem);
+
+ ret = get_futex_key(uaddr, &q.key);
+ if (unlikely(ret != 0))
+ goto out_release_sem;
+
+ hb = queue_lock(&q, -1, NULL);
+
+ retry_locked:
+ /*
+ * To avoid races, we attempt to take the lock here again
+ * (by doing a 0 -> TID atomic cmpxchg), while holding all
+ * the locks. It will most likely not succeed.
+ */
+ newval = current->pid;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
+ dec_preempt_count();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+
+ /* We own the lock already */
+ if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
+ if (!detect && 0)
+ force_sig(SIGKILL, current);
+ ret = -EDEADLK;
+ goto out_unlock_release_sem;
+ }
+
+ /*
+ * Surprise - we got the lock. Just return
+ * to userspace:
+ */
+ if (unlikely(!curval))
+ goto out_unlock_release_sem;
+
+ uval = curval;
+ newval = uval | FUTEX_WAITERS;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+ dec_preempt_count();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+ if (unlikely(curval != uval))
+ goto retry_locked;
+
+ /*
+ * We don't have the lock. Look up the PI state (or create it if
+ * we are the first waiter):
+ */
+ ret = lookup_pi_state(uval, hb, &q);
+
+ if (unlikely(ret)) {
+ /*
+ * There were no waiters and the owner task lookup
+ * failed. When the OWNER_DIED bit is set, then we
+ * know that this is a robust futex and we actually
+ * take the lock. This is safe as we are protected by
+ * the hash bucket lock. We also set the waiters bit
+ * unconditionally here, to simplify glibc handling of
+ * multiple tasks racing to acquire the lock and
+ * cleanup the problems which were left by the dead
+ * owner.
+ */
+ if (curval & FUTEX_OWNER_DIED) {
+ uval = newval;
+ newval = current->pid |
+ FUTEX_OWNER_DIED | FUTEX_WAITERS;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr,
+ uval, newval);
+ dec_preempt_count();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+ if (unlikely(curval != uval))
+ goto retry_locked;
+ ret = 0;
+ }
+ goto out_unlock_release_sem;
+ }
+
+ /*
+ * Only actually queue now that the atomic ops are done:
+ */
+ __queue_me(&q, hb);
+
+ /*
+ * Now the futex is queued and we have checked the data, we
+ * don't want to hold mmap_sem while we sleep.
+ */
+ up_read(&curr->mm->mmap_sem);
+
+ WARN_ON(!q.pi_state);
+ /*
+ * Block on the PI mutex:
+ */
+ if (!trylock)
+ ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
+ else {
+ ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
+ /* Fixup the trylock return value: */
+ ret = ret ? 0 : -EWOULDBLOCK;
+ }
+
+ down_read(&curr->mm->mmap_sem);
+ hb = queue_lock(&q, -1, NULL);
+
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (!ret && q.pi_state->owner != curr) {
+ u32 newtid = current->pid | FUTEX_WAITERS;
+
+ /* Owner died? */
+ if (q.pi_state->owner != NULL) {
+ spin_lock_irq(&q.pi_state->owner->pi_lock);
+ list_del_init(&q.pi_state->list);
+ spin_unlock_irq(&q.pi_state->owner->pi_lock);
+ } else
+ newtid |= FUTEX_OWNER_DIED;
+
+ q.pi_state->owner = current;
+
+ spin_lock_irq(&current->pi_lock);
+ list_add(&q.pi_state->list, &current->pi_state_list);
+ spin_unlock_irq(&current->pi_lock);
+
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+ /*
+ * We own it, so we have to replace the pending owner
+ * TID. This must be atomic as we have to preserve the
+ * owner died bit here.
+ */
+ ret = get_user(uval, uaddr);
+ while (!ret) {
+ newval = (uval & FUTEX_OWNER_DIED) | newtid;
+ curval = futex_atomic_cmpxchg_inatomic(uaddr,
+ uval, newval);
+ if (curval == -EFAULT)
+ ret = -EFAULT;
+ if (curval == uval)
+ break;
+ uval = curval;
+ }
+ } else {
+ /*
+ * Catch the rare case where the lock was released
+ * when we were on the way back before we locked
+ * the hash bucket.
+ */
+ if (ret && q.pi_state->owner == curr) {
+ if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+ ret = 0;
+ }
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+ }
+
+ if (!detect && ret == -EDEADLK && 0)
+ force_sig(SIGKILL, current);
+
+ return ret;
+
+ out_unlock_release_sem:
+ queue_unlock(&q, hb);
+
+ out_release_sem:
+ up_read(&curr->mm->mmap_sem);
+ return ret;
+
+ uaddr_faulted:
+ /*
+ * We have to r/w *(int __user *)uaddr, but we can't modify it
+ * non-atomically. Therefore, if get_user below is not
+ * enough, we need to handle the fault ourselves, while
+ * still holding the mmap_sem.
+ */
+ if (attempt++) {
+ if (futex_handle_fault((unsigned long)uaddr, attempt))
+ goto out_unlock_release_sem;
+
+ goto retry_locked;
+ }
+
+ queue_unlock(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+
+ ret = get_user(uval, uaddr);
+ if (!ret && (uval != -EFAULT))
+ goto retry;
+
+ return ret;
+}
+
+/*
+ * Restart handler
+ */
+static long futex_lock_pi_restart(struct restart_block *restart)
+{
+ struct hrtimer_sleeper timeout, *to = NULL;
+ int ret;
+
+ restart->fn = do_no_restart_syscall;
+
+ if (restart->arg2 || restart->arg3) {
+ to = &timeout;
+ hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+ hrtimer_init_sleeper(to, current);
+ to->timer.expires.tv64 = ((u64)restart->arg3 << 32) |
+ (u64) restart->arg2;
+ }
+
+ pr_debug("lock_pi restart: %p, %d (%d)\n",
+ (u32 __user *)restart->arg0, current->pid);
+
+ ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
+ 0, to);
+
+ if (ret != -EINTR)
+ return ret;
+
+ restart->fn = futex_lock_pi_restart;
+
+ /* The other values are filled in */
+ return -ERESTART_RESTARTBLOCK;
+}
+
+/*
+ * Called from the syscall entry below.
+ */
+static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
+ long nsec, int trylock)
+{
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct restart_block *restart;
+ int ret;
+
+ if (sec != MAX_SCHEDULE_TIMEOUT) {
+ to = &timeout;
+ hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+ hrtimer_init_sleeper(to, current);
+ to->timer.expires = ktime_set(sec, nsec);
+ }
+
+ ret = do_futex_lock_pi(uaddr, detect, trylock, to);
+
+ if (ret != -EINTR)
+ return ret;
+
+ pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
+
+ restart = &current_thread_info()->restart_block;
+ restart->fn = futex_lock_pi_restart;
+ restart->arg0 = (unsigned long) uaddr;
+ restart->arg1 = detect;
+ if (to) {
+ restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
+ restart->arg3 = to->timer.expires.tv64 >> 32;
+ } else
+ restart->arg2 = restart->arg3 = 0;
+
+ return -ERESTART_RESTARTBLOCK;
+}
+
+/*
+ * Userspace attempted a TID -> 0 atomic transition, and failed.
+ * This is the in-kernel slowpath: we look up the PI state (if any),
+ * and do the rt-mutex unlock.
+ */
+static int futex_unlock_pi(u32 __user *uaddr)
+{
+ struct futex_hash_bucket *hb;
+ struct futex_q *this, *next;
+ u32 uval;
+ struct list_head *head;
+ union futex_key key;
+ int ret, attempt = 0;
+
+retry:
+ if (get_user(uval, uaddr))
+ return -EFAULT;
+ /*
+ * We release only a lock we actually own:
+ */
+ if ((uval & FUTEX_TID_MASK) != current->pid)
+ return -EPERM;
+ /*
+ * First take all the futex related locks:
+ */
+ down_read(&current->mm->mmap_sem);
+
+ ret = get_futex_key(uaddr, &key);
+ if (unlikely(ret != 0))
+ goto out;
+
+ hb = hash_futex(&key);
+ spin_lock(&hb->lock);
+
+retry_locked:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+ * anyone else up:
+ */
+ inc_preempt_count();
+ uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
+ dec_preempt_count();
+
+ if (unlikely(uval == -EFAULT))
+ goto pi_faulted;
+ /*
+ * Rare case: we managed to release the lock atomically,
+ * no need to wake anyone else up:
+ */
+ if (unlikely(uval == current->pid))
+ goto out_unlock;
+
+ /*
+ * Ok, other tasks may need to be woken up - check waiters
+ * and do the wakeup if necessary:
+ */
+ head = &hb->chain;
+
+ list_for_each_entry_safe(this, next, head, list) {
+ if (!match_futex (&this->key, &key))
+ continue;
+ ret = wake_futex_pi(uaddr, uval, this);
+ /*
+ * The atomic access to the futex value
+ * generated a pagefault, so retry the
+ * user-access and the wakeup:
+ */
+ if (ret == -EFAULT)
+ goto pi_faulted;
+ goto out_unlock;
+ }
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+ ret = unlock_futex_pi(uaddr, uval);
+ if (ret == -EFAULT)
+ goto pi_faulted;
+
+out_unlock:
+ spin_unlock(&hb->lock);
+out:
up_read(&current->mm->mmap_sem);
+
+ return ret;
+
+pi_faulted:
+ /*
+ * We have to r/w *(int __user *)uaddr, but we can't modify it
+ * non-atomically. Therefore, if get_user below is not
+ * enough, we need to handle the fault ourselves, while
+ * still holding the mmap_sem.
+ */
+ if (attempt++) {
+ if (futex_handle_fault((unsigned long)uaddr, attempt))
+ goto out_unlock;
+
+ goto retry_locked;
+ }
+
+ spin_unlock(&hb->lock);
+ up_read(&current->mm->mmap_sem);
+
+ ret = get_user(uval, uaddr);
+ if (!ret && (uval != -EFAULT))
+ goto retry;
+
return ret;
}
@@ -735,6 +1487,7 @@ static int futex_close(struct inode *inode, struct file *filp)
unqueue_me(q);
kfree(q);
+
return 0;
}
@@ -766,7 +1519,7 @@ static struct file_operations futex_fops = {
* Signal allows caller to avoid the race which would occur if they
* set the sigio stuff up afterwards.
*/
-static int futex_fd(unsigned long uaddr, int signal)
+static int futex_fd(u32 __user *uaddr, int signal)
{
struct futex_q *q;
struct file *filp;
@@ -803,6 +1556,7 @@ static int futex_fd(unsigned long uaddr, int signal)
err = -ENOMEM;
goto error;
}
+ q->pi_state = NULL;
down_read(&current->mm->mmap_sem);
err = get_futex_key(uaddr, &q->key);
@@ -840,7 +1594,7 @@ error:
* Implementation: user-space maintains a per-thread list of locks it
* is holding. Upon do_exit(), the kernel carefully walks this list,
* and marks all locks that are owned by this thread with the
- * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is
+ * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
* always manipulated with the lock held, so the list is private and
* per-thread. Userspace also maintains a per-thread 'list_op_pending'
* field, to allow the kernel to clean up if the thread dies after
@@ -915,7 +1669,7 @@ err_unlock:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
{
- u32 uval;
+ u32 uval, nval;
retry:
if (get_user(uval, uaddr))
@@ -932,12 +1686,16 @@ retry:
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
- if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
- uval | FUTEX_OWNER_DIED) != uval)
+ nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
+ uval | FUTEX_OWNER_DIED);
+ if (nval == -EFAULT)
+ return -1;
+
+ if (nval != uval)
goto retry;
if (uval & FUTEX_WAITERS)
- futex_wake((unsigned long)uaddr, 1);
+ futex_wake(uaddr, 1);
}
return 0;
}
@@ -978,7 +1736,7 @@ void exit_robust_list(struct task_struct *curr)
while (entry != &head->list) {
/*
* A pending lock might already be on the list, so
- * dont process it twice:
+ * don't process it twice:
*/
if (entry != pending)
if (handle_futex_death((void *)entry + futex_offset,
@@ -999,8 +1757,8 @@ void exit_robust_list(struct task_struct *curr)
}
}
-long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
- unsigned long uaddr2, int val2, int val3)
+long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3)
{
int ret;
@@ -1024,6 +1782,15 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
case FUTEX_WAKE_OP:
ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
break;
+ case FUTEX_LOCK_PI:
+ ret = futex_lock_pi(uaddr, val, timeout, val2, 0);
+ break;
+ case FUTEX_UNLOCK_PI:
+ ret = futex_unlock_pi(uaddr);
+ break;
+ case FUTEX_TRYLOCK_PI:
+ ret = futex_lock_pi(uaddr, 0, timeout, val2, 1);
+ break;
default:
ret = -ENOSYS;
}
@@ -1031,29 +1798,33 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
}
-asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
struct timespec __user *utime, u32 __user *uaddr2,
- int val3)
+ u32 val3)
{
struct timespec t;
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
- int val2 = 0;
+ u32 val2 = 0;
- if (utime && (op == FUTEX_WAIT)) {
+ if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
if (copy_from_user(&t, utime, sizeof(t)) != 0)
return -EFAULT;
if (!timespec_valid(&t))
return -EINVAL;
- timeout = timespec_to_jiffies(&t) + 1;
+ if (op == FUTEX_WAIT)
+ timeout = timespec_to_jiffies(&t) + 1;
+ else {
+ timeout = t.tv_sec;
+ val2 = t.tv_nsec;
+ }
}
/*
* requeue parameter in 'utime' if op == FUTEX_REQUEUE.
*/
- if (op >= FUTEX_REQUEUE)
- val2 = (int) (unsigned long) utime;
+ if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
+ val2 = (u32) (unsigned long) utime;
- return do_futex((unsigned long)uaddr, op, val, timeout,
- (unsigned long)uaddr2, val2, val3);
+ return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}
static int futexfs_get_sb(struct file_system_type *fs_type,
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 1ab6a0ea3d14..d1d92b441fb7 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -129,16 +129,20 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
int val2 = 0;
- if (utime && (op == FUTEX_WAIT)) {
+ if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
if (get_compat_timespec(&t, utime))
return -EFAULT;
if (!timespec_valid(&t))
return -EINVAL;
- timeout = timespec_to_jiffies(&t) + 1;
+ if (op == FUTEX_WAIT)
+ timeout = timespec_to_jiffies(&t) + 1;
+ else {
+ timeout = t.tv_sec;
+ val2 = t.tv_nsec;
+ }
}
- if (op >= FUTEX_REQUEUE)
+ if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
val2 = (int) (unsigned long) utime;
- return do_futex((unsigned long)uaddr, op, val, timeout,
- (unsigned long)uaddr2, val2, val3);
+ return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 55601b3ce60e..8d3dc29ef41a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -833,7 +833,7 @@ static void migrate_hrtimers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int hrtimer_cpu_notify(struct notifier_block *self,
+static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -857,7 +857,7 @@ static int hrtimer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block hrtimers_nb = {
+static struct notifier_block __devinitdata hrtimers_nb = {
.notifier_call = hrtimer_cpu_notify,
};
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 9f77f50d8143..1dab0ac3f797 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,5 @@
-obj-y := handle.o manage.o spurious.o
+obj-y := handle.o manage.o spurious.o resend.o chip.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 3467097ca61a..533068cfb607 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -11,12 +11,14 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include "internals.h"
+
/*
* Autodetection depends on the fact that any interrupt that
* comes in on to an unassigned handler will get stuck with
* "IRQ_WAITING" cleared and the interrupt disabled.
*/
-static DECLARE_MUTEX(probe_sem);
+static DEFINE_MUTEX(probing_active);
/**
* probe_irq_on - begin an interrupt autodetect
@@ -27,11 +29,11 @@ static DECLARE_MUTEX(probe_sem);
*/
unsigned long probe_irq_on(void)
{
- unsigned long val;
- irq_desc_t *desc;
+ struct irq_desc *desc;
+ unsigned long mask;
unsigned int i;
- down(&probe_sem);
+ mutex_lock(&probing_active);
/*
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
@@ -40,8 +42,21 @@ unsigned long probe_irq_on(void)
desc = irq_desc + i;
spin_lock_irq(&desc->lock);
- if (!irq_desc[i].action)
- irq_desc[i].handler->startup(i);
+ if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
+ /*
+ * An old-style architecture might still have
+ * the handle_bad_irq handler there:
+ */
+ compat_irq_chip_set_default_handler(desc);
+
+ /*
+ * Some chips need to know about probing in
+ * progress:
+ */
+ if (desc->chip->set_type)
+ desc->chip->set_type(i, IRQ_TYPE_PROBE);
+ desc->chip->startup(i);
+ }
spin_unlock_irq(&desc->lock);
}
@@ -57,9 +72,9 @@ unsigned long probe_irq_on(void)
desc = irq_desc + i;
spin_lock_irq(&desc->lock);
- if (!desc->action) {
+ if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->handler->startup(i))
+ if (desc->chip->startup(i))
desc->status |= IRQ_PENDING;
}
spin_unlock_irq(&desc->lock);
@@ -73,11 +88,11 @@ unsigned long probe_irq_on(void)
/*
* Now filter out any obviously spurious interrupts
*/
- val = 0;
+ mask = 0;
for (i = 0; i < NR_IRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
unsigned int status;
+ desc = irq_desc + i;
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -85,17 +100,16 @@ unsigned long probe_irq_on(void)
/* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
+ desc->chip->shutdown(i);
} else
if (i < 32)
- val |= 1 << i;
+ mask |= 1 << i;
}
spin_unlock_irq(&desc->lock);
}
- return val;
+ return mask;
}
-
EXPORT_SYMBOL(probe_irq_on);
/**
@@ -117,7 +131,7 @@ unsigned int probe_irq_mask(unsigned long val)
mask = 0;
for (i = 0; i < NR_IRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
+ struct irq_desc *desc = irq_desc + i;
unsigned int status;
spin_lock_irq(&desc->lock);
@@ -128,11 +142,11 @@ unsigned int probe_irq_mask(unsigned long val)
mask |= 1 << i;
desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
+ desc->chip->shutdown(i);
}
spin_unlock_irq(&desc->lock);
}
- up(&probe_sem);
+ mutex_unlock(&probing_active);
return mask & val;
}
@@ -160,7 +174,7 @@ int probe_irq_off(unsigned long val)
int i, irq_found = 0, nr_irqs = 0;
for (i = 0; i < NR_IRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
+ struct irq_desc *desc = irq_desc + i;
unsigned int status;
spin_lock_irq(&desc->lock);
@@ -173,16 +187,16 @@ int probe_irq_off(unsigned long val)
nr_irqs++;
}
desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
+ desc->chip->shutdown(i);
}
spin_unlock_irq(&desc->lock);
}
- up(&probe_sem);
+ mutex_unlock(&probing_active);
if (nr_irqs > 1)
irq_found = -irq_found;
+
return irq_found;
}
-
EXPORT_SYMBOL(probe_irq_off);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
new file mode 100644
index 000000000000..4a0952d9458b
--- /dev/null
+++ b/kernel/irq/chip.c
@@ -0,0 +1,525 @@
+/*
+ * linux/kernel/irq/chip.c
+ *
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the core interrupt handling code, for irq-chip
+ * based architectures.
+ *
+ * Detailed information is available in Documentation/DocBook/genericirq
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include "internals.h"
+
+/**
+ * set_irq_chip - set the irq chip for an irq
+ * @irq: irq number
+ * @chip: pointer to irq chip description structure
+ */
+int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (!chip)
+ chip = &no_irq_chip;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock, flags);
+ irq_chip_set_defaults(chip);
+ desc->chip = chip;
+ /*
+ * For compatibility only:
+ */
+ desc->chip = chip;
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(set_irq_chip);
+
+/**
+ * set_irq_type - set the irq type for an irq
+ * @irq: irq number
+ * @type: interrupt type - see include/linux/interrupt.h
+ */
+int set_irq_type(unsigned int irq, unsigned int type)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ int ret = -ENXIO;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
+ return -ENODEV;
+ }
+
+ desc = irq_desc + irq;
+ if (desc->chip->set_type) {
+ spin_lock_irqsave(&desc->lock, flags);
+ ret = desc->chip->set_type(irq, type);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(set_irq_type);
+
+/**
+ * set_irq_data - set irq type data for an irq
+ * @irq: Interrupt number
+ * @data: Pointer to interrupt specific data
+ *
+ * Set the hardware irq controller data for an irq
+ */
+int set_irq_data(unsigned int irq, void *data)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR
+ "Trying to install controller data for IRQ%d\n", irq);
+ return -EINVAL;
+ }
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->handler_data = data;
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(set_irq_data);
+
+/**
+ * set_irq_chip_data - set irq chip data for an irq
+ * @irq: Interrupt number
+ * @data: Pointer to chip specific data
+ *
+ * Set the hardware irq chip data for an irq
+ */
+int set_irq_chip_data(unsigned int irq, void *data)
+{
+ struct irq_desc *desc = irq_desc + irq;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !desc->chip) {
+ printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->chip_data = data;
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(set_irq_chip_data);
+
+/*
+ * default enable function
+ */
+static void default_enable(unsigned int irq)
+{
+ struct irq_desc *desc = irq_desc + irq;
+
+ desc->chip->unmask(irq);
+ desc->status &= ~IRQ_MASKED;
+}
+
+/*
+ * default disable function
+ */
+static void default_disable(unsigned int irq)
+{
+ struct irq_desc *desc = irq_desc + irq;
+
+ if (!(desc->status & IRQ_DELAYED_DISABLE))
+ irq_desc[irq].chip->mask(irq);
+}
+
+/*
+ * default startup function
+ */
+static unsigned int default_startup(unsigned int irq)
+{
+ irq_desc[irq].chip->enable(irq);
+
+ return 0;
+}
+
+/*
+ * Fixup enable/disable function pointers
+ */
+void irq_chip_set_defaults(struct irq_chip *chip)
+{
+ if (!chip->enable)
+ chip->enable = default_enable;
+ if (!chip->disable)
+ chip->disable = default_disable;
+ if (!chip->startup)
+ chip->startup = default_startup;
+ if (!chip->shutdown)
+ chip->shutdown = chip->disable;
+ if (!chip->name)
+ chip->name = chip->typename;
+}
+
+static inline void mask_ack_irq(struct irq_desc *desc, int irq)
+{
+ if (desc->chip->mask_ack)
+ desc->chip->mask_ack(irq);
+ else {
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ }
+}
+
+/**
+ * handle_simple_irq - Simple and software-decoded IRQs.
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ * @regs: pointer to a register structure
+ *
+ * Simple interrupts are either sent from a demultiplexing interrupt
+ * handler or come from hardware, where no interrupt hardware control
+ * is necessary.
+ *
+ * Note: The caller is expected to handle the ack, clear, mask and
+ * unmask issues if necessary.
+ */
+void fastcall
+handle_simple_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+{
+ struct irqaction *action;
+ irqreturn_t action_ret;
+ const unsigned int cpu = smp_processor_id();
+
+ spin_lock(&desc->lock);
+
+ if (unlikely(desc->status & IRQ_INPROGRESS))
+ goto out_unlock;
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ kstat_cpu(cpu).irqs[irq]++;
+
+ action = desc->action;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ goto out_unlock;
+
+ desc->status |= IRQ_INPROGRESS;
+ spin_unlock(&desc->lock);
+
+ action_ret = handle_IRQ_event(irq, regs, action);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret, regs);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+out_unlock:
+ spin_unlock(&desc->lock);
+}
+
+/**
+ * handle_level_irq - Level type irq handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ * @regs: pointer to a register structure
+ *
+ * Level type interrupts are active as long as the hardware line has
+ * the active level. This may require masking the interrupt and unmasking
+ * it after the associated handler has acknowledged the device, so that
+ * the interrupt line is back to inactive.
+ */
+void fastcall
+handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+{
+ unsigned int cpu = smp_processor_id();
+ struct irqaction *action;
+ irqreturn_t action_ret;
+
+ spin_lock(&desc->lock);
+ mask_ack_irq(desc, irq);
+
+ if (unlikely(desc->status & IRQ_INPROGRESS))
+ goto out;
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ kstat_cpu(cpu).irqs[irq]++;
+
+ /*
+ * If it's disabled or no action is available,
+ * keep it masked and get out of here
+ */
+ action = desc->action;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ goto out;
+
+ desc->status |= IRQ_INPROGRESS;
+ spin_unlock(&desc->lock);
+
+ action_ret = handle_IRQ_event(irq, regs, action);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret, regs);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+ desc->chip->unmask(irq);
+ spin_unlock(&desc->lock);
+}
+
+/**
+ * handle_fasteoi_irq - irq handler for transparent controllers
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ * @regs: pointer to a register structure
+ *
+ * Only a single callback will be issued to the chip: an ->eoi()
+ * call when the interrupt has been serviced. This enables support
+ * for modern forms of interrupt handlers, which handle the flow
+ * details in hardware, transparently.
+ */
+void fastcall
+handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc,
+ struct pt_regs *regs)
+{
+ unsigned int cpu = smp_processor_id();
+ struct irqaction *action;
+ irqreturn_t action_ret;
+
+ spin_lock(&desc->lock);
+
+ if (unlikely(desc->status & IRQ_INPROGRESS))
+ goto out;
+
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ kstat_cpu(cpu).irqs[irq]++;
+
+ /*
+ * If it's disabled or no action is available,
+ * keep it masked and get out of here
+ */
+ action = desc->action;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
+ desc->status |= IRQ_PENDING;
+ goto out;
+ }
+
+ desc->status |= IRQ_INPROGRESS;
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock(&desc->lock);
+
+ action_ret = handle_IRQ_event(irq, regs, action);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret, regs);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ desc->chip->eoi(irq);
+
+ spin_unlock(&desc->lock);
+}
+
+/**
+ * handle_edge_irq - edge type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ * @regs: pointer to a register structure
+ *
+ * The interrupt occurs on the falling and/or rising edge of a hardware
+ * signal. The occurrence is latched into the irq controller hardware
+ * and must be acked in order to be re-enabled. After the ack another
+ * interrupt can happen on the same source even before the first one
+ * is handled by the associated event handler. If this happens it
+ * might be necessary to disable (mask) the interrupt depending on the
+ * controller hardware. This requires re-enabling the interrupt inside
+ * the loop which handles the interrupts that have arrived while
+ * the handler was running. If all pending interrupts are handled, the
+ * loop is left.
+ */
+void fastcall
+handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+{
+ const unsigned int cpu = smp_processor_id();
+
+ spin_lock(&desc->lock);
+
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+
+ /*
+ * If we're currently running this IRQ, or it's disabled,
+ * we shouldn't process the IRQ. Mark it pending, handle
+ * the necessary masking and bail out.
+ */
+ if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
+ !desc->action)) {
+ desc->status |= (IRQ_PENDING | IRQ_MASKED);
+ mask_ack_irq(desc, irq);
+ goto out_unlock;
+ }
+
+ kstat_cpu(cpu).irqs[irq]++;
+
+ /* Start handling the irq */
+ desc->chip->ack(irq);
+
+ /* Mark the IRQ currently in progress. */
+ desc->status |= IRQ_INPROGRESS;
+
+ do {
+ struct irqaction *action = desc->action;
+ irqreturn_t action_ret;
+
+ if (unlikely(!action)) {
+ desc->chip->mask(irq);
+ goto out_unlock;
+ }
+
+ /*
+ * When another irq arrived while we were handling
+ * one, we could have masked the irq.
+ * Re-enable it, if it was not disabled in the meantime.
+ */
+ if (unlikely((desc->status &
+ (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
+ (IRQ_PENDING | IRQ_MASKED))) {
+ desc->chip->unmask(irq);
+ desc->status &= ~IRQ_MASKED;
+ }
+
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock(&desc->lock);
+ action_ret = handle_IRQ_event(irq, regs, action);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret, regs);
+ spin_lock(&desc->lock);
+
+ } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+
+ desc->status &= ~IRQ_INPROGRESS;
+out_unlock:
+ spin_unlock(&desc->lock);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * handle_percpu_irq - Per CPU local irq handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ * @regs: pointer to a register structure
+ *
+ * Per CPU interrupts on SMP machines without locking requirements
+ */
+void fastcall
+handle_percpu_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+{
+ irqreturn_t action_ret;
+
+ kstat_this_cpu.irqs[irq]++;
+
+ if (desc->chip->ack)
+ desc->chip->ack(irq);
+
+ action_ret = handle_IRQ_event(irq, regs, desc->action);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret, regs);
+
+ if (desc->chip->eoi)
+ desc->chip->eoi(irq);
+}
+
+#endif /* CONFIG_SMP */
+
+void
+__set_irq_handler(unsigned int irq,
+ void fastcall (*handle)(unsigned int, irq_desc_t *,
+ struct pt_regs *),
+ int is_chained)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR
+ "Trying to install type control for IRQ%d\n", irq);
+ return;
+ }
+
+ desc = irq_desc + irq;
+
+ if (!handle)
+ handle = handle_bad_irq;
+
+ if (is_chained && desc->chip == &no_irq_chip)
+ printk(KERN_WARNING "Trying to install "
+ "chained interrupt type for IRQ%d\n", irq);
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ /* Uninstall? */
+ if (handle == handle_bad_irq) {
+ if (desc->chip != &no_irq_chip) {
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ }
+ desc->status |= IRQ_DISABLED;
+ desc->depth = 1;
+ }
+ desc->handle_irq = handle;
+
+ if (handle != handle_bad_irq && is_chained) {
+ desc->status &= ~IRQ_DISABLED;
+ desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
+ desc->depth = 0;
+ desc->chip->unmask(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+ void fastcall (*handle)(unsigned int,
+ struct irq_desc *,
+ struct pt_regs *))
+{
+ set_irq_chip(irq, chip);
+ __set_irq_handler(irq, handle, 0);
+}
+
+/*
+ * Get a descriptive string for the highlevel handler, for
+ * /proc/interrupts output:
+ */
+const char *
+handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
+ struct pt_regs *))
+{
+ if (handle == handle_level_irq)
+ return "level ";
+ if (handle == handle_fasteoi_irq)
+ return "fasteoi";
+ if (handle == handle_edge_irq)
+ return "edge ";
+ if (handle == handle_simple_irq)
+ return "simple ";
+#ifdef CONFIG_SMP
+ if (handle == handle_percpu_irq)
+ return "percpu ";
+#endif
+ if (handle == handle_bad_irq)
+ return "bad ";
+
+ return NULL;
+}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 0f6530117105..5a360dd4331b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -1,9 +1,13 @@
/*
* linux/kernel/irq/handle.c
*
- * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the core interrupt handling code.
+ *
+ * Detailed information is available in Documentation/DocBook/genericirq
+ *
*/
#include <linux/irq.h>
@@ -14,11 +18,22 @@
#include "internals.h"
+/**
+ * handle_bad_irq - handle spurious and unhandled irqs
+ */
+void fastcall
+handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+{
+ print_irq_desc(irq, desc);
+ kstat_this_cpu.irqs[irq]++;
+ ack_bad_irq(irq);
+}
+
/*
* Linux has a controller-independent interrupt architecture.
* Every controller has a 'controller-template', that is used
* by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the apropriate
+ * interrupt source is transparently wired to the appropriate
* controller. Thus drivers need not be aware of the
* interrupt-controller.
*
@@ -28,41 +43,52 @@
*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DISABLED,
- .handler = &no_irq_type,
- .lock = SPIN_LOCK_UNLOCKED
+ .chip = &no_irq_chip,
+ .handle_irq = handle_bad_irq,
+ .depth = 1,
+ .lock = SPIN_LOCK_UNLOCKED,
+#ifdef CONFIG_SMP
+ .affinity = CPU_MASK_ALL
+#endif
}
};
/*
- * Generic 'no controller' code
+ * What should we do if we get a hw irq event on an illegal vector?
+ * Each architecture has to answer this itself.
*/
-static void end_none(unsigned int irq) { }
-static void enable_none(unsigned int irq) { }
-static void disable_none(unsigned int irq) { }
-static void shutdown_none(unsigned int irq) { }
-static unsigned int startup_none(unsigned int irq) { return 0; }
-
-static void ack_none(unsigned int irq)
+static void ack_bad(unsigned int irq)
{
- /*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themself.
- */
+ print_irq_desc(irq, irq_desc + irq);
ack_bad_irq(irq);
}
-struct hw_interrupt_type no_irq_type = {
- .typename = "none",
- .startup = startup_none,
- .shutdown = shutdown_none,
- .enable = enable_none,
- .disable = disable_none,
- .ack = ack_none,
- .end = end_none,
- .set_affinity = NULL
+/*
+ * NOP functions
+ */
+static void noop(unsigned int irq)
+{
+}
+
+static unsigned int noop_ret(unsigned int irq)
+{
+ return 0;
+}
+
+/*
+ * Generic no controller implementation
+ */
+struct irq_chip no_irq_chip = {
+ .name = "none",
+ .startup = noop_ret,
+ .shutdown = noop,
+ .enable = noop,
+ .disable = noop,
+ .ack = ack_bad,
+ .end = noop,
};
/*
@@ -73,11 +99,16 @@ irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
-/*
- * Have got an event to handle:
+/**
+ * handle_IRQ_event - irq action chain handler
+ * @irq: the interrupt number
+ * @regs: pointer to a register structure
+ * @action: the interrupt action chain for this irq
+ *
+ * Handles the action chain of an irq event
*/
-fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
- struct irqaction *action)
+irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+ struct irqaction *action)
{
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
@@ -100,15 +131,22 @@ fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
return retval;
}
-/*
- * do_IRQ handles all normal device IRQ's (the special
+/**
+ * __do_IRQ - original all in one highlevel IRQ handler
+ * @irq: the interrupt number
+ * @regs: pointer to a register structure
+ *
+ * __do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
+ *
+ * This is the original x86 implementation which is used for every
+ * interrupt type.
*/
fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
- irq_desc_t *desc = irq_desc + irq;
- struct irqaction * action;
+ struct irq_desc *desc = irq_desc + irq;
+ struct irqaction *action;
unsigned int status;
kstat_this_cpu.irqs[irq]++;
@@ -118,16 +156,16 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
/*
* No locking required for CPU-local interrupts:
*/
- if (desc->handler->ack)
- desc->handler->ack(irq);
+ if (desc->chip->ack)
+ desc->chip->ack(irq);
action_ret = handle_IRQ_event(irq, regs, desc->action);
- desc->handler->end(irq);
+ desc->chip->end(irq);
return 1;
}
spin_lock(&desc->lock);
- if (desc->handler->ack)
- desc->handler->ack(irq);
+ if (desc->chip->ack)
+ desc->chip->ack(irq);
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
@@ -187,7 +225,7 @@ out:
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
- desc->handler->end(irq);
+ desc->chip->end(irq);
spin_unlock(&desc->lock);
return 1;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 46feba630266..08a849a22447 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -4,6 +4,12 @@
extern int noirqdebug;
+/* Set default functions for irq_chip structures: */
+extern void irq_chip_set_defaults(struct irq_chip *chip);
+
+/* Set default handler: */
+extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
+
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
@@ -16,3 +22,43 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
+/*
+ * Debugging printout:
+ */
+
+#include <linux/kallsyms.h>
+
+#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+
+static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+ irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+ printk("->handle_irq(): %p, ", desc->handle_irq);
+ print_symbol("%s\n", (unsigned long)desc->handle_irq);
+ printk("->chip(): %p, ", desc->chip);
+ print_symbol("%s\n", (unsigned long)desc->chip);
+ printk("->action(): %p\n", desc->action);
+ if (desc->action) {
+ printk("->action->handler(): %p, ", desc->action->handler);
+ print_symbol("%s\n", (unsigned long)desc->action->handler);
+ }
+
+ P(IRQ_INPROGRESS);
+ P(IRQ_DISABLED);
+ P(IRQ_PENDING);
+ P(IRQ_REPLAY);
+ P(IRQ_AUTODETECT);
+ P(IRQ_WAITING);
+ P(IRQ_LEVEL);
+ P(IRQ_MASKED);
+#ifdef CONFIG_IRQ_PER_CPU
+ P(IRQ_PER_CPU);
+#endif
+ P(IRQ_NOPROBE);
+ P(IRQ_NOREQUEST);
+ P(IRQ_NOAUTOEN);
+}
+
+#undef P
+
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1279e3499534..9eb1d518ee1c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1,7 +1,8 @@
/*
* linux/kernel/irq/manage.c
*
- * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006 Thomas Gleixner
*
* This file contains driver APIs to the irq subsystem.
*/
@@ -16,12 +17,6 @@
#ifdef CONFIG_SMP
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -42,7 +37,6 @@ void synchronize_irq(unsigned int irq)
while (desc->status & IRQ_INPROGRESS)
cpu_relax();
}
-
EXPORT_SYMBOL(synchronize_irq);
#endif
@@ -60,7 +54,7 @@ EXPORT_SYMBOL(synchronize_irq);
*/
void disable_irq_nosync(unsigned int irq)
{
- irq_desc_t *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
if (irq >= NR_IRQS)
@@ -69,11 +63,10 @@ void disable_irq_nosync(unsigned int irq)
spin_lock_irqsave(&desc->lock, flags);
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
- desc->handler->disable(irq);
+ desc->chip->disable(irq);
}
spin_unlock_irqrestore(&desc->lock, flags);
}
-
EXPORT_SYMBOL(disable_irq_nosync);
/**
@@ -90,7 +83,7 @@ EXPORT_SYMBOL(disable_irq_nosync);
*/
void disable_irq(unsigned int irq)
{
- irq_desc_t *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_desc + irq;
if (irq >= NR_IRQS)
return;
@@ -99,7 +92,6 @@ void disable_irq(unsigned int irq)
if (desc->action)
synchronize_irq(irq);
}
-
EXPORT_SYMBOL(disable_irq);
/**
@@ -114,7 +106,7 @@ EXPORT_SYMBOL(disable_irq);
*/
void enable_irq(unsigned int irq)
{
- irq_desc_t *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
if (irq >= NR_IRQS)
@@ -123,17 +115,15 @@ void enable_irq(unsigned int irq)
spin_lock_irqsave(&desc->lock, flags);
switch (desc->depth) {
case 0:
+ printk(KERN_WARNING "Unablanced enable_irq(%d)\n", irq);
WARN_ON(1);
break;
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
- desc->status = status;
- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = status | IRQ_REPLAY;
- hw_resend_irq(desc->handler,irq);
- }
- desc->handler->enable(irq);
+ /* Prevent probing on this irq: */
+ desc->status = status | IRQ_NOPROBE;
+ check_irq_resend(desc, irq);
/* fall-through */
}
default:
@@ -141,9 +131,29 @@ void enable_irq(unsigned int irq)
}
spin_unlock_irqrestore(&desc->lock, flags);
}
-
EXPORT_SYMBOL(enable_irq);
+/**
+ * set_irq_wake - control irq power management wakeup
+ * @irq: interrupt to control
+ * @on: enable/disable power management wakeup
+ *
+ * Enable/disable power management wakeup mode
+ */
+int set_irq_wake(unsigned int irq, unsigned int on)
+{
+ struct irq_desc *desc = irq_desc + irq;
+ unsigned long flags;
+ int ret = -ENXIO;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (desc->chip->set_wake)
+ ret = desc->chip->set_wake(irq, on);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(set_irq_wake);
+
/*
* Internal function that tells the architecture code whether a
* particular irq has been exclusively allocated or is available
@@ -153,7 +163,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
{
struct irqaction *action;
- if (irq >= NR_IRQS)
+ if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
return 0;
action = irq_desc[irq].action;
@@ -164,11 +174,22 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
return !action;
}
+void compat_irq_chip_set_default_handler(struct irq_desc *desc)
+{
+ /*
+ * If the architecture still has not overridden
+ * the flow handler then zap the default. This
+ * should catch incorrect flow-type setting.
+ */
+ if (desc->handle_irq == &handle_bad_irq)
+ desc->handle_irq = NULL;
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
*/
-int setup_irq(unsigned int irq, struct irqaction * new)
+int setup_irq(unsigned int irq, struct irqaction *new)
{
struct irq_desc *desc = irq_desc + irq;
struct irqaction *old, **p;
@@ -178,7 +199,7 @@ int setup_irq(unsigned int irq, struct irqaction * new)
if (irq >= NR_IRQS)
return -EINVAL;
- if (desc->handler == &no_irq_type)
+ if (desc->chip == &no_irq_chip)
return -ENOSYS;
/*
* Some drivers like serial.c use request_irq() heavily,
@@ -200,14 +221,21 @@ int setup_irq(unsigned int irq, struct irqaction * new)
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock,flags);
+ spin_lock_irqsave(&desc->lock, flags);
p = &desc->action;
- if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & new->flags & SA_SHIRQ))
+ old = *p;
+ if (old) {
+ /*
+ * Can't share interrupts unless both agree to and are
+ * the same type (level, edge, polarity). So both flag
+ * fields must have SA_SHIRQ set and the bits which
+ * set the trigger type must match.
+ */
+ if (!((old->flags & new->flags) & SA_SHIRQ) ||
+ ((old->flags ^ new->flags) & SA_TRIGGER_MASK))
goto mismatch;
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
/* All handlers must agree on per-cpuness */
if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
goto mismatch;
@@ -222,20 +250,44 @@ int setup_irq(unsigned int irq, struct irqaction * new)
}
*p = new;
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
if (new->flags & SA_PERCPU_IRQ)
desc->status |= IRQ_PER_CPU;
#endif
if (!shared) {
- desc->depth = 0;
- desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
- IRQ_WAITING | IRQ_INPROGRESS);
- if (desc->handler->startup)
- desc->handler->startup(irq);
- else
- desc->handler->enable(irq);
+ irq_chip_set_defaults(desc->chip);
+
+ /* Setup the type (level, edge polarity) if configured: */
+ if (new->flags & SA_TRIGGER_MASK) {
+ if (desc->chip && desc->chip->set_type)
+ desc->chip->set_type(irq,
+ new->flags & SA_TRIGGER_MASK);
+ else
+ /*
+ * SA_TRIGGER_* but the PIC does not support
+ * multiple flow-types?
+ */
+ printk(KERN_WARNING "setup_irq(%d) SA_TRIGGER"
+ "set. No set_type function available\n",
+ irq);
+ } else
+ compat_irq_chip_set_default_handler(desc);
+
+ desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+ IRQ_INPROGRESS);
+
+ if (!(desc->status & IRQ_NOAUTOEN)) {
+ desc->depth = 0;
+ desc->status &= ~IRQ_DISABLED;
+ if (desc->chip->startup)
+ desc->chip->startup(irq);
+ else
+ desc->chip->enable(irq);
+ } else
+ /* Undo nested disables: */
+ desc->depth = 1;
}
- spin_unlock_irqrestore(&desc->lock,flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
new->irq = irq;
register_irq_proc(irq);
@@ -278,10 +330,10 @@ void free_irq(unsigned int irq, void *dev_id)
return;
desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
+ spin_lock_irqsave(&desc->lock, flags);
p = &desc->action;
for (;;) {
- struct irqaction * action = *p;
+ struct irqaction *action = *p;
if (action) {
struct irqaction **pp = p;
@@ -295,18 +347,18 @@ void free_irq(unsigned int irq, void *dev_id)
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
- if (desc->handler->release)
- desc->handler->release(irq, dev_id);
+ if (desc->chip->release)
+ desc->chip->release(irq, dev_id);
#endif
if (!desc->action) {
desc->status |= IRQ_DISABLED;
- if (desc->handler->shutdown)
- desc->handler->shutdown(irq);
+ if (desc->chip->shutdown)
+ desc->chip->shutdown(irq);
else
- desc->handler->disable(irq);
+ desc->chip->disable(irq);
}
- spin_unlock_irqrestore(&desc->lock,flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
/* Make sure it's not being used on another CPU */
@@ -314,12 +366,11 @@ void free_irq(unsigned int irq, void *dev_id)
kfree(action);
return;
}
- printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
+ printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
+ spin_unlock_irqrestore(&desc->lock, flags);
return;
}
}
-
EXPORT_SYMBOL(free_irq);
/**
@@ -353,9 +404,9 @@ EXPORT_SYMBOL(free_irq);
*/
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long irqflags, const char * devname, void *dev_id)
+ unsigned long irqflags, const char *devname, void *dev_id)
{
- struct irqaction * action;
+ struct irqaction *action;
int retval;
/*
@@ -368,6 +419,8 @@ int request_irq(unsigned int irq,
return -EINVAL;
if (irq >= NR_IRQS)
return -EINVAL;
+ if (irq_desc[irq].status & IRQ_NOREQUEST)
+ return -EINVAL;
if (!handler)
return -EINVAL;
@@ -390,6 +443,5 @@ int request_irq(unsigned int irq,
return retval;
}
-
EXPORT_SYMBOL(request_irq);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a12d00eb5e7c..a57ebe9fa6f6 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -3,19 +3,19 @@
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
- irq_desc_t *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
desc->move_irq = 1;
- pending_irq_cpumask[irq] = mask;
+ irq_desc[irq].pending_mask = mask;
spin_unlock_irqrestore(&desc->lock, flags);
}
void move_native_irq(int irq)
{
+ struct irq_desc *desc = irq_desc + irq;
cpumask_t tmp;
- irq_desc_t *desc = irq_descp(irq);
if (likely(!desc->move_irq))
return;
@@ -30,15 +30,15 @@ void move_native_irq(int irq)
desc->move_irq = 0;
- if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
+ if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
return;
- if (!desc->handler->set_affinity)
+ if (!desc->chip->set_affinity)
return;
assert_spin_locked(&desc->lock);
- cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+ cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
/*
* If there was a valid mask to work with, please
@@ -51,12 +51,12 @@ void move_native_irq(int irq)
*/
if (likely(!cpus_empty(tmp))) {
if (likely(!(desc->status & IRQ_DISABLED)))
- desc->handler->disable(irq);
+ desc->chip->disable(irq);
- desc->handler->set_affinity(irq,tmp);
+ desc->chip->set_affinity(irq,tmp);
if (likely(!(desc->status & IRQ_DISABLED)))
- desc->handler->enable(irq);
+ desc->chip->enable(irq);
}
- cpus_clear(pending_irq_cpumask[irq]);
+ cpus_clear(irq_desc[irq].pending_mask);
}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index afacd6f585fa..607c7809ad01 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,15 +12,10 @@
#include "internals.h"
-static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS];
+static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
-/*
- * The /proc/irq/<irq>/smp_affinity values:
- */
-static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
-
#ifdef CONFIG_GENERIC_PENDING_IRQ
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
@@ -36,15 +31,15 @@ void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
set_balance_irq_affinity(irq, mask_val);
- irq_affinity[irq] = mask_val;
- irq_desc[irq].handler->set_affinity(irq, mask_val);
+ irq_desc[irq].affinity = mask_val;
+ irq_desc[irq].chip->set_affinity(irq, mask_val);
}
#endif
static int irq_affinity_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+ int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);
if (count - len < 2)
return -EINVAL;
@@ -59,7 +54,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
unsigned int irq = (int)(long)data, full_count = count, err;
cpumask_t new_value, tmp;
- if (!irq_desc[irq].handler->set_affinity || no_irq_affinity)
+ if (!irq_desc[irq].chip->set_affinity || no_irq_affinity)
return -EIO;
err = cpumask_parse(buffer, count, new_value);
@@ -102,7 +97,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
{
char name [MAX_NAMELEN];
- if (!irq_dir[irq] || action->dir || !action->name ||
+ if (!irq_desc[irq].dir || action->dir || !action->name ||
!name_unique(irq, action))
return;
@@ -110,7 +105,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
snprintf(name, MAX_NAMELEN, "%s", action->name);
/* create /proc/irq/1234/handler/ */
- action->dir = proc_mkdir(name, irq_dir[irq]);
+ action->dir = proc_mkdir(name, irq_desc[irq].dir);
}
#undef MAX_NAMELEN
@@ -122,22 +117,22 @@ void register_irq_proc(unsigned int irq)
char name [MAX_NAMELEN];
if (!root_irq_dir ||
- (irq_desc[irq].handler == &no_irq_type) ||
- irq_dir[irq])
+ (irq_desc[irq].chip == &no_irq_chip) ||
+ irq_desc[irq].dir)
return;
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
- irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+ irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
#ifdef CONFIG_SMP
{
struct proc_dir_entry *entry;
/* create /proc/irq/<irq>/smp_affinity */
- entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+ entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
if (entry) {
entry->nlink = 1;
@@ -145,7 +140,6 @@ void register_irq_proc(unsigned int irq)
entry->read_proc = irq_affinity_read_proc;
entry->write_proc = irq_affinity_write_proc;
}
- smp_affinity_entry[irq] = entry;
}
#endif
}
@@ -155,7 +149,7 @@ void register_irq_proc(unsigned int irq)
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
if (action->dir)
- remove_proc_entry(action->dir->name, irq_dir[irq]);
+ remove_proc_entry(action->dir->name, irq_desc[irq].dir);
}
void init_irq_proc(void)
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
new file mode 100644
index 000000000000..872f91ba2ce8
--- /dev/null
+++ b/kernel/irq/resend.c
@@ -0,0 +1,78 @@
+/*
+ * linux/kernel/irq/resend.c
+ *
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner
+ *
+ * This file contains the IRQ-resend code
+ *
+ * If the interrupt is waiting to be processed, we try to re-run it.
+ * We can't directly run it from here since the caller might be in an
+ * interrupt-protected region. Not all irq controller chips can
+ * retrigger interrupts at the hardware level, so in those cases
+ * we allow the resending of IRQs via a tasklet.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
+
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+
+/* Bitmap to handle software resend of interrupts: */
+static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+
+/*
+ * Run software resends of IRQ's
+ */
+static void resend_irqs(unsigned long arg)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ while (!bitmap_empty(irqs_resend, NR_IRQS)) {
+ irq = find_first_bit(irqs_resend, NR_IRQS);
+ clear_bit(irq, irqs_resend);
+ desc = irq_desc + irq;
+ local_irq_disable();
+ desc->handle_irq(irq, desc, NULL);
+ local_irq_enable();
+ }
+}
+
+/* Tasklet to handle resend: */
+static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
+
+#endif
+
+/*
+ * IRQ resend
+ *
+ * Is called with interrupts disabled and desc->lock held.
+ */
+void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+{
+ unsigned int status = desc->status;
+
+ /*
+ * Make sure the interrupt is enabled, before resending it:
+ */
+ desc->chip->enable(irq);
+
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status &= ~IRQ_PENDING;
+ desc->status = status | IRQ_REPLAY;
+
+ if (!desc->chip || !desc->chip->retrigger ||
+ !desc->chip->retrigger(irq)) {
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+ /* Set it pending and activate the softirq: */
+ set_bit(irq, irqs_resend);
+ tasklet_schedule(&resend_tasklet);
+#endif
+ }
+ }
+}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b2fb3c18d06b..b483deed311c 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -16,22 +16,20 @@ static int irqfixup __read_mostly;
/*
* Recovery handler for misrouted interrupts.
*/
-
static int misrouted_irq(int irq, struct pt_regs *regs)
{
int i;
- irq_desc_t *desc;
int ok = 0;
int work = 0; /* Did we do work for a real IRQ */
- for(i = 1; i < NR_IRQS; i++) {
+ for (i = 1; i < NR_IRQS; i++) {
+ struct irq_desc *desc = irq_desc + i;
struct irqaction *action;
if (i == irq) /* Already tried */
continue;
- desc = &irq_desc[i];
+
spin_lock(&desc->lock);
- action = desc->action;
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -45,7 +43,9 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
}
/* Honour the normal IRQ locking */
desc->status |= IRQ_INPROGRESS;
+ action = desc->action;
spin_unlock(&desc->lock);
+
while (action) {
/* Only shared IRQ handlers are safe to call */
if (action->flags & SA_SHIRQ) {
@@ -62,9 +62,8 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
/*
* While we were looking for a fixup someone queued a real
- * IRQ clashing with our walk
+ * IRQ clashing with our walk:
*/
-
while ((desc->status & IRQ_PENDING) && action) {
/*
* Perform real IRQ processing for the IRQ we deferred
@@ -80,8 +79,8 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
* If we did actual work for the real IRQ line we must let the
* IRQ controller clean up too
*/
- if(work)
- desc->handler->end(i);
+ if (work && desc->chip && desc->chip->end)
+ desc->chip->end(i);
spin_unlock(&desc->lock);
}
/* So the caller can adjust the irq error counts */
@@ -100,7 +99,8 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
*/
static void
-__report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+__report_bad_irq(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
{
struct irqaction *action;
@@ -113,6 +113,7 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
}
dump_stack();
printk(KERN_ERR "handlers:\n");
+
action = desc->action;
while (action) {
printk(KERN_ERR "[<%p>]", action->handler);
@@ -123,7 +124,8 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
}
}
-static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+static void
+report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
static int count = 100;
@@ -133,8 +135,8 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
}
}
-void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
- struct pt_regs *regs)
+void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret, struct pt_regs *regs)
{
if (unlikely(action_ret != IRQ_HANDLED)) {
desc->irqs_unhandled++;
@@ -166,7 +168,8 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
*/
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->status |= IRQ_DISABLED;
- desc->handler->disable(irq);
+ desc->depth = 1;
+ desc->chip->disable(irq);
}
desc->irqs_unhandled = 0;
}
@@ -177,6 +180,7 @@ int __init noirqdebug_setup(char *str)
{
noirqdebug = 1;
printk(KERN_INFO "IRQ lockup detection disabled\n");
+
return 1;
}
@@ -187,6 +191,7 @@ static int __init irqfixup_setup(char *str)
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
+
return 1;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fbf466a29aa..64aab081153b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,11 +47,17 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
+static atomic_t kprobe_count;
DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+static struct notifier_block kprobe_page_fault_nb = {
+ .notifier_call = kprobe_exceptions_notify,
+ .priority = 0x7fffffff /* we need to be notified first */
+};
+
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -368,16 +374,15 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
*/
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
- struct kprobe *kp;
-
if (p->break_handler) {
- list_for_each_entry_rcu(kp, &old_p->list, list) {
- if (kp->break_handler)
- return -EEXIST;
- }
+ if (old_p->break_handler)
+ return -EEXIST;
list_add_tail_rcu(&p->list, &old_p->list);
+ old_p->break_handler = aggr_break_handler;
} else
list_add_rcu(&p->list, &old_p->list);
+ if (p->post_handler && !old_p->post_handler)
+ old_p->post_handler = aggr_post_handler;
return 0;
}
@@ -390,9 +395,11 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
copy_kprobe(p, ap);
ap->addr = p->addr;
ap->pre_handler = aggr_pre_handler;
- ap->post_handler = aggr_post_handler;
ap->fault_handler = aggr_fault_handler;
- ap->break_handler = aggr_break_handler;
+ if (p->post_handler)
+ ap->post_handler = aggr_post_handler;
+ if (p->break_handler)
+ ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD(&ap->list);
list_add_rcu(&p->list, &ap->list);
@@ -464,6 +471,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
+ if (!ret)
+ atomic_inc(&kprobe_count);
goto out;
}
@@ -474,6 +483,10 @@ static int __kprobes __register_kprobe(struct kprobe *p,
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+ if (atomic_add_return(1, &kprobe_count) == \
+ (ARCH_INACTIVE_KPROBE_COUNT + 1))
+ register_page_fault_notifier(&kprobe_page_fault_nb);
+
arch_arm_kprobe(p);
out:
@@ -536,14 +549,40 @@ valid_p:
kfree(old_p);
}
arch_remove_kprobe(p);
+ } else {
+ mutex_lock(&kprobe_mutex);
+ if (p->break_handler)
+ old_p->break_handler = NULL;
+ if (p->post_handler){
+ list_for_each_entry_rcu(list_p, &old_p->list, list){
+ if (list_p->post_handler){
+ cleanup_p = 2;
+ break;
+ }
+ }
+ if (cleanup_p == 0)
+ old_p->post_handler = NULL;
+ }
+ mutex_unlock(&kprobe_mutex);
}
+
+ /* Call unregister_page_fault_notifier()
+ * if no probes are active
+ */
+ mutex_lock(&kprobe_mutex);
+ if (atomic_add_return(-1, &kprobe_count) == \
+ ARCH_INACTIVE_KPROBE_COUNT)
+ unregister_page_fault_notifier(&kprobe_page_fault_nb);
+ mutex_unlock(&kprobe_mutex);
+ return;
}
static struct notifier_block kprobe_exceptions_nb = {
.notifier_call = kprobe_exceptions_notify,
- .priority = 0x7fffffff /* we need to notified first */
+ .priority = 0x7fffffff /* we need to be notified first */
};
+
int __kprobes register_jprobe(struct jprobe *jp)
{
/* Todo: Verify probepoint is a function entry point */
@@ -652,6 +691,7 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
+ atomic_set(&kprobe_count, 0);
err = arch_init_kprobes();
if (!err)
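
The kprobes hunks above register the page-fault notifier only once the probe count rises past ARCH_INACTIVE_KPROBE_COUNT and unregister it when the count falls back to that baseline. The standalone C sketch below illustrates that counting pattern only; probe_count, BASELINE and the register/unregister stubs are invented stand-ins, not kernel APIs.

/*
 * Illustrative userspace sketch (not kernel code): a notifier is armed only
 * while at least one probe above the baseline is active, and disarmed once
 * the count drops back.
 */
#include <stdio.h>

#define BASELINE 0            /* stands in for ARCH_INACTIVE_KPROBE_COUNT */

static int probe_count;       /* stands in for kprobe_count */
static int notifier_active;

static void register_notifier(void)   { notifier_active = 1; }
static void unregister_notifier(void) { notifier_active = 0; }

static void add_probe(void)
{
	/* First probe past the baseline arms the notifier. */
	if (++probe_count == BASELINE + 1)
		register_notifier();
}

static void remove_probe(void)
{
	/* Last probe above the baseline disarms it again. */
	if (--probe_count == BASELINE)
		unregister_notifier();
}

int main(void)
{
	add_probe();
	printf("after add: notifier_active=%d\n", notifier_active);     /* 1 */
	remove_probe();
	printf("after remove: notifier_active=%d\n", notifier_active);  /* 0 */
	return 0;
}
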
diff --git a/kernel/module.c b/kernel/module.c
index d75275de1c28..99c022ac3d21 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1,4 +1,4 @@
-/* Rewritten by Rusty Russell, on the backs of many others...
+/*
Copyright (C) 2002 Richard Henderson
Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
@@ -40,9 +40,11 @@
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/mutex.h>
+#include <linux/unwind.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/cacheflush.h>
+#include <linux/license.h>
#if 0
#define DEBUGP printk
@@ -120,9 +122,17 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
+extern const struct kernel_symbol __start___ksymtab_unused[];
+extern const struct kernel_symbol __stop___ksymtab_unused[];
+extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
+extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
+extern const struct kernel_symbol __start___ksymtab_gpl_future[];
+extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
+extern const unsigned long __start___kcrctab_unused[];
+extern const unsigned long __start___kcrctab_unused_gpl[];
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
@@ -142,6 +152,17 @@ static const struct kernel_symbol *lookup_symbol(const char *name,
return NULL;
}
+static void printk_unused_warning(const char *name)
+{
+ printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
+ "however this module is using it.\n", name);
+ printk(KERN_WARNING "This symbol will go away in the future.\n");
+ printk(KERN_WARNING "Please evalute if this is the right api to use, "
+ "and if it really is, submit a report the linux kernel "
+ "mailinglist together with submitting your code for "
+ "inclusion.\n");
+}
+
/* Find a symbol, return value, crc and module which owns it */
static unsigned long __find_symbol(const char *name,
struct module **owner,
@@ -184,6 +205,25 @@ static unsigned long __find_symbol(const char *name,
return ks->value;
}
+ ks = lookup_symbol(name, __start___ksymtab_unused,
+ __stop___ksymtab_unused);
+ if (ks) {
+ printk_unused_warning(name);
+ *crc = symversion(__start___kcrctab_unused,
+ (ks - __start___ksymtab_unused));
+ return ks->value;
+ }
+
+ if (gplok)
+ ks = lookup_symbol(name, __start___ksymtab_unused_gpl,
+ __stop___ksymtab_unused_gpl);
+ if (ks) {
+ printk_unused_warning(name);
+ *crc = symversion(__start___kcrctab_unused_gpl,
+ (ks - __start___ksymtab_unused_gpl));
+ return ks->value;
+ }
+
/* Now try modules. */
list_for_each_entry(mod, &modules, list) {
*owner = mod;
@@ -202,6 +242,23 @@ static unsigned long __find_symbol(const char *name,
return ks->value;
}
}
+ ks = lookup_symbol(name, mod->unused_syms, mod->unused_syms + mod->num_unused_syms);
+ if (ks) {
+ printk_unused_warning(name);
+ *crc = symversion(mod->unused_crcs, (ks - mod->unused_syms));
+ return ks->value;
+ }
+
+ if (gplok) {
+ ks = lookup_symbol(name, mod->unused_gpl_syms,
+ mod->unused_gpl_syms + mod->num_unused_gpl_syms);
+ if (ks) {
+ printk_unused_warning(name);
+ *crc = symversion(mod->unused_gpl_crcs,
+ (ks - mod->unused_gpl_syms));
+ return ks->value;
+ }
+ }
ks = lookup_symbol(name, mod->gpl_future_syms,
(mod->gpl_future_syms +
mod->num_gpl_future_syms));
@@ -1051,6 +1108,8 @@ static void free_module(struct module *mod)
remove_sect_attrs(mod);
mod_kobject_remove(mod);
+ unwind_remove_table(mod->unwind_info, 0);
+
/* Arch-specific cleanup. */
module_arch_cleanup(mod);
@@ -1248,16 +1307,6 @@ static void layout_sections(struct module *mod,
}
}
-static inline int license_is_gpl_compatible(const char *license)
-{
- return (strcmp(license, "GPL") == 0
- || strcmp(license, "GPL v2") == 0
- || strcmp(license, "GPL and additional rights") == 0
- || strcmp(license, "Dual BSD/GPL") == 0
- || strcmp(license, "Dual MIT/GPL") == 0
- || strcmp(license, "Dual MPL/GPL") == 0);
-}
-
static void set_license(struct module *mod, const char *license)
{
if (!license)
@@ -1409,10 +1458,27 @@ static struct module *load_module(void __user *umod,
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
char *secstrings, *args, *modmagic, *strtab = NULL;
- unsigned int i, symindex = 0, strindex = 0, setupindex, exindex,
- exportindex, modindex, obsparmindex, infoindex, gplindex,
- crcindex, gplcrcindex, versindex, pcpuindex, gplfutureindex,
- gplfuturecrcindex;
+ unsigned int i;
+ unsigned int symindex = 0;
+ unsigned int strindex = 0;
+ unsigned int setupindex;
+ unsigned int exindex;
+ unsigned int exportindex;
+ unsigned int modindex;
+ unsigned int obsparmindex;
+ unsigned int infoindex;
+ unsigned int gplindex;
+ unsigned int crcindex;
+ unsigned int gplcrcindex;
+ unsigned int versindex;
+ unsigned int pcpuindex;
+ unsigned int gplfutureindex;
+ unsigned int gplfuturecrcindex;
+ unsigned int unwindex = 0;
+ unsigned int unusedindex;
+ unsigned int unusedcrcindex;
+ unsigned int unusedgplindex;
+ unsigned int unusedgplcrcindex;
struct module *mod;
long err = 0;
void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -1493,15 +1559,22 @@ static struct module *load_module(void __user *umod,
exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
+ unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
+ unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
+ unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused");
+ unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl");
setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
+#ifdef ARCH_UNWIND_SECTION_NAME
+ unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME);
+#endif
/* Don't keep modinfo section */
sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -1510,6 +1583,8 @@ static struct module *load_module(void __user *umod,
sechdrs[symindex].sh_flags |= SHF_ALLOC;
sechdrs[strindex].sh_flags |= SHF_ALLOC;
#endif
+ if (unwindex)
+ sechdrs[unwindex].sh_flags |= SHF_ALLOC;
/* Check module struct version now, before we try to use module. */
if (!check_modstruct_version(sechdrs, versindex, mod)) {
@@ -1639,14 +1714,27 @@ static struct module *load_module(void __user *umod,
mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size /
sizeof(*mod->gpl_future_syms);
+ mod->num_unused_syms = sechdrs[unusedindex].sh_size /
+ sizeof(*mod->unused_syms);
+ mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
+ sizeof(*mod->unused_gpl_syms);
mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr;
if (gplfuturecrcindex)
mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr;
+ mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr;
+ if (unusedcrcindex)
+ mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
+ mod->unused_gpl_syms = (void *)sechdrs[unusedgplindex].sh_addr;
+ if (unusedgplcrcindex)
+ mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr;
+
#ifdef CONFIG_MODVERSIONS
if ((mod->num_syms && !crcindex) ||
(mod->num_gpl_syms && !gplcrcindex) ||
- (mod->num_gpl_future_syms && !gplfuturecrcindex)) {
+ (mod->num_gpl_future_syms && !gplfuturecrcindex) ||
+ (mod->num_unused_syms && !unusedcrcindex) ||
+ (mod->num_unused_gpl_syms && !unusedgplcrcindex)) {
printk(KERN_WARNING "%s: No versions for exported symbols."
" Tainting kernel.\n", mod->name);
add_taint(TAINT_FORCED_MODULE);
@@ -1738,6 +1826,11 @@ static struct module *load_module(void __user *umod,
goto arch_cleanup;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
+ /* Size of section 0 is 0, so this works well if no unwind info. */
+ mod->unwind_info = unwind_add_table(mod,
+ (void *)sechdrs[unwindex].sh_addr,
+ sechdrs[unwindex].sh_size);
+
/* Get rid of temporary copy */
vfree(hdr);
@@ -1836,6 +1929,7 @@ sys_init_module(void __user *umod,
mod->state = MODULE_STATE_LIVE;
/* Drop initial reference. */
module_put(mod);
+ unwind_remove_table(mod->unwind_info, 1);
module_free(mod, mod->module_init);
mod->module_init = NULL;
mod->init_size = 0;
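
The module.c changes above add "unused" export tables to __find_symbol()'s lookup cascade and print a warning whenever a module resolves a symbol from one of them. Below is a hedged userspace sketch of that cascade; the table contents and names are made up for illustration and do not correspond to real kernel symbol tables.

/*
 * Minimal sketch: search the normal export table first, then the "unused"
 * table, warning when a symbol is only found in the latter.
 */
#include <stdio.h>
#include <string.h>

struct sym { const char *name; unsigned long value; };

static const struct sym ksymtab[]        = { { "foo", 0x1000 }, { NULL, 0 } };
static const struct sym ksymtab_unused[] = { { "bar", 0x2000 }, { NULL, 0 } };

static const struct sym *lookup(const struct sym *tab, const char *name)
{
	for (; tab->name; tab++)
		if (strcmp(tab->name, name) == 0)
			return tab;
	return NULL;
}

static unsigned long find_symbol(const char *name)
{
	const struct sym *s = lookup(ksymtab, name);

	if (!s) {
		s = lookup(ksymtab_unused, name);
		if (s)
			fprintf(stderr,
				"warning: %s is marked UNUSED and may go away\n",
				name);
	}
	return s ? s->value : 0;
}

int main(void)
{
	printf("foo -> %#lx\n", find_symbol("foo"));  /* found, no warning */
	printf("bar -> %#lx\n", find_symbol("bar"));  /* found, warns on stderr */
	return 0;
}
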
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index f4913c376950..e38e4bac97ca 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/poison.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
@@ -153,13 +154,13 @@ next:
continue;
count++;
cursor = curr->next;
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("\n#%03d: ", count);
printk_lock(lock, filter ? 0 : 1);
goto next;
}
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("\n");
}
@@ -316,7 +317,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
continue;
list_del_init(curr);
DEBUG_OFF();
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("BUG: %s/%d, lock held at task exit time!\n",
task->comm, task->pid);
@@ -325,7 +326,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
printk("exiting task is not even the owner??\n");
return;
}
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
}
/*
@@ -352,7 +353,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
continue;
list_del_init(curr);
DEBUG_OFF();
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
current->comm, current->pid, lock, from, to);
@@ -362,7 +363,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
printk("freeing task is not even the owner??\n");
return;
}
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
}
/*
@@ -381,7 +382,7 @@ void debug_mutex_set_owner(struct mutex *lock,
void debug_mutex_init_waiter(struct mutex_waiter *waiter)
{
- memset(waiter, 0x11, sizeof(*waiter));
+ memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
waiter->magic = waiter;
INIT_LIST_HEAD(&waiter->list);
}
@@ -397,7 +398,7 @@ void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
DEBUG_WARN_ON(!list_empty(&waiter->list));
- memset(waiter, 0x22, sizeof(*waiter));
+ memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index fd384050acb1..a5196c36a5fd 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -46,21 +46,6 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name);
-#define debug_spin_lock(lock) \
- do { \
- local_irq_disable(); \
- if (debug_mutex_on) \
- spin_lock(lock); \
- } while (0)
-
-#define debug_spin_unlock(lock) \
- do { \
- if (debug_mutex_on) \
- spin_unlock(lock); \
- local_irq_enable(); \
- preempt_check_resched(); \
- } while (0)
-
#define debug_spin_lock_save(lock, flags) \
do { \
local_irq_save(flags); \
@@ -68,7 +53,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
spin_lock(lock); \
} while (0)
-#define debug_spin_lock_restore(lock, flags) \
+#define debug_spin_unlock_restore(lock, flags) \
do { \
if (debug_mutex_on) \
spin_unlock(lock); \
@@ -76,20 +61,20 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
preempt_check_resched(); \
} while (0)
-#define spin_lock_mutex(lock) \
+#define spin_lock_mutex(lock, flags) \
do { \
struct mutex *l = container_of(lock, struct mutex, wait_lock); \
\
DEBUG_WARN_ON(in_interrupt()); \
- debug_spin_lock(&debug_mutex_lock); \
+ debug_spin_lock_save(&debug_mutex_lock, flags); \
spin_lock(lock); \
DEBUG_WARN_ON(l->magic != l); \
} while (0)
-#define spin_unlock_mutex(lock) \
+#define spin_unlock_mutex(lock, flags) \
do { \
spin_unlock(lock); \
- debug_spin_unlock(&debug_mutex_lock); \
+ debug_spin_unlock_restore(&debug_mutex_lock, flags); \
} while (0)
#define DEBUG_OFF() \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5449b210d9ed..7043db21bbce 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -125,10 +125,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
struct task_struct *task = current;
struct mutex_waiter waiter;
unsigned int old_val;
+ unsigned long flags;
debug_mutex_init_waiter(&waiter);
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
@@ -157,7 +158,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
if (unlikely(state == TASK_INTERRUPTIBLE &&
signal_pending(task))) {
mutex_remove_waiter(lock, &waiter, task->thread_info);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
return -EINTR;
@@ -165,9 +166,9 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
__set_task_state(task, state);
/* didnt get the lock, go to sleep: */
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
schedule();
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
}
/* got the lock - rejoice! */
@@ -178,7 +179,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
@@ -203,10 +204,11 @@ static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
+ unsigned long flags;
DEBUG_WARN_ON(lock->owner != current_thread_info());
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
/*
* some architectures leave the lock unlocked in the fastpath failure
@@ -231,7 +233,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
debug_mutex_clear_owner(lock);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
@@ -276,9 +278,10 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
+ unsigned long flags;
int prev;
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
prev = atomic_xchg(&lock->count, -1);
if (likely(prev == 1))
@@ -287,7 +290,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
return prev == 1;
}
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 00fe84e7b672..069189947257 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -9,8 +9,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
-#define spin_lock_mutex(lock) spin_lock(lock)
-#define spin_unlock_mutex(lock) spin_unlock(lock)
+#define spin_lock_mutex(lock, flags) \
+ do { spin_lock(lock); (void)(flags); } while (0)
+#define spin_unlock_mutex(lock, flags) \
+ do { spin_unlock(lock); (void)(flags); } while (0)
#define mutex_remove_waiter(lock, waiter, ti) \
__list_del((waiter)->list.prev, (waiter)->list.next)
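
The mutex changes above convert the wait-lock helpers to save and restore IRQ flags in the debug case while the non-debug variants simply discard the flags argument. The sketch below models that macro pair in userspace; pthread mutexes stand in for the spinlock, and the DEBUG_MUTEXES switch and fake_irq_state variable are illustrative assumptions only.

/*
 * Userspace sketch of a flags-passing lock macro pair: with "debugging"
 * enabled the macros save and restore a pretend IRQ-flags word, without it
 * the flags argument is evaluated but otherwise ignored.
 */
#include <pthread.h>
#include <stdio.h>

#define DEBUG_MUTEXES 1

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long fake_irq_state = 0xabc;   /* pretend CPU flags */

#if DEBUG_MUTEXES
# define spin_lock_mutex(lock, flags) \
	do { (flags) = fake_irq_state; pthread_mutex_lock(lock); } while (0)
# define spin_unlock_mutex(lock, flags) \
	do { pthread_mutex_unlock(lock); fake_irq_state = (flags); } while (0)
#else
# define spin_lock_mutex(lock, flags) \
	do { pthread_mutex_lock(lock); (void)(flags); } while (0)
# define spin_unlock_mutex(lock, flags) \
	do { pthread_mutex_unlock(lock); (void)(flags); } while (0)
#endif

int main(void)
{
	unsigned long flags = 0;

	spin_lock_mutex(&wait_lock, flags);
	/* critical section */
	spin_unlock_mutex(&wait_lock, flags);
	printf("flags saved/restored: %#lx\n", fake_irq_state);
	return 0;
}
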
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index fc311a4673a2..857b4fa09124 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -38,13 +38,22 @@ config PM_DEBUG
config PM_TRACE
bool "Suspend/resume event tracing"
- depends on PM && PM_DEBUG && X86_32
- default y
+ depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL
+ default n
---help---
This enables some cheesy code to save the last PM event point in the
RTC across reboots, so that you can debug a machine that just hangs
during suspend (or more commonly, during resume).
+ To use this debugging feature, you should attempt to suspend the machine,
+ then reboot it, and then run
+
+ dmesg -s 1000000 | grep 'hash matches'
+
+ CAUTION: this option will cause your machine's real-time clock to be
+ set to an invalid time after a resume.
+
+
config SOFTWARE_SUSPEND
bool "Software Suspend"
depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 98c41423f3b1..57a792982fb9 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -105,10 +105,6 @@ extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
-extern unsigned int count_special_pages(void);
-extern int save_special_mem(void);
-extern int restore_special_mem(void);
-
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3d9284100b22..24c96f354231 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,90 +39,8 @@ static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
-struct arch_saveable_page {
- unsigned long start;
- unsigned long end;
- char *data;
- struct arch_saveable_page *next;
-};
-static struct arch_saveable_page *arch_pages;
-
-int swsusp_add_arch_pages(unsigned long start, unsigned long end)
-{
- struct arch_saveable_page *tmp;
-
- while (start < end) {
- tmp = kzalloc(sizeof(struct arch_saveable_page), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- tmp->start = start;
- tmp->end = ((start >> PAGE_SHIFT) + 1) << PAGE_SHIFT;
- if (tmp->end > end)
- tmp->end = end;
- tmp->next = arch_pages;
- start = tmp->end;
- arch_pages = tmp;
- }
- return 0;
-}
-
-static unsigned int count_arch_pages(void)
-{
- unsigned int count = 0;
- struct arch_saveable_page *tmp = arch_pages;
- while (tmp) {
- count++;
- tmp = tmp->next;
- }
- return count;
-}
-
-static int save_arch_mem(void)
-{
- char *kaddr;
- struct arch_saveable_page *tmp = arch_pages;
- int offset;
-
- pr_debug("swsusp: Saving arch specific memory");
- while (tmp) {
- tmp->data = (char *)__get_free_page(GFP_ATOMIC);
- if (!tmp->data)
- return -ENOMEM;
- offset = tmp->start - (tmp->start & PAGE_MASK);
- /* arch pages might haven't a 'struct page' */
- kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
- memcpy(tmp->data + offset, kaddr + offset,
- tmp->end - tmp->start);
- kunmap_atomic(kaddr, KM_USER0);
-
- tmp = tmp->next;
- }
- return 0;
-}
-
-static int restore_arch_mem(void)
-{
- char *kaddr;
- struct arch_saveable_page *tmp = arch_pages;
- int offset;
-
- while (tmp) {
- if (!tmp->data)
- continue;
- offset = tmp->start - (tmp->start & PAGE_MASK);
- kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
- memcpy(kaddr + offset, tmp->data + offset,
- tmp->end - tmp->start);
- kunmap_atomic(kaddr, KM_USER0);
- free_page((long)tmp->data);
- tmp->data = NULL;
- tmp = tmp->next;
- }
- return 0;
-}
-
#ifdef CONFIG_HIGHMEM
-static unsigned int count_highmem_pages(void)
+unsigned int count_highmem_pages(void)
{
struct zone *zone;
unsigned long zone_pfn;
@@ -199,7 +117,7 @@ static int save_highmem_zone(struct zone *zone)
return 0;
}
-static int save_highmem(void)
+int save_highmem(void)
{
struct zone *zone;
int res = 0;
@@ -216,7 +134,7 @@ static int save_highmem(void)
return 0;
}
-static int restore_highmem(void)
+int restore_highmem(void)
{
printk("swsusp: Restoring Highmem\n");
while (highmem_copy) {
@@ -238,29 +156,6 @@ static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif
-unsigned int count_special_pages(void)
-{
- return count_arch_pages() + count_highmem_pages();
-}
-
-int save_special_mem(void)
-{
- int ret;
- ret = save_arch_mem();
- if (!ret)
- ret = save_highmem();
- return ret;
-}
-
-int restore_special_mem(void)
-{
- int ret;
- ret = restore_arch_mem();
- if (!ret)
- ret = restore_highmem();
- return ret;
-}
-
static int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
@@ -286,6 +181,7 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
return 0;
page = pfn_to_page(pfn);
+ BUG_ON(PageReserved(page) && PageNosave(page));
if (PageNosave(page))
return 0;
if (PageReserved(page) && pfn_is_nosave(pfn))
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index f0ee4e7780d6..17f669c83012 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -62,6 +62,16 @@ unsigned long image_size = 500 * 1024 * 1024;
int in_suspend __nosavedata = 0;
+#ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void);
+int save_highmem(void);
+int restore_highmem(void);
+#else
+static inline int save_highmem(void) { return 0; }
+static inline int restore_highmem(void) { return 0; }
+static inline unsigned int count_highmem_pages(void) { return 0; }
+#endif
+
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
@@ -182,7 +192,7 @@ int swsusp_shrink_memory(void)
printk("Shrinking memory... ");
do {
- size = 2 * count_special_pages();
+ size = 2 * count_highmem_pages();
size += size / 50 + count_data_pages();
size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
PAGES_FOR_IO;
@@ -226,7 +236,7 @@ int swsusp_suspend(void)
goto Enable_irqs;
}
- if ((error = save_special_mem())) {
+ if ((error = save_highmem())) {
printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
goto Restore_highmem;
}
@@ -237,7 +247,7 @@ int swsusp_suspend(void)
/* Restore control flow magically appears here */
restore_processor_state();
Restore_highmem:
- restore_special_mem();
+ restore_highmem();
device_power_up();
Enable_irqs:
local_irq_enable();
@@ -263,7 +273,7 @@ int swsusp_resume(void)
*/
swsusp_free();
restore_processor_state();
- restore_special_mem();
+ restore_highmem();
touch_softlockup_watchdog();
device_power_up();
local_irq_enable();
diff --git a/kernel/profile.c b/kernel/profile.c
index 68afe121e507..5a730fdb1a2c 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
}
#ifdef CONFIG_HOTPLUG_CPU
-static int profile_cpu_callback(struct notifier_block *info,
+static int __devinit profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 921c22ad16e4..335c5b932e14 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,8 +120,18 @@ int ptrace_check_attach(struct task_struct *child, int kill)
static int may_attach(struct task_struct *task)
{
- if (!task->mm)
- return -EPERM;
+ /* May we inspect the given task?
+ * This check is used both for attaching with ptrace
+ * and for allowing access to sensitive information in /proc.
+ *
+ * ptrace_attach denies several cases that /proc allows
+ * because setting up the necessary parent/child relationship
+ * or halting the specified task is impossible.
+ */
+ int dumpable = 0;
+ /* Don't let security modules deny introspection */
+ if (task == current)
+ return 0;
if (((current->uid != task->euid) ||
(current->uid != task->suid) ||
(current->uid != task->uid) ||
@@ -130,7 +140,9 @@ static int may_attach(struct task_struct *task)
(current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
return -EPERM;
smp_rmb();
- if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
+ if (task->mm)
+ dumpable = task->mm->dumpable;
+ if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
return security_ptrace(current, task);
@@ -176,6 +188,8 @@ repeat:
goto repeat;
}
+ if (!task->mm)
+ goto bad;
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
goto bad;
@@ -200,7 +214,7 @@ out:
return retval;
}
-void __ptrace_detach(struct task_struct *child, unsigned int data)
+static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
child->exit_code = data;
/* .. re-parent .. */
@@ -219,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
ptrace_disable(child);
write_lock_irq(&tasklist_lock);
+ /* protect against de_thread()->release_task() */
if (child->ptrace)
__ptrace_detach(child, data);
write_unlock_irq(&tasklist_lock);
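
The ptrace.c change above stops rejecting targets without an mm outright in may_attach(); instead such tasks are treated as non-dumpable, so access still requires CAP_SYS_PTRACE. The sketch below illustrates only that dumpable check, assuming simplified stand-in types and a hypothetical has_cap_sys_ptrace() helper.

/*
 * Userspace sketch: a task with no mm (a kernel thread) is treated as
 * non-dumpable rather than rejected outright, and non-dumpable targets
 * require a privileged caller.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_mm   { int dumpable; };
struct fake_task { struct fake_mm *mm; };

static bool has_cap_sys_ptrace(void) { return false; }  /* pretend unprivileged */

static int may_attach(const struct fake_task *task)
{
	int dumpable = 0;

	if (task->mm)                      /* kernel threads have no mm */
		dumpable = task->mm->dumpable;
	if (!dumpable && !has_cap_sys_ptrace())
		return -1;                 /* stands in for -EPERM */
	return 0;
}

int main(void)
{
	struct fake_mm mm = { .dumpable = 1 };
	struct fake_task user_task = { .mm = &mm };
	struct fake_task kernel_thread = { .mm = NULL };

	printf("user task: %d\n", may_attach(&user_task));         /* 0 */
	printf("kernel thread: %d\n", may_attach(&kernel_thread)); /* -1 */
	return 0;
}
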
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 20e9710fc21c..f464f5ae3f11 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -182,6 +182,15 @@ long rcu_batches_completed(void)
return rcu_ctrlblk.completed;
}
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_ctrlblk.completed;
+}
+
static void rcu_barrier_callback(struct rcu_head *notused)
{
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -539,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
-static int rcu_cpu_notify(struct notifier_block *self,
+static int __devinit rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -556,7 +565,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block rcu_nb = {
+static struct notifier_block __devinitdata rcu_nb = {
.notifier_call = rcu_cpu_notify,
};
@@ -619,6 +628,7 @@ module_param(qlowmark, int, 0);
module_param(rsinterval, int, 0);
#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);
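
The rcupdate.c hunk above exports a bh-flavour completion counter for debug and statistics. As a rough illustration of how such a counter is typically consumed (sample, wait, compare), here is a self-contained sketch; the counter and the fake_grace_period() helper are placeholders, not real RCU code.

/*
 * Sketch: detect that at least one "grace period" has elapsed by diffing a
 * monotonically increasing completion counter.
 */
#include <stdio.h>

static long batches_completed_bh;

static long rcu_batches_completed_bh(void) { return batches_completed_bh; }
static void fake_grace_period(void)        { batches_completed_bh++; }

int main(void)
{
	long before = rcu_batches_completed_bh();

	fake_grace_period();               /* stands in for waiting for RCU */

	if (rcu_batches_completed_bh() != before)
		printf("at least one bh grace period completed\n");
	return 0;
}
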
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 8154e7589d12..4d1c3d247127 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1,5 +1,5 @@
/*
- * Read-Copy Update /proc-based torture test facility
+ * Read-Copy Update module-based torture test facility
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -53,6 +53,7 @@ static int stat_interval; /* Interval between stats, in seconds. */
static int verbose; /* Print more debug info. */
static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
+static char *torture_type = "rcu"; /* What to torture. */
module_param(nreaders, int, 0);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
@@ -64,13 +65,16 @@ module_param(test_no_idle_hz, bool, 0);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-#define TORTURE_FLAG "rcutorture: "
+module_param(torture_type, charp, 0);
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh)");
+
+#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
- do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
+ do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
- do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
+ do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
- do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0)
+ do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
static char printk_buf[4096];
@@ -139,28 +143,6 @@ rcu_torture_free(struct rcu_torture *p)
spin_unlock_bh(&rcu_torture_lock);
}
-static void
-rcu_torture_cb(struct rcu_head *p)
-{
- int i;
- struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
-
- if (fullstop) {
- /* Test is ending, just drop callbacks on the floor. */
- /* The next initialization will pick up the pieces. */
- return;
- }
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- rcu_torture_free(rp);
- } else
- call_rcu(p, rcu_torture_cb);
-}
-
struct rcu_random_state {
unsigned long rrs_state;
unsigned long rrs_count;
@@ -191,6 +173,119 @@ rcu_random(struct rcu_random_state *rrsp)
}
/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_torture_ops {
+ void (*init)(void);
+ void (*cleanup)(void);
+ int (*readlock)(void);
+ void (*readunlock)(int idx);
+ int (*completed)(void);
+ void (*deferredfree)(struct rcu_torture *p);
+ int (*stats)(char *page);
+ char *name;
+};
+static struct rcu_torture_ops *cur_ops = NULL;
+
+/*
+ * Definitions for rcu torture testing.
+ */
+
+static int rcu_torture_read_lock(void)
+{
+ rcu_read_lock();
+ return 0;
+}
+
+static void rcu_torture_read_unlock(int idx)
+{
+ rcu_read_unlock();
+}
+
+static int rcu_torture_completed(void)
+{
+ return rcu_batches_completed();
+}
+
+static void
+rcu_torture_cb(struct rcu_head *p)
+{
+ int i;
+ struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
+
+ if (fullstop) {
+ /* Test is ending, just drop callbacks on the floor. */
+ /* The next initialization will pick up the pieces. */
+ return;
+ }
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+ } else
+ cur_ops->deferredfree(rp);
+}
+
+static void rcu_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops rcu_ops = {
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferredfree = rcu_torture_deferred_free,
+ .stats = NULL,
+ .name = "rcu"
+};
+
+/*
+ * Definitions for rcu_bh torture testing.
+ */
+
+static int rcu_bh_torture_read_lock(void)
+{
+ rcu_read_lock_bh();
+ return 0;
+}
+
+static void rcu_bh_torture_read_unlock(int idx)
+{
+ rcu_read_unlock_bh();
+}
+
+static int rcu_bh_torture_completed(void)
+{
+ return rcu_batches_completed_bh();
+}
+
+static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops rcu_bh_ops = {
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferredfree = rcu_bh_torture_deferred_free,
+ .stats = NULL,
+ .name = "rcu_bh"
+};
+
+static struct rcu_torture_ops *torture_ops[] =
+ { &rcu_ops, &rcu_bh_ops, NULL };
+
+/*
* RCU torture writer kthread. Repeatedly substitutes a new structure
* for that pointed to by rcu_torture_current, freeing the old structure
* after a series of grace periods (the "pipeline").
@@ -209,8 +304,6 @@ rcu_torture_writer(void *arg)
do {
schedule_timeout_uninterruptible(1);
- if (rcu_batches_completed() == oldbatch)
- continue;
if ((rp = rcu_torture_alloc()) == NULL)
continue;
rp->rtort_pipe_count = 0;
@@ -225,10 +318,10 @@ rcu_torture_writer(void *arg)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
- call_rcu(&old_rp->rtort_rcu, rcu_torture_cb);
+ cur_ops->deferredfree(old_rp);
}
rcu_torture_current_version++;
- oldbatch = rcu_batches_completed();
+ oldbatch = cur_ops->completed();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
while (!kthread_should_stop())
@@ -246,6 +339,7 @@ static int
rcu_torture_reader(void *arg)
{
int completed;
+ int idx;
DEFINE_RCU_RANDOM(rand);
struct rcu_torture *p;
int pipe_count;
@@ -254,12 +348,12 @@ rcu_torture_reader(void *arg)
set_user_nice(current, 19);
do {
- rcu_read_lock();
- completed = rcu_batches_completed();
+ idx = cur_ops->readlock();
+ completed = cur_ops->completed();
p = rcu_dereference(rcu_torture_current);
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
- rcu_read_unlock();
+ cur_ops->readunlock(idx);
schedule_timeout_interruptible(HZ);
continue;
}
@@ -273,14 +367,14 @@ rcu_torture_reader(void *arg)
pipe_count = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_count)[pipe_count];
- completed = rcu_batches_completed() - completed;
+ completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_batch)[completed];
preempt_enable();
- rcu_read_unlock();
+ cur_ops->readunlock(idx);
schedule();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
@@ -311,7 +405,7 @@ rcu_torture_printk(char *page)
if (pipesummary[i] != 0)
break;
}
- cnt += sprintf(&page[cnt], "rcutorture: ");
+ cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
"rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d",
@@ -324,7 +418,7 @@ rcu_torture_printk(char *page)
atomic_read(&n_rcu_torture_mberror));
if (atomic_read(&n_rcu_torture_mberror) != 0)
cnt += sprintf(&page[cnt], " !!!");
- cnt += sprintf(&page[cnt], "\nrcutorture: ");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
if (i > 1) {
cnt += sprintf(&page[cnt], "!!! ");
atomic_inc(&n_rcu_torture_error);
@@ -332,17 +426,19 @@ rcu_torture_printk(char *page)
cnt += sprintf(&page[cnt], "Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
- cnt += sprintf(&page[cnt], "\nrcutorture: ");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt], "Reader Batch: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
- cnt += sprintf(&page[cnt], "\nrcutorture: ");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
cnt += sprintf(&page[cnt], " %d",
atomic_read(&rcu_torture_wcount[i]));
}
cnt += sprintf(&page[cnt], "\n");
+ if (cur_ops->stats != NULL)
+ cnt += cur_ops->stats(&page[cnt]);
return cnt;
}
@@ -444,11 +540,11 @@ rcu_torture_shuffle(void *arg)
static inline void
rcu_torture_print_module_parms(char *tag)
{
- printk(KERN_ALERT TORTURE_FLAG "--- %s: nreaders=%d "
+ printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d "
"stat_interval=%d verbose=%d test_no_idle_hz=%d "
"shuffle_interval = %d\n",
- tag, nrealreaders, stat_interval, verbose, test_no_idle_hz,
- shuffle_interval);
+ torture_type, tag, nrealreaders, stat_interval, verbose,
+ test_no_idle_hz, shuffle_interval);
}
static void
@@ -493,6 +589,9 @@ rcu_torture_cleanup(void)
rcu_barrier();
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+
+ if (cur_ops->cleanup != NULL)
+ cur_ops->cleanup();
if (atomic_read(&n_rcu_torture_error))
rcu_torture_print_module_parms("End of test: FAILURE");
else
@@ -508,6 +607,20 @@ rcu_torture_init(void)
/* Process args and tell the world that the torturer is on the job. */
+ for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
+ cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cur_ops->name) == 0) {
+ break;
+ }
+ }
+ if (cur_ops == NULL) {
+ printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
+ torture_type);
+ return (-EINVAL);
+ }
+ if (cur_ops->init != NULL)
+ cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+
if (nreaders >= 0)
nrealreaders = nreaders;
else
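
The rcutorture changes above introduce an operations vector so that each RCU flavour supplies the same set of callbacks and the torture_type module parameter selects one by name at init time. The userspace sketch below mirrors only that selection logic; the two dummy flavours and their printf bodies are invented for illustration.

/*
 * Sketch of the ops-vector pattern: pick an implementation by name from a
 * NULL-terminated table, then drive it through its callbacks.
 */
#include <stdio.h>
#include <string.h>

struct torture_ops {
	const char *name;
	int  (*readlock)(void);
	void (*readunlock)(int idx);
};

static int  rcu_lock(void)      { printf("rcu read lock\n");    return 0; }
static void rcu_unlock(int idx) { (void)idx; printf("rcu read unlock\n"); }
static int  bh_lock(void)       { printf("rcu_bh read lock\n"); return 0; }
static void bh_unlock(int idx)  { (void)idx; printf("rcu_bh read unlock\n"); }

static struct torture_ops rcu_ops    = { "rcu",    rcu_lock, rcu_unlock };
static struct torture_ops rcu_bh_ops = { "rcu_bh", bh_lock,  bh_unlock  };
static struct torture_ops *torture_ops[] = { &rcu_ops, &rcu_bh_ops, NULL };

int main(void)
{
	const char *torture_type = "rcu_bh";   /* would come from module_param */
	struct torture_ops *cur_ops = NULL;
	int i, idx;

	for (i = 0; torture_ops[i]; i++)
		if (strcmp(torture_type, torture_ops[i]->name) == 0) {
			cur_ops = torture_ops[i];
			break;
		}
	if (!cur_ops) {
		fprintf(stderr, "invalid torture type: %s\n", torture_type);
		return 1;
	}

	idx = cur_ops->readlock();
	cur_ops->readunlock(idx);
	return 0;
}
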
diff --git a/kernel/resource.c b/kernel/resource.c
index e3080fcc66a3..bf1130d81b7f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -23,20 +23,18 @@
struct resource ioport_resource = {
.name = "PCI IO",
- .start = 0x0000,
+ .start = 0,
.end = IO_SPACE_LIMIT,
.flags = IORESOURCE_IO,
};
-
EXPORT_SYMBOL(ioport_resource);
struct resource iomem_resource = {
.name = "PCI mem",
- .start = 0UL,
- .end = ~0UL,
+ .start = 0,
+ .end = -1,
.flags = IORESOURCE_MEM,
};
-
EXPORT_SYMBOL(iomem_resource);
static DEFINE_RWLOCK(resource_lock);
@@ -83,10 +81,10 @@ static int r_show(struct seq_file *m, void *v)
for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
if (p->parent == root)
break;
- seq_printf(m, "%*s%0*lx-%0*lx : %s\n",
+ seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
depth * 2, "",
- width, r->start,
- width, r->end,
+ width, (unsigned long long) r->start,
+ width, (unsigned long long) r->end,
r->name ? r->name : "<BAD>");
return 0;
}
@@ -151,8 +149,8 @@ __initcall(ioresources_init);
/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
- unsigned long start = new->start;
- unsigned long end = new->end;
+ resource_size_t start = new->start;
+ resource_size_t end = new->end;
struct resource *tmp, **p;
if (end < start)
@@ -232,15 +230,52 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Finds the lowest memory resource that exists within [res->start, res->end).
+ * The caller must specify res->start, res->end, and res->flags.
+ * If found, returns 0 and res is overwritten; if not found, returns -1.
+ */
+int find_next_system_ram(struct resource *res)
+{
+ resource_size_t start, end;
+ struct resource *p;
+
+ BUG_ON(!res);
+
+ start = res->start;
+ end = res->end;
+
+ read_lock(&resource_lock);
+ for (p = iomem_resource.child; p ; p = p->sibling) {
+ /* system ram is just marked as IORESOURCE_MEM */
+ if (p->flags != res->flags)
+ continue;
+ if (p->start > end) {
+ p = NULL;
+ break;
+ }
+ if (p->start >= start)
+ break;
+ }
+ read_unlock(&resource_lock);
+ if (!p)
+ return -1;
+ /* copy data */
+ res->start = p->start;
+ res->end = p->end;
+ return 0;
+}
+#endif
+
/*
* Find empty slot in the resource tree given range and alignment.
*/
static int find_resource(struct resource *root, struct resource *new,
- unsigned long size,
- unsigned long min, unsigned long max,
- unsigned long align,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
void (*alignf)(void *, struct resource *,
- unsigned long, unsigned long),
+ resource_size_t, resource_size_t),
void *alignf_data)
{
struct resource *this = root->child;
@@ -282,11 +317,10 @@ static int find_resource(struct resource *root, struct resource *new,
* Allocate empty slot in the resource tree given range and alignment.
*/
int allocate_resource(struct resource *root, struct resource *new,
- unsigned long size,
- unsigned long min, unsigned long max,
- unsigned long align,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
void (*alignf)(void *, struct resource *,
- unsigned long, unsigned long),
+ resource_size_t, resource_size_t),
void *alignf_data)
{
int err;
@@ -378,10 +412,10 @@ EXPORT_SYMBOL(insert_resource);
* arguments. Returns -EBUSY if it can't fit. Existing children of
* the resource are assumed to be immutable.
*/
-int adjust_resource(struct resource *res, unsigned long start, unsigned long size)
+int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
struct resource *tmp, *parent = res->parent;
- unsigned long end = start + size - 1;
+ resource_size_t end = start + size - 1;
int result = -EBUSY;
write_lock(&resource_lock);
@@ -428,7 +462,9 @@ EXPORT_SYMBOL(adjust_resource);
*
* Release-region releases a matching busy region.
*/
-struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
+struct resource * __request_region(struct resource *parent,
+ resource_size_t start, resource_size_t n,
+ const char *name)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
@@ -464,7 +500,8 @@ struct resource * __request_region(struct resource *parent, unsigned long start,
EXPORT_SYMBOL(__request_region);
-int __check_region(struct resource *parent, unsigned long start, unsigned long n)
+int __check_region(struct resource *parent, resource_size_t start,
+ resource_size_t n)
{
struct resource * res;
@@ -479,10 +516,11 @@ int __check_region(struct resource *parent, unsigned long start, unsigned long n
EXPORT_SYMBOL(__check_region);
-void __release_region(struct resource *parent, unsigned long start, unsigned long n)
+void __release_region(struct resource *parent, resource_size_t start,
+ resource_size_t n)
{
struct resource **p;
- unsigned long end;
+ resource_size_t end;
p = &parent->child;
end = start + n - 1;
@@ -511,7 +549,9 @@ void __release_region(struct resource *parent, unsigned long start, unsigned lon
write_unlock(&resource_lock);
- printk(KERN_WARNING "Trying to free nonexistent resource <%08lx-%08lx>\n", start, end);
+ printk(KERN_WARNING "Trying to free nonexistent resource "
+ "<%016llx-%016llx>\n", (unsigned long long)start,
+ (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);
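
The resource.c hunks above add find_next_system_ram(), which walks the address-ordered children of iomem_resource and returns the first region whose start lies inside the requested window. The sketch below models that search over an invented RAM table, assuming a flat array instead of the kernel's sibling list.

/*
 * Sketch: report the first region of an ordered table whose start falls
 * inside [r->start, r->end], narrowing *r to it, or -1 if there is none.
 */
#include <stdio.h>

struct res { unsigned long long start, end; };

static const struct res ram[] = {
	{ 0x00000000ULL, 0x0009ffffULL },
	{ 0x00100000ULL, 0x3fffffffULL },
};

static int find_next_system_ram(struct res *r)
{
	unsigned long i;

	for (i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
		if (ram[i].start > r->end)
			break;                    /* past the window: give up */
		if (ram[i].start >= r->start) {
			r->start = ram[i].start;  /* copy data, as in the patch */
			r->end   = ram[i].end;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct res r = { 0x000a0000ULL, 0xffffffffULL };

	if (find_next_system_ram(&r) == 0)
		printf("next RAM region: %#llx-%#llx\n", r.start, r.end);
	else
		printf("no RAM in range\n");
	return 0;
}
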
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
new file mode 100644
index 000000000000..4aa8a2c9f453
--- /dev/null
+++ b/kernel/rtmutex-debug.c
@@ -0,0 +1,513 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This code is based on the rt.c implementation in the preempt-rt tree.
+ * Portions of said code are
+ *
+ * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Copyright (C) 2006 Esben Nielsen
+ * Copyright (C) 2006 Kihon Technologies Inc.,
+ * Steven Rostedt <rostedt@goodmis.org>
+ *
+ * See rt.c in preempt-rt for proper credits and further information
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+
+#include "rtmutex_common.h"
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# include "rtmutex-debug.h"
+#else
+# include "rtmutex.h"
+#endif
+
+# define TRACE_WARN_ON(x) WARN_ON(x)
+# define TRACE_BUG_ON(x) BUG_ON(x)
+
+# define TRACE_OFF() \
+do { \
+ if (rt_trace_on) { \
+ rt_trace_on = 0; \
+ console_verbose(); \
+ if (spin_is_locked(&current->pi_lock)) \
+ spin_unlock(&current->pi_lock); \
+ if (spin_is_locked(&current->held_list_lock)) \
+ spin_unlock(&current->held_list_lock); \
+ } \
+} while (0)
+
+# define TRACE_OFF_NOLOCK() \
+do { \
+ if (rt_trace_on) { \
+ rt_trace_on = 0; \
+ console_verbose(); \
+ } \
+} while (0)
+
+# define TRACE_BUG_LOCKED() \
+do { \
+ TRACE_OFF(); \
+ BUG(); \
+} while (0)
+
+# define TRACE_WARN_ON_LOCKED(c) \
+do { \
+ if (unlikely(c)) { \
+ TRACE_OFF(); \
+ WARN_ON(1); \
+ } \
+} while (0)
+
+# define TRACE_BUG_ON_LOCKED(c) \
+do { \
+ if (unlikely(c)) \
+ TRACE_BUG_LOCKED(); \
+} while (0)
+
+#ifdef CONFIG_SMP
+# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
+#else
+# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
+#endif
+
+/*
+ * deadlock detection flag. We turn it off when we detect
+ * the first problem because we don't want to recurse back
+ * into the tracing code when doing error printk or
+ * executing a BUG():
+ */
+int rt_trace_on = 1;
+
+void deadlock_trace_off(void)
+{
+ rt_trace_on = 0;
+}
+
+static void printk_task(task_t *p)
+{
+ if (p)
+ printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_task_short(task_t *p)
+{
+ if (p)
+ printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_lock(struct rt_mutex *lock, int print_owner)
+{
+ if (lock->name)
+ printk(" [%p] {%s}\n",
+ lock, lock->name);
+ else
+ printk(" [%p] {%s:%d}\n",
+ lock, lock->file, lock->line);
+
+ if (print_owner && rt_mutex_owner(lock)) {
+ printk(".. ->owner: %p\n", lock->owner);
+ printk(".. held by: ");
+ printk_task(rt_mutex_owner(lock));
+ printk("\n");
+ }
+ if (rt_mutex_owner(lock)) {
+ printk("... acquired at: ");
+ print_symbol("%s\n", lock->acquire_ip);
+ }
+}
+
+static void printk_waiter(struct rt_mutex_waiter *w)
+{
+ printk("-------------------------\n");
+ printk("| waiter struct %p:\n", w);
+ printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
+ w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
+ w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
+ w->list_entry.prio);
+ printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
+ w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
+ w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
+ w->pi_list_entry.prio);
+ printk("\n| lock:\n");
+ printk_lock(w->lock, 1);
+ printk("| w->ti->task:\n");
+ printk_task(w->task);
+ printk("| blocked at: ");
+ print_symbol("%s\n", w->ip);
+ printk("-------------------------\n");
+}
+
+static void show_task_locks(task_t *p)
+{
+ switch (p->state) {
+ case TASK_RUNNING: printk("R"); break;
+ case TASK_INTERRUPTIBLE: printk("S"); break;
+ case TASK_UNINTERRUPTIBLE: printk("D"); break;
+ case TASK_STOPPED: printk("T"); break;
+ case EXIT_ZOMBIE: printk("Z"); break;
+ case EXIT_DEAD: printk("X"); break;
+ default: printk("?"); break;
+ }
+ printk_task(p);
+ if (p->pi_blocked_on) {
+ struct rt_mutex *lock = p->pi_blocked_on->lock;
+
+ printk(" blocked on:");
+ printk_lock(lock, 1);
+ } else
+ printk(" (not blocked)\n");
+}
+
+void rt_mutex_show_held_locks(task_t *task, int verbose)
+{
+ struct list_head *curr, *cursor = NULL;
+ struct rt_mutex *lock;
+ task_t *t;
+ unsigned long flags;
+ int count = 0;
+
+ if (!rt_trace_on)
+ return;
+
+ if (verbose) {
+ printk("------------------------------\n");
+ printk("| showing all locks held by: | (");
+ printk_task_short(task);
+ printk("):\n");
+ printk("------------------------------\n");
+ }
+
+next:
+ spin_lock_irqsave(&task->held_list_lock, flags);
+ list_for_each(curr, &task->held_list_head) {
+ if (cursor && curr != cursor)
+ continue;
+ lock = list_entry(curr, struct rt_mutex, held_list_entry);
+ t = rt_mutex_owner(lock);
+ WARN_ON(t != task);
+ count++;
+ cursor = curr->next;
+ spin_unlock_irqrestore(&task->held_list_lock, flags);
+
+ printk("\n#%03d: ", count);
+ printk_lock(lock, 0);
+ goto next;
+ }
+ spin_unlock_irqrestore(&task->held_list_lock, flags);
+
+ printk("\n");
+}
+
+void rt_mutex_show_all_locks(void)
+{
+ task_t *g, *p;
+ int count = 10;
+ int unlock = 1;
+
+ printk("\n");
+ printk("----------------------\n");
+ printk("| showing all tasks: |\n");
+ printk("----------------------\n");
+
+ /*
+ * Here we try to get the tasklist_lock as hard as possible,
+ * if not successful after 2 seconds we ignore it (but keep
+ * trying). This is to enable a debug printout even if a
+ * tasklist_lock-holding task deadlocks or crashes.
+ */
+retry:
+ if (!read_trylock(&tasklist_lock)) {
+ if (count == 10)
+ printk("hm, tasklist_lock locked, retrying... ");
+ if (count) {
+ count--;
+ printk(" #%d", 10-count);
+ mdelay(200);
+ goto retry;
+ }
+ printk(" ignoring it.\n");
+ unlock = 0;
+ }
+ if (count != 10)
+ printk(" locked it.\n");
+
+ do_each_thread(g, p) {
+ show_task_locks(p);
+ if (!unlock)
+ if (read_trylock(&tasklist_lock))
+ unlock = 1;
+ } while_each_thread(g, p);
+
+ printk("\n");
+
+ printk("-----------------------------------------\n");
+ printk("| showing all locks held in the system: |\n");
+ printk("-----------------------------------------\n");
+
+ do_each_thread(g, p) {
+ rt_mutex_show_held_locks(p, 0);
+ if (!unlock)
+ if (read_trylock(&tasklist_lock))
+ unlock = 1;
+ } while_each_thread(g, p);
+
+
+ printk("=============================================\n\n");
+
+ if (unlock)
+ read_unlock(&tasklist_lock);
+}
+
+void rt_mutex_debug_check_no_locks_held(task_t *task)
+{
+ struct rt_mutex_waiter *w;
+ struct list_head *curr;
+ struct rt_mutex *lock;
+
+ if (!rt_trace_on)
+ return;
+ if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
+ printk("BUG: PI priority boost leaked!\n");
+ printk_task(task);
+ printk("\n");
+ }
+ if (list_empty(&task->held_list_head))
+ return;
+
+ spin_lock(&task->pi_lock);
+ plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
+ TRACE_OFF();
+
+ printk("hm, PI interest held at exit time? Task:\n");
+ printk_task(task);
+ printk_waiter(w);
+ return;
+ }
+ spin_unlock(&task->pi_lock);
+
+ list_for_each(curr, &task->held_list_head) {
+ lock = list_entry(curr, struct rt_mutex, held_list_entry);
+
+ printk("BUG: %s/%d, lock held at task exit time!\n",
+ task->comm, task->pid);
+ printk_lock(lock, 1);
+ if (rt_mutex_owner(lock) != task)
+ printk("exiting task is not even the owner??\n");
+ }
+}
+
+int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+ const void *to = from + len;
+ struct list_head *curr;
+ struct rt_mutex *lock;
+ unsigned long flags;
+ void *lock_addr;
+
+ if (!rt_trace_on)
+ return 0;
+
+ spin_lock_irqsave(&current->held_list_lock, flags);
+ list_for_each(curr, &current->held_list_head) {
+ lock = list_entry(curr, struct rt_mutex, held_list_entry);
+ lock_addr = lock;
+ if (lock_addr < from || lock_addr >= to)
+ continue;
+ TRACE_OFF();
+
+ printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
+ current->comm, current->pid, lock, from, to);
+ dump_stack();
+ printk_lock(lock, 1);
+ if (rt_mutex_owner(lock) != current)
+ printk("freeing task is not even the owner??\n");
+ return 1;
+ }
+ spin_unlock_irqrestore(&current->held_list_lock, flags);
+
+ return 0;
+}
+
+void rt_mutex_debug_task_free(struct task_struct *task)
+{
+ WARN_ON(!plist_head_empty(&task->pi_waiters));
+ WARN_ON(task->pi_blocked_on);
+}
+
+/*
+ * We fill out the fields in the waiter to store the information about
+ * the deadlock. We print when we return. act_waiter can be NULL in
+ * case of a remove waiter operation.
+ */
+void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+ struct rt_mutex *lock)
+{
+ struct task_struct *task;
+
+ if (!rt_trace_on || detect || !act_waiter)
+ return;
+
+ task = rt_mutex_owner(act_waiter->lock);
+ if (task && task != current) {
+ act_waiter->deadlock_task_pid = task->pid;
+ act_waiter->deadlock_lock = lock;
+ }
+}
+
+void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
+{
+ struct task_struct *task;
+
+ if (!waiter->deadlock_lock || !rt_trace_on)
+ return;
+
+ task = find_task_by_pid(waiter->deadlock_task_pid);
+ if (!task)
+ return;
+
+ TRACE_OFF_NOLOCK();
+
+ printk("\n============================================\n");
+ printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk( "--------------------------------------------\n");
+ printk("%s/%d is deadlocking current task %s/%d\n\n",
+ task->comm, task->pid, current->comm, current->pid);
+
+ printk("\n1) %s/%d is trying to acquire this lock:\n",
+ current->comm, current->pid);
+ printk_lock(waiter->lock, 1);
+
+ printk("... trying at: ");
+ print_symbol("%s\n", waiter->ip);
+
+ printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+ printk_lock(waiter->deadlock_lock, 1);
+
+ rt_mutex_show_held_locks(current, 1);
+ rt_mutex_show_held_locks(task, 1);
+
+ printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+ show_stack(task, NULL);
+ printk("\n%s/%d's [current] stackdump:\n\n",
+ current->comm, current->pid);
+ dump_stack();
+ rt_mutex_show_all_locks();
+ printk("[ turning off deadlock detection."
+ "Please report this trace. ]\n\n");
+ local_irq_disable();
+}
+
+void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&current->held_list_lock, flags);
+ list_add_tail(&lock->held_list_entry, &current->held_list_head);
+ spin_unlock_irqrestore(&current->held_list_lock, flags);
+
+ lock->acquire_ip = ip;
+ }
+}
+
+void debug_rt_mutex_unlock(struct rt_mutex *lock)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
+ TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&current->held_list_lock, flags);
+ list_del_init(&lock->held_list_entry);
+ spin_unlock_irqrestore(&current->held_list_lock, flags);
+ }
+}
+
+void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+ struct task_struct *powner __IP_DECL__)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&powner->held_list_lock, flags);
+ list_add_tail(&lock->held_list_entry, &powner->held_list_head);
+ spin_unlock_irqrestore(&powner->held_list_lock, flags);
+
+ lock->acquire_ip = ip;
+ }
+}
+
+void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ struct task_struct *owner = rt_mutex_owner(lock);
+
+ TRACE_WARN_ON_LOCKED(!owner);
+ TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&owner->held_list_lock, flags);
+ list_del_init(&lock->held_list_entry);
+ spin_unlock_irqrestore(&owner->held_list_lock, flags);
+ }
+}
+
+void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+{
+ memset(waiter, 0x11, sizeof(*waiter));
+ plist_node_init(&waiter->list_entry, MAX_PRIO);
+ plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
+}
+
+void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+{
+ TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
+ TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ TRACE_WARN_ON(waiter->task);
+ memset(waiter, 0x22, sizeof(*waiter));
+}
+
+void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+ void *addr = lock;
+
+ if (rt_trace_on) {
+ rt_mutex_debug_check_no_locks_freed(addr,
+ sizeof(struct rt_mutex));
+ INIT_LIST_HEAD(&lock->held_list_entry);
+ lock->name = name;
+ }
+}
+
+void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
+{
+}
+
+void rt_mutex_deadlock_account_unlock(struct task_struct *task)
+{
+}
+
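
The new rtmutex-debug.c above keeps a per-task list of held rt-mutexes: locks are appended on acquire, removed on release, and anything left over at exit time is reported as a bug. The sketch below shows only that bookkeeping idea; the fixed array, the single implicit "task", and all names are illustrative assumptions.

/*
 * Sketch of held-lock tracking: record every lock taken, forget it on
 * unlock, and report whatever remains when the "task" exits.
 */
#include <stdio.h>

#define MAX_HELD 8

struct dbg_lock { const char *name; };

static struct dbg_lock *held[MAX_HELD];
static int nheld;

static void debug_lock(struct dbg_lock *l)
{
	if (nheld < MAX_HELD)
		held[nheld++] = l;
}

static void debug_unlock(struct dbg_lock *l)
{
	int i;

	for (i = 0; i < nheld; i++)
		if (held[i] == l) {
			held[i] = held[--nheld];   /* unordered removal */
			return;
		}
	printf("BUG: unlock of %s which is not held\n", l->name);
}

static void check_no_locks_held(void)
{
	int i;

	for (i = 0; i < nheld; i++)
		printf("BUG: lock %s held at task exit time!\n", held[i]->name);
}

int main(void)
{
	struct dbg_lock a = { "a" }, b = { "b" };

	debug_lock(&a);
	debug_lock(&b);
	debug_unlock(&a);
	check_no_locks_held();     /* reports that "b" is still held */
	return 0;
}
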
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
new file mode 100644
index 000000000000..7612fbc62d70
--- /dev/null
+++ b/kernel/rtmutex-debug.h
@@ -0,0 +1,37 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains macros used solely by rtmutex.c. Debug version.
+ */
+
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+extern void
+rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
+extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
+extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+ struct task_struct *powner __IP_DECL__);
+extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
+extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+ struct rt_mutex *lock);
+extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
+# define debug_rt_mutex_reset_waiter(w) \
+ do { (w)->deadlock_lock = NULL; } while (0)
+
+static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+ int detect)
+{
+ return (waiter != NULL);
+}
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
new file mode 100644
index 000000000000..e82c2f848249
--- /dev/null
+++ b/kernel/rtmutex-tester.c
@@ -0,0 +1,440 @@
+/*
+ * RT-Mutex-tester: scriptable tester for rt mutexes
+ *
+ * started by Thomas Gleixner:
+ *
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ */
+#include <linux/config.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+
+#include "rtmutex.h"
+
+#define MAX_RT_TEST_THREADS 8
+#define MAX_RT_TEST_MUTEXES 8
+
+static spinlock_t rttest_lock;
+static atomic_t rttest_event;
+
+struct test_thread_data {
+ int opcode;
+ int opdata;
+ int mutexes[MAX_RT_TEST_MUTEXES];
+ int bkl;
+ int event;
+ struct sys_device sysdev;
+};
+
+static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
+static task_t *threads[MAX_RT_TEST_THREADS];
+static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
+
+enum test_opcodes {
+ RTTEST_NOP = 0,
+ RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
+ RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
+ RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
+ RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
+ RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
+ RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
+ RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
+ RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
+ RTTEST_LOCKBKL, /* 9 Lock BKL */
+ RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
+ RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
+ RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
+ RTTEST_RESET = 99, /* 99 Reset all pending operations */
+};
+
+static int handle_op(struct test_thread_data *td, int lockwakeup)
+{
+ int i, id, ret = -EINVAL;
+
+ switch(td->opcode) {
+
+ case RTTEST_NOP:
+ return 0;
+
+ case RTTEST_LOCKCONT:
+ td->mutexes[td->opdata] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ return 0;
+
+ case RTTEST_RESET:
+ for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
+ if (td->mutexes[i] == 4) {
+ rt_mutex_unlock(&mutexes[i]);
+ td->mutexes[i] = 0;
+ }
+ }
+
+ if (!lockwakeup && td->bkl == 4) {
+ unlock_kernel();
+ td->bkl = 0;
+ }
+ return 0;
+
+ case RTTEST_RESETEVENT:
+ atomic_set(&rttest_event, 0);
+ return 0;
+
+ default:
+ if (lockwakeup)
+ return ret;
+ }
+
+ switch(td->opcode) {
+
+ case RTTEST_LOCK:
+ case RTTEST_LOCKNOWAIT:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
+ return ret;
+
+ td->mutexes[id] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ rt_mutex_lock(&mutexes[id]);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = 4;
+ return 0;
+
+ case RTTEST_LOCKINT:
+ case RTTEST_LOCKINTNOWAIT:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
+ return ret;
+
+ td->mutexes[id] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = ret ? 0 : 4;
+ return ret ? -EINTR : 0;
+
+ case RTTEST_UNLOCK:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+ return ret;
+
+ td->event = atomic_add_return(1, &rttest_event);
+ rt_mutex_unlock(&mutexes[id]);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = 0;
+ return 0;
+
+ case RTTEST_LOCKBKL:
+ if (td->bkl)
+ return 0;
+ td->bkl = 1;
+ lock_kernel();
+ td->bkl = 4;
+ return 0;
+
+ case RTTEST_UNLOCKBKL:
+ if (td->bkl != 4)
+ break;
+ unlock_kernel();
+ td->bkl = 0;
+ return 0;
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Schedule replacement for rtsem_down(). Only called for threads with
+ * PF_MUTEX_TESTER set.
+ *
+ * This allows us to have fine-grained control over the event flow.
+ *
+ */
+void schedule_rt_mutex_test(struct rt_mutex *mutex)
+{
+ int tid, op, dat;
+ struct test_thread_data *td;
+
+ /* We have to look up the task */
+ for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
+ if (threads[tid] == current)
+ break;
+ }
+
+ BUG_ON(tid == MAX_RT_TEST_THREADS);
+
+ td = &thread_data[tid];
+
+ op = td->opcode;
+ dat = td->opdata;
+
+ switch (op) {
+ case RTTEST_LOCK:
+ case RTTEST_LOCKINT:
+ case RTTEST_LOCKNOWAIT:
+ case RTTEST_LOCKINTNOWAIT:
+ if (mutex != &mutexes[dat])
+ break;
+
+ if (td->mutexes[dat] != 1)
+ break;
+
+ td->mutexes[dat] = 2;
+ td->event = atomic_add_return(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKBKL:
+ default:
+ break;
+ }
+
+ schedule();
+
+
+ switch (op) {
+ case RTTEST_LOCK:
+ case RTTEST_LOCKINT:
+ if (mutex != &mutexes[dat])
+ return;
+
+ if (td->mutexes[dat] != 2)
+ return;
+
+ td->mutexes[dat] = 3;
+ td->event = atomic_add_return(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKNOWAIT:
+ case RTTEST_LOCKINTNOWAIT:
+ if (mutex != &mutexes[dat])
+ return;
+
+ if (td->mutexes[dat] != 2)
+ return;
+
+ td->mutexes[dat] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ return;
+
+ case RTTEST_LOCKBKL:
+ return;
+ default:
+ return;
+ }
+
+ td->opcode = 0;
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (td->opcode > 0) {
+ int ret;
+
+ set_current_state(TASK_RUNNING);
+ ret = handle_op(td, 1);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (td->opcode == RTTEST_LOCKCONT)
+ break;
+ td->opcode = ret;
+ }
+
+ /* Wait for the next command to be executed */
+ schedule();
+ }
+
+ /* Restore previous command and data */
+ td->opcode = op;
+ td->opdata = dat;
+}
+
+static int test_func(void *data)
+{
+ struct test_thread_data *td = data;
+ int ret;
+
+ current->flags |= PF_MUTEX_TESTER;
+ allow_signal(SIGHUP);
+
+ for(;;) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (td->opcode > 0) {
+ set_current_state(TASK_RUNNING);
+ ret = handle_op(td, 0);
+ set_current_state(TASK_INTERRUPTIBLE);
+ td->opcode = ret;
+ }
+
+ /* Wait for the next command to be executed */
+ schedule();
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ if(kthread_should_stop())
+ break;
+ }
+ return 0;
+}
+
+/**
+ * sysfs_test_command - interface for test commands
+ * @dev: thread reference
+ * @buf: command for actual step
+ * @count: length of buffer
+ *
+ * command syntax:
+ *
+ * opcode:data
+ */
+static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ struct sched_param schedpar;
+ struct test_thread_data *td;
+ char cmdbuf[32];
+ int op, dat, tid, ret;
+
+ td = container_of(dev, struct test_thread_data, sysdev);
+ tid = td->sysdev.id;
+
+ /* strings from sysfs write are not 0 terminated! */
+ if (count >= sizeof(cmdbuf))
+ return -EINVAL;
+
+ /* strip off the \n: */
+ if (buf[count-1] == '\n')
+ count--;
+ if (count < 1)
+ return -EINVAL;
+
+ memcpy(cmdbuf, buf, count);
+ cmdbuf[count] = 0;
+
+ if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
+ return -EINVAL;
+
+ switch (op) {
+ case RTTEST_SCHEDOT:
+ schedpar.sched_priority = 0;
+ ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
+ if (ret)
+ return ret;
+ set_user_nice(current, 0);
+ break;
+
+ case RTTEST_SCHEDRT:
+ schedpar.sched_priority = dat;
+ ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
+ if (ret)
+ return ret;
+ break;
+
+ case RTTEST_SIGNAL:
+ send_sig(SIGHUP, threads[tid], 0);
+ break;
+
+ default:
+ if (td->opcode > 0)
+ return -EBUSY;
+ td->opdata = dat;
+ td->opcode = op;
+ wake_up_process(threads[tid]);
+ }
+
+ return count;
+}
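A minimal userspace sketch of driving this interface (illustration only: the sysfs path is an assumption following the usual sysdev layout for the "rttest" class registered below, and opcode 3 is RTTEST_LOCK from the enum above):

/*
 * Userspace sketch: tell tester thread 0 to take test mutex 1.
 * The sysfs path is assumed from the "rttest" sysdev class; adjust it
 * if the class is registered elsewhere on your system.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/rttest/rttest0/command", "w");

	if (!f)
		return 1;
	fprintf(f, "3:1\n");		/* opcode:data -> RTTEST_LOCK, mutex 1 */
	fclose(f);
	return 0;
}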
+
+/**
+ * sysfs_test_status - sysfs interface for rt tester
+ * @dev: thread to query
+ * @buf: char buffer to be filled with thread status info
+ */
+static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
+{
+ struct test_thread_data *td;
+ char *curr = buf;
+ task_t *tsk;
+ int i;
+
+ td = container_of(dev, struct test_thread_data, sysdev);
+ tsk = threads[td->sysdev.id];
+
+ spin_lock(&rttest_lock);
+
+ curr += sprintf(curr,
+ "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
+ td->opcode, td->event, tsk->state,
+ (MAX_RT_PRIO - 1) - tsk->prio,
+ (MAX_RT_PRIO - 1) - tsk->normal_prio,
+ tsk->pi_blocked_on, td->bkl);
+
+ for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
+ curr += sprintf(curr, "%d", td->mutexes[i]);
+
+ spin_unlock(&rttest_lock);
+
+ curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
+ mutexes[td->sysdev.id].owner);
+
+ return curr - buf;
+}
+
+static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
+static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
+
+static struct sysdev_class rttest_sysclass = {
+ set_kset_name("rttest"),
+};
+
+static int init_test_thread(int id)
+{
+ thread_data[id].sysdev.cls = &rttest_sysclass;
+ thread_data[id].sysdev.id = id;
+
+ threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
+ if (IS_ERR(threads[id]))
+ return PTR_ERR(threads[id]);
+
+ return sysdev_register(&thread_data[id].sysdev);
+}
+
+static int init_rttest(void)
+{
+ int ret, i;
+
+ spin_lock_init(&rttest_lock);
+
+ for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
+ rt_mutex_init(&mutexes[i]);
+
+ ret = sysdev_class_register(&rttest_sysclass);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
+ ret = init_test_thread(i);
+ if (ret)
+ break;
+ ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
+ if (ret)
+ break;
+ ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
+ if (ret)
+ break;
+ }
+
+ printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
+
+ return ret;
+}
+
+device_initcall(init_rttest);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
new file mode 100644
index 000000000000..45d61016da57
--- /dev/null
+++ b/kernel/rtmutex.c
@@ -0,0 +1,990 @@
+/*
+ * RT-Mutexes: simple blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner.
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+#include "rtmutex_common.h"
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# include "rtmutex-debug.h"
+#else
+# include "rtmutex.h"
+#endif
+
+/*
+ * lock->owner state tracking:
+ *
+ * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
+ * are used to keep track of the "owner is pending" and "lock has
+ * waiters" state.
+ *
+ * owner bit1 bit0
+ * NULL 0 0 lock is free (fast acquire possible)
+ * NULL 0 1 invalid state
+ * NULL 1 0 Transitional State*
+ * NULL 1 1 invalid state
+ * taskpointer 0 0 lock is held (fast release possible)
+ * taskpointer 0 1 task is pending owner
+ * taskpointer 1 0 lock is held and has waiters
+ * taskpointer 1 1 task is pending owner and lock has more waiters
+ *
+ * Pending ownership is assigned to the top (highest priority)
+ * waiter of the lock, when the lock is released. The thread is woken
+ * up and can now take the lock. Until the lock is taken (bit 0
+ * cleared) a competing higher priority thread can steal the lock
+ * which puts the woken up thread back on the waiters list.
+ *
+ * The fast atomic compare exchange based acquire and release is only
+ * possible when bit 0 and 1 of lock->owner are 0.
+ *
+ * (*) There is a small window of time where the owner can be NULL and the
+ * "lock has waiters" bit is set. This can happen when grabbing the lock.
+ * To prevent a cmpxchg of the owner releasing the lock, we need to set this
+ * bit before looking at the lock, hence the reason this is a transitional
+ * state.
+ */
+
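As a minimal sketch of the state table above (illustration only; it assumes the RT_MUTEX_* bit constants from rtmutex_common.h, which this file includes), the owner field can be decoded like this:

/*
 * Sketch only: decode lock->owner into its three components using the
 * RT_MUTEX_OWNER_PENDING / RT_MUTEX_HAS_WAITERS bits.
 */
static void describe_owner_state(struct rt_mutex *lock)
{
	unsigned long val = (unsigned long)lock->owner;
	struct task_struct *owner =
		(struct task_struct *)(val & ~RT_MUTEX_OWNER_MASKALL);

	printk("owner: %p, pending: %lu, has waiters: %lu\n",
	       owner,
	       val & RT_MUTEX_OWNER_PENDING,
	       (val & RT_MUTEX_HAS_WAITERS) >> 1);
}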
+static void
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
+ unsigned long mask)
+{
+ unsigned long val = (unsigned long)owner | mask;
+
+ if (rt_mutex_has_waiters(lock))
+ val |= RT_MUTEX_HAS_WAITERS;
+
+ lock->owner = (struct task_struct *)val;
+}
+
+static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ lock->owner = (struct task_struct *)
+ ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ if (!rt_mutex_has_waiters(lock))
+ clear_rt_mutex_waiters(lock);
+}
+
+/*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+ */
+#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+ do {
+ owner = *p;
+ } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+}
+#else
+# define rt_mutex_cmpxchg(l,c,n) (0)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ lock->owner = (struct task_struct *)
+ ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+}
+#endif
+
+/*
+ * Calculate task priority from the waiter list priority
+ *
+ * Return task->normal_prio when the waiter list is empty or when
+ * the waiter is not allowed to do priority boosting
+ */
+int rt_mutex_getprio(struct task_struct *task)
+{
+ if (likely(!task_has_pi_waiters(task)))
+ return task->normal_prio;
+
+ return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+ task->normal_prio);
+}
+
+/*
+ * Adjust the priority of a task, after its pi_waiters got modified.
+ *
+ * This can be both boosting and unboosting. task->pi_lock must be held.
+ */
+static void __rt_mutex_adjust_prio(struct task_struct *task)
+{
+ int prio = rt_mutex_getprio(task);
+
+ if (task->prio != prio)
+ rt_mutex_setprio(task, prio);
+}
+
+/*
+ * Adjust task priority (undo boosting). Called from the exit path of
+ * rt_mutex_slowunlock() and rt_mutex_slowlock().
+ *
+ * (Note: We do this outside of the protection of lock->wait_lock to
+ * allow the lock to be taken while or before we readjust the priority
+ * of task. We do not use the spin_xx_mutex() variants here as we are
+ * outside of the debug path.)
+ */
+static void rt_mutex_adjust_prio(struct task_struct *task)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->pi_lock, flags);
+ __rt_mutex_adjust_prio(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+}
+
+/*
+ * Max number of times we'll walk the boosting chain:
+ */
+int max_lock_depth = 1024;
+
+/*
+ * Adjust the priority chain. Also used for deadlock detection.
+ * Decreases the task's usage count by one - may thus free the task.
+ * Returns 0 or -EDEADLK.
+ */
+static int rt_mutex_adjust_prio_chain(task_t *task,
+ int deadlock_detect,
+ struct rt_mutex *orig_lock,
+ struct rt_mutex_waiter *orig_waiter,
+ struct task_struct *top_task
+ __IP_DECL__)
+{
+ struct rt_mutex *lock;
+ struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
+ int detect_deadlock, ret = 0, depth = 0;
+ unsigned long flags;
+
+ detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
+ deadlock_detect);
+
+ /*
+ * The (de)boosting is a step-by-step approach with a lot of
+ * pitfalls. We want this to be preemptible and we want to hold a
+ * maximum of two locks per step. So we have to check
+ * carefully whether things change under us.
+ */
+ again:
+ if (++depth > max_lock_depth) {
+ static int prev_max;
+
+ /*
+ * Print this only once. If the admin changes the limit,
+ * print a new message when reaching the limit again.
+ */
+ if (prev_max != max_lock_depth) {
+ prev_max = max_lock_depth;
+ printk(KERN_WARNING "Maximum lock depth %d reached "
+ "task: %s (%d)\n", max_lock_depth,
+ top_task->comm, top_task->pid);
+ }
+ put_task_struct(task);
+
+ return deadlock_detect ? -EDEADLK : 0;
+ }
+ retry:
+ /*
+ * The task cannot go away as we did a get_task_struct() before!
+ */
+ spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+ /*
+ * Check whether the end of the boosting chain has been
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+ if (!waiter || !waiter->task)
+ goto out_unlock_pi;
+
+ if (top_waiter && (!task_has_pi_waiters(task) ||
+ top_waiter != task_top_pi_waiter(task)))
+ goto out_unlock_pi;
+
+ /*
+ * When deadlock detection is off, we check whether further
+ * priority adjustment is necessary.
+ */
+ if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+ goto out_unlock_pi;
+
+ lock = waiter->lock;
+ if (!spin_trylock(&lock->wait_lock)) {
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ cpu_relax();
+ goto retry;
+ }
+
+ /* Deadlock detection */
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ spin_unlock(&lock->wait_lock);
+ ret = deadlock_detect ? -EDEADLK : 0;
+ goto out_unlock_pi;
+ }
+
+ top_waiter = rt_mutex_top_waiter(lock);
+
+ /* Requeue the waiter */
+ plist_del(&waiter->list_entry, &lock->wait_list);
+ waiter->list_entry.prio = task->prio;
+ plist_add(&waiter->list_entry, &lock->wait_list);
+
+ /* Release the task */
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ put_task_struct(task);
+
+ /* Grab the next task */
+ task = rt_mutex_owner(lock);
+ spin_lock_irqsave(&task->pi_lock, flags);
+
+ if (waiter == rt_mutex_top_waiter(lock)) {
+ /* Boost the owner */
+ plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
+ waiter->pi_list_entry.prio = waiter->list_entry.prio;
+ plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+ __rt_mutex_adjust_prio(task);
+
+ } else if (top_waiter == waiter) {
+ /* Deboost the owner */
+ plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+ waiter = rt_mutex_top_waiter(lock);
+ waiter->pi_list_entry.prio = waiter->list_entry.prio;
+ plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+ __rt_mutex_adjust_prio(task);
+ }
+
+ get_task_struct(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ spin_unlock(&lock->wait_lock);
+
+ if (!detect_deadlock && waiter != top_waiter)
+ goto out_put_task;
+
+ goto again;
+
+ out_unlock_pi:
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ out_put_task:
+ put_task_struct(task);
+ return ret;
+}
+
+/*
+ * Optimization: check if we can steal the lock from the
+ * assigned pending owner [which might not have taken the
+ * lock yet]:
+ */
+static inline int try_to_steal_lock(struct rt_mutex *lock)
+{
+ struct task_struct *pendowner = rt_mutex_owner(lock);
+ struct rt_mutex_waiter *next;
+ unsigned long flags;
+
+ if (!rt_mutex_owner_pending(lock))
+ return 0;
+
+ if (pendowner == current)
+ return 1;
+
+ spin_lock_irqsave(&pendowner->pi_lock, flags);
+ if (current->prio >= pendowner->prio) {
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ return 0;
+ }
+
+ /*
+ * Check if a waiter is enqueued on the pending owner's
+ * pi_waiters list. Remove it and readjust the pending owner's
+ * priority.
+ */
+ if (likely(!rt_mutex_has_waiters(lock))) {
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ return 1;
+ }
+
+ /* No chain handling, pending owner is not blocked on anything: */
+ next = rt_mutex_top_waiter(lock);
+ plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
+ __rt_mutex_adjust_prio(pendowner);
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+
+ /*
+ * We are going to steal the lock and a waiter was
+ * enqueued on the pending owner's pi_waiters queue. So
+ * we have to enqueue this waiter into the
+ * current->pi_waiters list. This covers the case
+ * where current is boosted because it holds another
+ * lock and gets unboosted because the booster is
+ * interrupted, so we would delay a waiter with a higher
+ * priority than current->normal_prio.
+ *
+ * Note: in the rare case of a SCHED_OTHER task changing
+ * its priority and thus stealing the lock, next->task
+ * might be current:
+ */
+ if (likely(next->task != current)) {
+ spin_lock_irqsave(&current->pi_lock, flags);
+ plist_add(&next->pi_list_entry, &current->pi_waiters);
+ __rt_mutex_adjust_prio(current);
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+ }
+ return 1;
+}
+
+/*
+ * Try to take an rt-mutex
+ *
+ * This fails
+ * - when the lock has a real owner
+ * - when a different pending owner exists and has higher priority than current
+ *
+ * Must be called with lock->wait_lock held.
+ */
+static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
+{
+ /*
+ * We have to be careful here if the atomic speedups are
+ * enabled: when
+ * - no other waiter is on the lock, and
+ * - the lock has been released since we did the cmpxchg,
+ * the lock can be released or taken while we are doing the
+ * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+ *
+ * The atomic acquire/release aware variant of
+ * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
+ * the WAITERS bit, the atomic release / acquire can not
+ * happen anymore and lock->wait_lock protects us from the
+ * non-atomic case.
+ *
+ * Note that this might set lock->owner =
+ * RT_MUTEX_HAS_WAITERS in case the lock is not contended
+ * anymore. This is fixed up when we take ownership.
+ * This is the transitional state explained at the top of this file.
+ */
+ mark_rt_mutex_waiters(lock);
+
+ if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
+ return 0;
+
+ /* We got the lock. */
+ debug_rt_mutex_lock(lock __IP__);
+
+ rt_mutex_set_owner(lock, current, 0);
+
+ rt_mutex_deadlock_account_lock(lock, current);
+
+ return 1;
+}
+
+/*
+ * Task blocks on lock.
+ *
+ * Prepare waiter and propagate pi chain
+ *
+ * This must be called with lock->wait_lock held.
+ */
+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ int detect_deadlock
+ __IP_DECL__)
+{
+ struct rt_mutex_waiter *top_waiter = waiter;
+ task_t *owner = rt_mutex_owner(lock);
+ int boost = 0, res;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->pi_lock, flags);
+ __rt_mutex_adjust_prio(current);
+ waiter->task = current;
+ waiter->lock = lock;
+ plist_node_init(&waiter->list_entry, current->prio);
+ plist_node_init(&waiter->pi_list_entry, current->prio);
+
+ /* Get the top priority waiter on the lock */
+ if (rt_mutex_has_waiters(lock))
+ top_waiter = rt_mutex_top_waiter(lock);
+ plist_add(&waiter->list_entry, &lock->wait_list);
+
+ current->pi_blocked_on = waiter;
+
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ if (waiter == rt_mutex_top_waiter(lock)) {
+ spin_lock_irqsave(&owner->pi_lock, flags);
+ plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
+ plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ __rt_mutex_adjust_prio(owner);
+ if (owner->pi_blocked_on) {
+ boost = 1;
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+ }
+ spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+ else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+ spin_lock_irqsave(&owner->pi_lock, flags);
+ if (owner->pi_blocked_on) {
+ boost = 1;
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+ }
+ spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+ if (!boost)
+ return 0;
+
+ spin_unlock(&lock->wait_lock);
+
+ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+ current __IP__);
+
+ spin_lock(&lock->wait_lock);
+
+ return res;
+}
+
+/*
+ * Wake up the next waiter on the lock.
+ *
+ * Remove the top waiter from the current task's waiter list and from
+ * the lock waiter list. Set it as pending owner. Then wake it up.
+ *
+ * Called with lock->wait_lock held.
+ */
+static void wakeup_next_waiter(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter *waiter;
+ struct task_struct *pendowner;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->pi_lock, flags);
+
+ waiter = rt_mutex_top_waiter(lock);
+ plist_del(&waiter->list_entry, &lock->wait_list);
+
+ /*
+ * Remove it from current->pi_waiters. We do not adjust a
+ * possible priority boost right now. We execute wakeup in the
+ * boosted mode and go back to normal after releasing
+ * lock->wait_lock.
+ */
+ plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+ pendowner = waiter->task;
+ waiter->task = NULL;
+
+ rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
+
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ /*
+ * Clear the pi_blocked_on variable and enqueue a possible
+ * waiter into the pi_waiters list of the pending owner. This
+ * prevents that, in case the pending owner gets unboosted, a
+ * waiter with a higher priority than pending-owner->normal_prio
+ * stays blocked on the unboosted (pending) owner.
+ */
+ spin_lock_irqsave(&pendowner->pi_lock, flags);
+
+ WARN_ON(!pendowner->pi_blocked_on);
+ WARN_ON(pendowner->pi_blocked_on != waiter);
+ WARN_ON(pendowner->pi_blocked_on->lock != lock);
+
+ pendowner->pi_blocked_on = NULL;
+
+ if (rt_mutex_has_waiters(lock)) {
+ struct rt_mutex_waiter *next;
+
+ next = rt_mutex_top_waiter(lock);
+ plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
+ }
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+
+ wake_up_process(pendowner);
+}
+
+/*
+ * Remove a waiter from a lock
+ *
+ * Must be called with lock->wait_lock held
+ */
+static void remove_waiter(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter __IP_DECL__)
+{
+ int first = (waiter == rt_mutex_top_waiter(lock));
+ int boost = 0;
+ task_t *owner = rt_mutex_owner(lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->pi_lock, flags);
+ plist_del(&waiter->list_entry, &lock->wait_list);
+ waiter->task = NULL;
+ current->pi_blocked_on = NULL;
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ if (first && owner != current) {
+
+ spin_lock_irqsave(&owner->pi_lock, flags);
+
+ plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ if (rt_mutex_has_waiters(lock)) {
+ struct rt_mutex_waiter *next;
+
+ next = rt_mutex_top_waiter(lock);
+ plist_add(&next->pi_list_entry, &owner->pi_waiters);
+ }
+ __rt_mutex_adjust_prio(owner);
+
+ if (owner->pi_blocked_on) {
+ boost = 1;
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+ }
+ spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+
+ WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+
+ if (!boost)
+ return;
+
+ spin_unlock(&lock->wait_lock);
+
+ rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
+
+ spin_lock(&lock->wait_lock);
+}
+
+/*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+ struct rt_mutex_waiter *waiter;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+ if (!waiter || waiter->list_entry.prio == task->prio) {
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
+/*
+ * Slow path lock function:
+ */
+static int __sched
+rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock __IP_DECL__)
+{
+ struct rt_mutex_waiter waiter;
+ int ret = 0;
+
+ debug_rt_mutex_init_waiter(&waiter);
+ waiter.task = NULL;
+
+ spin_lock(&lock->wait_lock);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock __IP__)) {
+ spin_unlock(&lock->wait_lock);
+ return 0;
+ }
+
+ set_current_state(state);
+
+ /* Setup the timer, when timeout != NULL */
+ if (unlikely(timeout))
+ hrtimer_start(&timeout->timer, timeout->timer.expires,
+ HRTIMER_ABS);
+
+ for (;;) {
+ /* Try to acquire the lock: */
+ if (try_to_take_rt_mutex(lock __IP__))
+ break;
+
+ /*
+ * TASK_INTERRUPTIBLE checks for signals and
+ * timeout. Ignored otherwise.
+ */
+ if (unlikely(state == TASK_INTERRUPTIBLE)) {
+ /* Signal pending? */
+ if (signal_pending(current))
+ ret = -EINTR;
+ if (timeout && !timeout->task)
+ ret = -ETIMEDOUT;
+ if (ret)
+ break;
+ }
+
+ /*
+ * waiter.task is NULL the first time we come here and
+ * when we have been woken up by the previous owner
+ * but the lock got stolen by a higher prio task.
+ */
+ if (!waiter.task) {
+ ret = task_blocks_on_rt_mutex(lock, &waiter,
+ detect_deadlock __IP__);
+ /*
+ * If we got woken up by the owner then start loop
+ * all over without going into schedule to try
+ * to get the lock now:
+ */
+ if (unlikely(!waiter.task))
+ continue;
+
+ if (unlikely(ret))
+ break;
+ }
+
+ spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (waiter.task)
+ schedule_rt_mutex(lock);
+
+ spin_lock(&lock->wait_lock);
+ set_current_state(state);
+ }
+
+ set_current_state(TASK_RUNNING);
+
+ if (unlikely(waiter.task))
+ remove_waiter(lock, &waiter __IP__);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ spin_unlock(&lock->wait_lock);
+
+ /* Remove pending timer: */
+ if (unlikely(timeout))
+ hrtimer_cancel(&timeout->timer);
+
+ /*
+ * Readjust priority if we did not get the lock. We might
+ * have been the pending owner and boosted. Since we did not
+ * take the lock, the PI boost has to go.
+ */
+ if (unlikely(ret))
+ rt_mutex_adjust_prio(current);
+
+ debug_rt_mutex_free_waiter(&waiter);
+
+ return ret;
+}
+
+/*
+ * Slow path try-lock function:
+ */
+static inline int
+rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
+{
+ int ret = 0;
+
+ spin_lock(&lock->wait_lock);
+
+ if (likely(rt_mutex_owner(lock) != current)) {
+
+ ret = try_to_take_rt_mutex(lock __IP__);
+ /*
+ * try_to_take_rt_mutex() sets the lock waiters
+ * bit unconditionally. Clean this up.
+ */
+ fixup_rt_mutex_waiters(lock);
+ }
+
+ spin_unlock(&lock->wait_lock);
+
+ return ret;
+}
+
+/*
+ * Slow path to release a rt-mutex:
+ */
+static void __sched
+rt_mutex_slowunlock(struct rt_mutex *lock)
+{
+ spin_lock(&lock->wait_lock);
+
+ debug_rt_mutex_unlock(lock);
+
+ rt_mutex_deadlock_account_unlock(current);
+
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ wakeup_next_waiter(lock);
+
+ spin_unlock(&lock->wait_lock);
+
+ /* Undo pi boosting if necessary: */
+ rt_mutex_adjust_prio(current);
+}
+
+/*
+ * Debug-aware fast / slowpath lock, trylock and unlock functions.
+ *
+ * The atomic acquire/release ops are compiled away when either the
+ * architecture does not support cmpxchg or when debugging is enabled.
+ */
+static inline int
+rt_mutex_fastlock(struct rt_mutex *lock, int state,
+ int detect_deadlock,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock __IP_DECL__))
+{
+ if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+ return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
+}
+
+static inline int
+rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout, int detect_deadlock,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock __IP_DECL__))
+{
+ if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+ return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
+}
+
+static inline int
+rt_mutex_fasttrylock(struct rt_mutex *lock,
+ int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+{
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 1;
+ }
+ return slowfn(lock __RET_IP__);
+}
+
+static inline void
+rt_mutex_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+ rt_mutex_deadlock_account_unlock(current);
+ else
+ slowfn(lock);
+}
+
+/**
+ * rt_mutex_lock - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ */
+void __sched rt_mutex_lock(struct rt_mutex *lock)
+{
+ might_sleep();
+
+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock);
+
+/**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+ *
+ * @lock: the rt_mutex to be locked
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock)
+{
+ might_sleep();
+
+ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
+ detect_deadlock, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible; the timeout
+ * structure is provided by the caller
+ *
+ * @lock: the rt_mutex to be locked
+ * @timeout: timeout structure or NULL (no timeout)
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+ * -ETIMEDOUT when the timeout expired
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+int
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
+ int detect_deadlock)
+{
+ might_sleep();
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+ detect_deadlock, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+
+/**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ *
+ * Returns 1 on success and 0 on contention
+ */
+int __sched rt_mutex_trylock(struct rt_mutex *lock)
+{
+ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_trylock);
+
+/**
+ * rt_mutex_unlock - unlock a rt_mutex
+ *
+ * @lock: the rt_mutex to be unlocked
+ */
+void __sched rt_mutex_unlock(struct rt_mutex *lock)
+{
+ rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_unlock);
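A minimal sketch of how a kernel user would typically call the API exported above (my_rtlock, my_count and my_update are illustrative names, not part of this file; rt_mutex_init() is the initializer from <linux/rtmutex.h>):

/*
 * Illustrative usage of the exported API.
 */
static struct rt_mutex my_rtlock;
static int my_count;

static int my_example_init(void)
{
	rt_mutex_init(&my_rtlock);
	return 0;
}

static void my_update(void)
{
	rt_mutex_lock(&my_rtlock);	/* may sleep; boosts the owner if we block */
	my_count++;
	rt_mutex_unlock(&my_rtlock);	/* undoes any PI boost we received */
}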
+
+/***
+ * rt_mutex_destroy - mark a mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+void rt_mutex_destroy(struct rt_mutex *lock)
+{
+ WARN_ON(rt_mutex_is_locked(lock));
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ lock->magic = NULL;
+#endif
+}
+
+EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+
+/**
+ * __rt_mutex_init - initialize the rt lock
+ *
+ * @lock: the rt lock to be initialized
+ *
+ * Initialize the rt lock to unlocked state.
+ *
+ * Initializing a locked rt lock is not allowed
+ */
+void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+ lock->owner = NULL;
+ spin_lock_init(&lock->wait_lock);
+ plist_head_init(&lock->wait_list, &lock->wait_lock);
+
+ debug_rt_mutex_init(lock, name);
+}
+EXPORT_SYMBOL_GPL(__rt_mutex_init);
+
+/**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+ * proxy owner
+ *
+ * @lock: the rt_mutex to be locked
+ * @proxy_owner: the task to set as owner
+ *
+ * No locking. The caller has to serialize access itself.
+ * Special API call for PI-futex support
+ */
+void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+{
+ __rt_mutex_init(lock, NULL);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
+ rt_mutex_set_owner(lock, proxy_owner, 0);
+ rt_mutex_deadlock_account_lock(lock, proxy_owner);
+}
+
+/**
+ * rt_mutex_proxy_unlock - release a lock on behalf of owner
+ *
+ * @lock: the rt_mutex to be unlocked
+ *
+ * No locking. The caller has to serialize access itself.
+ * Special API call for PI-futex support
+ */
+void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+{
+ debug_rt_mutex_proxy_unlock(lock);
+ rt_mutex_set_owner(lock, NULL, 0);
+ rt_mutex_deadlock_account_unlock(proxy_owner);
+}
+
+/**
+ * rt_mutex_next_owner - return the next owner of the lock
+ *
+ * @lock: the rt lock to query
+ *
+ * Returns the next owner of the lock or NULL
+ *
+ * Caller has to serialize against other accessors to the lock
+ * itself.
+ *
+ * Special API call for PI-futex support
+ */
+struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
+{
+ if (!rt_mutex_has_waiters(lock))
+ return NULL;
+
+ return rt_mutex_top_waiter(lock)->task;
+}
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
new file mode 100644
index 000000000000..1e0fca13ff72
--- /dev/null
+++ b/kernel/rtmutex.h
@@ -0,0 +1,29 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains macros used solely by rtmutex.c.
+ * Non-debug version.
+ */
+
+#define __IP_DECL__
+#define __IP__
+#define __RET_IP__
+#define rt_mutex_deadlock_check(l) (0)
+#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
+#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
+#define debug_rt_mutex_init_waiter(w) do { } while (0)
+#define debug_rt_mutex_free_waiter(w) do { } while (0)
+#define debug_rt_mutex_lock(l) do { } while (0)
+#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
+#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
+#define debug_rt_mutex_unlock(l) do { } while (0)
+#define debug_rt_mutex_init(m, n) do { } while (0)
+#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
+#define debug_rt_mutex_print_deadlock(w) do { } while (0)
+#define debug_rt_mutex_detect_deadlock(w,d) (d)
+#define debug_rt_mutex_reset_waiter(w) do { } while (0)
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
new file mode 100644
index 000000000000..9c75856e791e
--- /dev/null
+++ b/kernel/rtmutex_common.h
@@ -0,0 +1,123 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains the private data structure and API definitions.
+ */
+
+#ifndef __KERNEL_RTMUTEX_COMMON_H
+#define __KERNEL_RTMUTEX_COMMON_H
+
+#include <linux/rtmutex.h>
+
+/*
+ * The in-kernel rtmutex tester is independent of rtmutex debugging. We
+ * call schedule_rt_mutex_test() instead of schedule() for the tasks which
+ * belong to the tester. That way we can delay the wakeup path of those
+ * threads to provoke lock stealing and testing of complex boosting scenarios.
+ */
+#ifdef CONFIG_RT_MUTEX_TESTER
+
+extern void schedule_rt_mutex_test(struct rt_mutex *lock);
+
+#define schedule_rt_mutex(_lock) \
+ do { \
+ if (!(current->flags & PF_MUTEX_TESTER)) \
+ schedule(); \
+ else \
+ schedule_rt_mutex_test(_lock); \
+ } while (0)
+
+#else
+# define schedule_rt_mutex(_lock) schedule()
+#endif
+
+/*
+ * This is the control structure for tasks blocked on a rt_mutex,
+ * which is allocated on the kernel stack of the blocked task.
+ *
+ * @list_entry: pi node to enqueue into the mutex waiters list
+ * @pi_list_entry: pi node to enqueue into the mutex owner waiters list
+ * @task: task reference to the blocked task
+ */
+struct rt_mutex_waiter {
+ struct plist_node list_entry;
+ struct plist_node pi_list_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ pid_t deadlock_task_pid;
+ struct rt_mutex *deadlock_lock;
+#endif
+};
+
+/*
+ * Various helpers to access the waiters-plist:
+ */
+static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
+{
+ return !plist_head_empty(&lock->wait_list);
+}
+
+static inline struct rt_mutex_waiter *
+rt_mutex_top_waiter(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter *w;
+
+ w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
+ list_entry);
+ BUG_ON(w->lock != lock);
+
+ return w;
+}
+
+static inline int task_has_pi_waiters(struct task_struct *p)
+{
+ return !plist_head_empty(&p->pi_waiters);
+}
+
+static inline struct rt_mutex_waiter *
+task_top_pi_waiter(struct task_struct *p)
+{
+ return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
+ pi_list_entry);
+}
+
+/*
+ * lock->owner state tracking:
+ */
+#define RT_MUTEX_OWNER_PENDING 1UL
+#define RT_MUTEX_HAS_WAITERS 2UL
+#define RT_MUTEX_OWNER_MASKALL 3UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+{
+ return (struct task_struct *)
+ ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+}
+
+static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
+{
+ return (struct task_struct *)
+ ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
+{
+ return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
+}
+
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+#endif
diff --git a/kernel/sched.c b/kernel/sched.c
index f06d059edef5..2629c1711fd6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -168,15 +168,21 @@
*/
#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-static unsigned int task_timeslice(task_t *p)
+static unsigned int static_prio_timeslice(int static_prio)
{
- if (p->static_prio < NICE_TO_PRIO(0))
- return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
+ if (static_prio < NICE_TO_PRIO(0))
+ return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
else
- return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+ return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
+
+static inline unsigned int task_timeslice(task_t *p)
+{
+ return static_prio_timeslice(p->static_prio);
+}
+
#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
< (long long) (sd)->cache_hot_time)
@@ -184,13 +190,11 @@ static unsigned int task_timeslice(task_t *p)
* These are the runqueue data structures:
*/
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
typedef struct runqueue runqueue_t;
struct prio_array {
unsigned int nr_active;
- unsigned long bitmap[BITMAP_SIZE];
+ DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
struct list_head queue[MAX_PRIO];
};
@@ -209,6 +213,7 @@ struct runqueue {
* remote CPUs use both these fields when doing load calculation.
*/
unsigned long nr_running;
+ unsigned long raw_weighted_load;
#ifdef CONFIG_SMP
unsigned long cpu_load[3];
#endif
@@ -239,7 +244,6 @@ struct runqueue {
task_t *migration_thread;
struct list_head migration_queue;
- int cpu;
#endif
#ifdef CONFIG_SCHEDSTATS
@@ -351,11 +355,30 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
+ * __task_rq_lock - lock the runqueue a given task resides on.
+ * Must be called interrupts disabled.
+ */
+static inline runqueue_t *__task_rq_lock(task_t *p)
+ __acquires(rq->lock)
+{
+ struct runqueue *rq;
+
+repeat_lock_task:
+ rq = task_rq(p);
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+ goto repeat_lock_task;
+ }
+ return rq;
+}
+
+/*
* task_rq_lock - lock the runqueue a given task resides on and disable
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
__acquires(rq->lock)
{
struct runqueue *rq;
@@ -371,6 +394,12 @@ repeat_lock_task:
return rq;
}
+static inline void __task_rq_unlock(runqueue_t *rq)
+ __releases(rq->lock)
+{
+ spin_unlock(&rq->lock);
+}
+
static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
__releases(rq->lock)
{
@@ -634,7 +663,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
}
/*
- * effective_prio - return the priority that is based on the static
+ * __normal_prio - return the priority that is based on the static
* priority but is modified by bonuses/penalties.
*
* We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
@@ -647,13 +676,11 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
*
* Both properties are important to certain workloads.
*/
-static int effective_prio(task_t *p)
+
+static inline int __normal_prio(task_t *p)
{
int bonus, prio;
- if (rt_task(p))
- return p->prio;
-
bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
prio = p->static_prio - bonus;
@@ -665,6 +692,106 @@ static int effective_prio(task_t *p)
}
/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs, the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
+ */
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define LOAD_WEIGHT(lp) \
+ (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+ LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+ (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
+
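A worked example of these macros (illustration only, relying on the assumption stated above that static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE): a nice-0 SCHED_NORMAL task ends up with a load weight of exactly SCHED_LOAD_SCALE, and realtime priorities map above the highest SCHED_NORMAL weight:

/*
 * Worked example only - not part of the patch. Relies on the assumption
 * documented above that static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE.
 */
static void load_weight_example(void)
{
	/* DEF_TIMESLICE * SCHED_LOAD_SCALE / DEF_TIMESLICE == SCHED_LOAD_SCALE */
	unsigned long nice0 = PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(0));

	/* realtime weights start above the largest SCHED_NORMAL weight */
	unsigned long rt1 = RTPRIO_TO_LOAD_WEIGHT(1);

	WARN_ON(nice0 != SCHED_LOAD_SCALE);
	WARN_ON(rt1 <= nice0);
}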
+static void set_load_weight(task_t *p)
+{
+ if (has_rt_policy(p)) {
+#ifdef CONFIG_SMP
+ if (p == task_rq(p)->migration_thread)
+ /*
+ * The migration thread does the actual balancing.
+ * Giving its load any weight will skew balancing
+ * adversely.
+ */
+ p->load_weight = 0;
+ else
+#endif
+ p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+ } else
+ p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+
+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+ rq->raw_weighted_load += p->load_weight;
+}
+
+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+ rq->raw_weighted_load -= p->load_weight;
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+ rq->nr_running++;
+ inc_raw_weighted_load(rq, p);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+ rq->nr_running--;
+ dec_raw_weighted_load(rq, p);
+}
+
+/*
+ * Calculate the expected normal priority: i.e. priority
+ * without taking RT-inheritance into account. Might be
+ * boosted by interactivity modifiers. Changes upon fork,
+ * setprio syscalls, and whenever the interactivity
+ * estimator recalculates.
+ */
+static inline int normal_prio(task_t *p)
+{
+ int prio;
+
+ if (has_rt_policy(p))
+ prio = MAX_RT_PRIO-1 - p->rt_priority;
+ else
+ prio = __normal_prio(p);
+ return prio;
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks, or might be boosted by
+ * interactivity modifiers. Will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(task_t *p)
+{
+ p->normal_prio = normal_prio(p);
+ /*
+ * If we are RT tasks or we were boosted to RT priority,
+ * keep the priority unchanged. Otherwise, update priority
+ * to the normal priority:
+ */
+ if (!rt_prio(p->prio))
+ return p->normal_prio;
+ return p->prio;
+}
+
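To make the split between the two priority fields concrete, a small illustrative check (not part of the patch) of the invariants that effective_prio() maintains, as described above:

/*
 * Illustration only: relationship between p->prio and p->normal_prio
 * after effective_prio() has run.
 */
static void priority_fields_example(task_t *p)
{
	int prio = effective_prio(p);

	if (rt_prio(p->prio))
		WARN_ON(prio != p->prio);		/* RT/PI boost is preserved */
	else
		WARN_ON(prio != p->normal_prio);	/* otherwise follow normal_prio */
}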
+/*
* __activate_task - move a task to the runqueue.
*/
static void __activate_task(task_t *p, runqueue_t *rq)
@@ -674,7 +801,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
if (batch_task(p))
target = rq->expired;
enqueue_task(p, target);
- rq->nr_running++;
+ inc_nr_running(p, rq);
}
/*
@@ -683,39 +810,45 @@ static void __activate_task(task_t *p, runqueue_t *rq)
static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
- rq->nr_running++;
+ inc_nr_running(p, rq);
}
+/*
+ * Recalculate p->normal_prio and p->prio after having slept,
+ * updating the sleep-average too:
+ */
static int recalc_task_prio(task_t *p, unsigned long long now)
{
/* Caller must always ensure 'now >= p->timestamp' */
- unsigned long long __sleep_time = now - p->timestamp;
- unsigned long sleep_time;
+ unsigned long sleep_time = now - p->timestamp;
if (batch_task(p))
sleep_time = 0;
- else {
- if (__sleep_time > NS_MAX_SLEEP_AVG)
- sleep_time = NS_MAX_SLEEP_AVG;
- else
- sleep_time = (unsigned long)__sleep_time;
- }
if (likely(sleep_time > 0)) {
/*
- * User tasks that sleep a long time are categorised as
- * idle. They will only have their sleep_avg increased to a
- * level that makes them just interactive priority to stay
- * active yet prevent them suddenly becoming cpu hogs and
- * starving other processes.
+ * This ceiling is set to the lowest priority that would allow
+ * a task to be reinserted into the active array on timeslice
+ * completion.
*/
- if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
- unsigned long ceiling;
+ unsigned long ceiling = INTERACTIVE_SLEEP(p);
- ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
- DEF_TIMESLICE);
- if (p->sleep_avg < ceiling)
- p->sleep_avg = ceiling;
+ if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
+ /*
+ * Prevents user tasks from achieving best priority
+ * with one single large enough sleep.
+ */
+ p->sleep_avg = ceiling;
+ /*
+ * Using INTERACTIVE_SLEEP() as a ceiling places a
+ * nice(0) task 1ms sleep away from promotion, and
+ * gives it 700ms to round-robin with no chance of
+ * being demoted. This is more than generous, so
+ * mark this sleep as non-interactive to prevent the
+ * on-runqueue bonus logic from intervening should
+ * this task not receive cpu immediately.
+ */
+ p->sleep_type = SLEEP_NONINTERACTIVE;
} else {
/*
* Tasks waking from uninterruptible sleep are
@@ -723,12 +856,12 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
* are likely to be waiting on I/O
*/
if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
- if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
+ if (p->sleep_avg >= ceiling)
sleep_time = 0;
else if (p->sleep_avg + sleep_time >=
- INTERACTIVE_SLEEP(p)) {
- p->sleep_avg = INTERACTIVE_SLEEP(p);
- sleep_time = 0;
+ ceiling) {
+ p->sleep_avg = ceiling;
+ sleep_time = 0;
}
}
@@ -742,9 +875,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
*/
p->sleep_avg += sleep_time;
- if (p->sleep_avg > NS_MAX_SLEEP_AVG)
- p->sleep_avg = NS_MAX_SLEEP_AVG;
}
+ if (p->sleep_avg > NS_MAX_SLEEP_AVG)
+ p->sleep_avg = NS_MAX_SLEEP_AVG;
}
return effective_prio(p);
@@ -805,7 +938,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
*/
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
- rq->nr_running--;
+ dec_nr_running(p, rq);
dequeue_task(p, p->array);
p->array = NULL;
}
@@ -818,6 +951,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
* the target CPU.
*/
#ifdef CONFIG_SMP
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
static void resched_task(task_t *p)
{
int cpu;
@@ -833,9 +971,9 @@ static void resched_task(task_t *p)
if (cpu == smp_processor_id())
return;
- /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+ /* NEED_RESCHED must be visible before we test polling */
smp_mb();
- if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+ if (!tsk_is_polling(p))
smp_send_reschedule(cpu);
}
#else
@@ -855,6 +993,12 @@ inline int task_curr(const task_t *p)
return cpu_curr(task_cpu(p)) == p;
}
+/* Used instead of source_load when we know the type == 0 */
+unsigned long weighted_cpuload(const int cpu)
+{
+ return cpu_rq(cpu)->raw_weighted_load;
+}
+
#ifdef CONFIG_SMP
typedef struct {
struct list_head list;
@@ -944,7 +1088,8 @@ void kick_process(task_t *p)
}
/*
- * Return a low guess at the load of a migration-source cpu.
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
*
* We want to under-estimate the load of migration sources, to
* balance conservatively.
@@ -952,24 +1097,36 @@ void kick_process(task_t *p)
static inline unsigned long source_load(int cpu, int type)
{
runqueue_t *rq = cpu_rq(cpu);
- unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+
if (type == 0)
- return load_now;
+ return rq->raw_weighted_load;
- return min(rq->cpu_load[type-1], load_now);
+ return min(rq->cpu_load[type-1], rq->raw_weighted_load);
}
/*
- * Return a high guess at the load of a migration-target cpu
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
*/
static inline unsigned long target_load(int cpu, int type)
{
runqueue_t *rq = cpu_rq(cpu);
- unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+
if (type == 0)
- return load_now;
+ return rq->raw_weighted_load;
+
+ return max(rq->cpu_load[type-1], rq->raw_weighted_load);
+}
+
+/*
+ * Return the average load per task on the cpu's run queue
+ */
+static inline unsigned long cpu_avg_load_per_task(int cpu)
+{
+ runqueue_t *rq = cpu_rq(cpu);
+ unsigned long n = rq->nr_running;
- return max(rq->cpu_load[type-1], load_now);
+ return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
}
/*
@@ -1042,7 +1199,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
cpus_and(tmp, group->cpumask, p->cpus_allowed);
for_each_cpu_mask(i, tmp) {
- load = source_load(i, 0);
+ load = weighted_cpuload(i);
if (load < min_load || (load == min_load && i == this_cpu)) {
min_load = load;
@@ -1069,9 +1226,15 @@ static int sched_balance_self(int cpu, int flag)
struct task_struct *t = current;
struct sched_domain *tmp, *sd = NULL;
- for_each_domain(cpu, tmp)
+ for_each_domain(cpu, tmp) {
+ /*
+ * If power savings logic is enabled for a domain, stop there.
+ */
+ if (tmp->flags & SD_POWERSAVINGS_BALANCE)
+ break;
if (tmp->flags & flag)
sd = tmp;
+ }
while (sd) {
cpumask_t span;
@@ -1221,17 +1384,19 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
if (this_sd->flags & SD_WAKE_AFFINE) {
unsigned long tl = this_load;
+ unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
+
/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
* of the current CPU:
*/
if (sync)
- tl -= SCHED_LOAD_SCALE;
+ tl -= current->load_weight;
if ((tl <= load &&
- tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
- 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
+ tl + target_load(cpu, idx) <= tl_per_task) ||
+ 100*(tl + p->load_weight) <= imbalance*load) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
@@ -1348,6 +1513,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child:
+ */
+ p->prio = current->normal_prio;
+
INIT_LIST_HEAD(&p->run_list);
p->array = NULL;
#ifdef CONFIG_SCHEDSTATS
@@ -1427,10 +1598,11 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
__activate_task(p, rq);
else {
p->prio = current->prio;
+ p->normal_prio = current->normal_prio;
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
- rq->nr_running++;
+ inc_nr_running(p, rq);
}
set_need_resched();
} else
@@ -1648,7 +1820,8 @@ unsigned long nr_uninterruptible(void)
unsigned long long nr_context_switches(void)
{
- unsigned long long i, sum = 0;
+ int i;
+ unsigned long long sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_switches;
@@ -1686,9 +1859,6 @@ unsigned long nr_active(void)
/*
* double_rq_lock - safely lock two runqueues
*
- * We must take them in cpu order to match code in
- * dependent_sleeper and wake_dependent_sleeper.
- *
* Note this does not disable interrupts like task_rq_lock,
* you need to do so manually before calling.
*/
@@ -1700,7 +1870,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
- if (rq1->cpu < rq2->cpu) {
+ if (rq1 < rq2) {
spin_lock(&rq1->lock);
spin_lock(&rq2->lock);
} else {
@@ -1736,7 +1906,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
__acquires(this_rq->lock)
{
if (unlikely(!spin_trylock(&busiest->lock))) {
- if (busiest->cpu < this_rq->cpu) {
+ if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
@@ -1799,9 +1969,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
- src_rq->nr_running--;
+ dec_nr_running(p, src_rq);
set_task_cpu(p, this_cpu);
- this_rq->nr_running++;
+ inc_nr_running(p, this_rq);
enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
@@ -1848,26 +2018,42 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
return 1;
}
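+/*
+ * Best (numerically lowest) priority present on a runqueue: the lower of the
+ * running task's prio and the best priority in the expired array.
+ */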
+#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
/*
- * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
- * as part of a balancing operation within "domain". Returns the number of
- * tasks moved.
+ * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
+ * load from busiest to this_rq, as part of a balancing operation within
+ * "domain". Returns the number of tasks moved.
*
* Called with both runqueues locked.
*/
static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
- unsigned long max_nr_move, struct sched_domain *sd,
- enum idle_type idle, int *all_pinned)
+ unsigned long max_nr_move, unsigned long max_load_move,
+ struct sched_domain *sd, enum idle_type idle,
+ int *all_pinned)
{
prio_array_t *array, *dst_array;
struct list_head *head, *curr;
- int idx, pulled = 0, pinned = 0;
+ int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio;
+ int busiest_best_prio_seen;
+ int skip_for_load; /* skip the task based on weighted load issues */
+ long rem_load_move;
task_t *tmp;
- if (max_nr_move == 0)
+ if (max_nr_move == 0 || max_load_move == 0)
goto out;
+ rem_load_move = max_load_move;
pinned = 1;
+ this_best_prio = rq_best_prio(this_rq);
+ busiest_best_prio = rq_best_prio(busiest);
+ /*
+ * Enable handling of the case where there is more than one task
+ * with the best priority. If the currently running task is one
+ * of those with prio==busiest_best_prio we know it won't be moved
+ * and therefore it's safe to override the skip (based on load) of
+ * any task we find with that prio.
+ */
+ busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio;
/*
* We first consider expired tasks. Those will likely not be
@@ -1907,7 +2093,17 @@ skip_queue:
curr = curr->prev;
- if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+ /*
+ * To help distribute high-priority tasks across CPUs we don't
+ * skip a task if it will be the highest-priority task (i.e. smallest
+ * prio value) on its new queue, regardless of its load weight.
+ */
+ skip_for_load = tmp->load_weight > rem_load_move;
+ if (skip_for_load && idx < this_best_prio)
+ skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio;
+ if (skip_for_load ||
+ !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+ busiest_best_prio_seen |= idx == busiest_best_prio;
if (curr != head)
goto skip_queue;
idx++;
@@ -1921,9 +2117,15 @@ skip_queue:
pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
pulled++;
+ rem_load_move -= tmp->load_weight;
- /* We only want to steal up to the prescribed number of tasks. */
- if (pulled < max_nr_move) {
+ /*
+ * We only want to steal up to the prescribed number of tasks
+ * and the prescribed amount of weighted load.
+ */
+ if (pulled < max_nr_move && rem_load_move > 0) {
+ if (idx < this_best_prio)
+ this_best_prio = idx;
if (curr != head)
goto skip_queue;
idx++;
@@ -1944,7 +2146,7 @@ out:
/*
* find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the number of tasks which should be
+ * domain. It calculates and returns the amount of weighted load which should be
* moved to restore balance via the imbalance parameter.
*/
static struct sched_group *
@@ -1954,9 +2156,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
unsigned long max_pull;
+ unsigned long busiest_load_per_task, busiest_nr_running;
+ unsigned long this_load_per_task, this_nr_running;
int load_idx;
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ int power_savings_balance = 1;
+ unsigned long leader_nr_running = 0, min_load_per_task = 0;
+ unsigned long min_nr_running = ULONG_MAX;
+ struct sched_group *group_min = NULL, *group_leader = NULL;
+#endif
max_load = this_load = total_load = total_pwr = 0;
+ busiest_load_per_task = busiest_nr_running = 0;
+ this_load_per_task = this_nr_running = 0;
if (idle == NOT_IDLE)
load_idx = sd->busy_idx;
else if (idle == NEWLY_IDLE)
@@ -1965,16 +2177,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load_idx = sd->idle_idx;
do {
- unsigned long load;
+ unsigned long load, group_capacity;
int local_group;
int i;
+ unsigned long sum_nr_running, sum_weighted_load;
local_group = cpu_isset(this_cpu, group->cpumask);
/* Tally up the load of all CPUs in the group */
- avg_load = 0;
+ sum_weighted_load = sum_nr_running = avg_load = 0;
for_each_cpu_mask(i, group->cpumask) {
+ runqueue_t *rq = cpu_rq(i);
+
if (*sd_idle && !idle_cpu(i))
*sd_idle = 0;
@@ -1985,6 +2200,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load = source_load(i, load_idx);
avg_load += load;
+ sum_nr_running += rq->nr_running;
+ sum_weighted_load += rq->raw_weighted_load;
}
total_load += avg_load;
@@ -1993,17 +2210,80 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/* Adjust by relative CPU power of the group */
avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+ group_capacity = group->cpu_power / SCHED_LOAD_SCALE;
+
if (local_group) {
this_load = avg_load;
this = group;
- } else if (avg_load > max_load) {
+ this_nr_running = sum_nr_running;
+ this_load_per_task = sum_weighted_load;
+ } else if (avg_load > max_load &&
+ sum_nr_running > group_capacity) {
max_load = avg_load;
busiest = group;
+ busiest_nr_running = sum_nr_running;
+ busiest_load_per_task = sum_weighted_load;
}
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ goto group_next;
+
+ /*
+ * If the local group is idle or completely loaded,
+ * there is no need to do power savings balancing at this domain.
+ */
+ if (local_group && (this_nr_running >= group_capacity ||
+ !this_nr_running))
+ power_savings_balance = 0;
+
+ /*
+ * If a group is already running at full capacity or idle,
+ * don't include that group in power savings calculations
+ */
+ if (!power_savings_balance || sum_nr_running >= group_capacity
+ || !sum_nr_running)
+ goto group_next;
+
+ /*
+ * Find the group which has the least non-idle load.
+ * This is the group from which we need to pick up load
+ * in order to save power.
+ */
+ if ((sum_nr_running < min_nr_running) ||
+ (sum_nr_running == min_nr_running &&
+ first_cpu(group->cpumask) <
+ first_cpu(group_min->cpumask))) {
+ group_min = group;
+ min_nr_running = sum_nr_running;
+ min_load_per_task = sum_weighted_load /
+ sum_nr_running;
+ }
+
+ /*
+ * Find the group which is nearly at capacity but still has
+ * room to pick up some load from another group and thereby
+ * save more power.
+ */
+ if (sum_nr_running <= group_capacity - 1)
+ if (sum_nr_running > leader_nr_running ||
+ (sum_nr_running == leader_nr_running &&
+ first_cpu(group->cpumask) >
+ first_cpu(group_leader->cpumask))) {
+ group_leader = group;
+ leader_nr_running = sum_nr_running;
+ }
+
+group_next:
+#endif
group = group->next;
} while (group != sd->groups);
- if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
+ if (!busiest || this_load >= max_load || busiest_nr_running == 0)
goto out_balanced;
avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -2012,6 +2292,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
100*max_load <= sd->imbalance_pct*this_load)
goto out_balanced;
+ busiest_load_per_task /= busiest_nr_running;
/*
* We're trying to get all the cpus to the average_load, so we don't
* want to push ourselves above the average load, nor do we wish to
@@ -2023,21 +2304,50 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
+ if (max_load <= busiest_load_per_task)
+ goto out_balanced;
+
+ /*
+ * In the presence of smp nice balancing, certain scenarios can have
+ * max load less than avg load (as we skip groups at or below
+ * their cpu_power while calculating max_load).
+ */
+ if (max_load < avg_load) {
+ *imbalance = 0;
+ goto small_imbalance;
+ }
/* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+ max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
/* How much load to actually move to equalise the imbalance */
*imbalance = min(max_pull * busiest->cpu_power,
(avg_load - this_load) * this->cpu_power)
/ SCHED_LOAD_SCALE;
- if (*imbalance < SCHED_LOAD_SCALE) {
- unsigned long pwr_now = 0, pwr_move = 0;
+ /*
+ * If *imbalance is less than the average load per runnable task
+ * there is no guarantee that any tasks will be moved, so we try
+ * bumping its value to force at least one task to be moved.
+ */
+ if (*imbalance < busiest_load_per_task) {
+ unsigned long pwr_now, pwr_move;
unsigned long tmp;
+ unsigned int imbn;
+
+small_imbalance:
+ pwr_move = pwr_now = 0;
+ imbn = 2;
+ if (this_nr_running) {
+ this_load_per_task /= this_nr_running;
+ if (busiest_load_per_task > this_load_per_task)
+ imbn = 1;
+ } else
+ this_load_per_task = SCHED_LOAD_SCALE;
- if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
- *imbalance = 1;
+ if (max_load - this_load >= busiest_load_per_task * imbn) {
+ *imbalance = busiest_load_per_task;
return busiest;
}
@@ -2047,39 +2357,47 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* moving them.
*/
- pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
- pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
+ pwr_now += busiest->cpu_power *
+ min(busiest_load_per_task, max_load);
+ pwr_now += this->cpu_power *
+ min(this_load_per_task, this_load);
pwr_now /= SCHED_LOAD_SCALE;
/* Amount of load we'd subtract */
- tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
+ tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
if (max_load > tmp)
- pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
- max_load - tmp);
+ pwr_move += busiest->cpu_power *
+ min(busiest_load_per_task, max_load - tmp);
/* Amount of load we'd add */
if (max_load*busiest->cpu_power <
- SCHED_LOAD_SCALE*SCHED_LOAD_SCALE)
+ busiest_load_per_task*SCHED_LOAD_SCALE)
tmp = max_load*busiest->cpu_power/this->cpu_power;
else
- tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
- pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
+ tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
+ pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
if (pwr_move <= pwr_now)
goto out_balanced;
- *imbalance = 1;
- return busiest;
+ *imbalance = busiest_load_per_task;
}
- /* Get rid of the scaling factor, rounding down as we divide */
- *imbalance = *imbalance / SCHED_LOAD_SCALE;
return busiest;
out_balanced:
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ goto ret;
+ if (this == group_leader && group_leader != group_min) {
+ *imbalance = min_load_per_task;
+ return group_min;
+ }
+ret:
+#endif
*imbalance = 0;
return NULL;
}
@@ -2088,18 +2406,21 @@ out_balanced:
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static runqueue_t *find_busiest_queue(struct sched_group *group,
- enum idle_type idle)
+ enum idle_type idle, unsigned long imbalance)
{
- unsigned long load, max_load = 0;
- runqueue_t *busiest = NULL;
+ unsigned long max_load = 0;
+ runqueue_t *busiest = NULL, *rqi;
int i;
for_each_cpu_mask(i, group->cpumask) {
- load = source_load(i, 0);
+ rqi = cpu_rq(i);
- if (load > max_load) {
- max_load = load;
- busiest = cpu_rq(i);
+ if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance)
+ continue;
+
+ if (rqi->raw_weighted_load > max_load) {
+ max_load = rqi->raw_weighted_load;
+ busiest = rqi;
}
}
@@ -2112,6 +2433,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
*/
#define MAX_PINNED_INTERVAL 512
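+/* Leave at least one task behind on the source runqueue when pulling tasks. */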
+#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
@@ -2128,7 +2450,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
int active_balance = 0;
int sd_idle = 0;
- if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+ if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+ !sched_smt_power_savings)
sd_idle = 1;
schedstat_inc(sd, lb_cnt[idle]);
@@ -2139,7 +2462,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}
- busiest = find_busiest_queue(group, idle);
+ busiest = find_busiest_queue(group, idle, imbalance);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -2159,6 +2482,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
*/
double_rq_lock(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
+ minus_1_or_zero(busiest->nr_running),
imbalance, sd, idle, &all_pinned);
double_rq_unlock(this_rq, busiest);
@@ -2216,7 +2540,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
sd->balance_interval *= 2;
}
- if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+ if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+ !sched_smt_power_savings)
return -1;
return nr_moved;
@@ -2231,7 +2556,7 @@ out_one_pinned:
(sd->balance_interval < sd->max_interval))
sd->balance_interval *= 2;
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
return -1;
return 0;
}
@@ -2252,7 +2577,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
int nr_moved = 0;
int sd_idle = 0;
- if (sd->flags & SD_SHARE_CPUPOWER)
+ if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
sd_idle = 1;
schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
@@ -2262,7 +2587,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}
- busiest = find_busiest_queue(group, NEWLY_IDLE);
+ busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
goto out_balanced;
@@ -2277,6 +2602,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
/* Attempt to move tasks */
double_lock_balance(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
+ minus_1_or_zero(busiest->nr_running),
imbalance, sd, NEWLY_IDLE, NULL);
spin_unlock(&busiest->lock);
}
@@ -2292,7 +2618,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
out_balanced:
schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
return -1;
sd->nr_balance_failed = 0;
return 0;
@@ -2347,17 +2673,19 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
double_lock_balance(busiest_rq, target_rq);
/* Search for an sd spanning us and the target CPU. */
- for_each_domain(target_cpu, sd)
+ for_each_domain(target_cpu, sd) {
if ((sd->flags & SD_LOAD_BALANCE) &&
cpu_isset(busiest_cpu, sd->span))
break;
+ }
if (unlikely(sd == NULL))
goto out;
schedstat_inc(sd, alb_cnt);
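+ /*
+ * Push a single task; the load-move limit is chosen larger than any
+ * single task's weight so the weighted-load check never blocks the move.
+ */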
- if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
+ if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
+ RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL))
schedstat_inc(sd, alb_pushed);
else
schedstat_inc(sd, alb_failed);
@@ -2385,7 +2713,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
struct sched_domain *sd;
int i;
- this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
+ this_load = this_rq->raw_weighted_load;
/* Update our load */
for (i = 0; i < 3; i++) {
unsigned long new_load = this_load;
@@ -2686,48 +3014,35 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
resched_task(rq->idle);
}
-static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+/*
+ * Called with interrupt disabled and this_rq's runqueue locked.
+ */
+static void wake_sleeping_dependent(int this_cpu)
{
struct sched_domain *tmp, *sd = NULL;
- cpumask_t sibling_map;
int i;
- for_each_domain(this_cpu, tmp)
- if (tmp->flags & SD_SHARE_CPUPOWER)
+ for_each_domain(this_cpu, tmp) {
+ if (tmp->flags & SD_SHARE_CPUPOWER) {
sd = tmp;
+ break;
+ }
+ }
if (!sd)
return;
- /*
- * Unlock the current runqueue because we have to lock in
- * CPU order to avoid deadlocks. Caller knows that we might
- * unlock. We keep IRQs disabled.
- */
- spin_unlock(&this_rq->lock);
-
- sibling_map = sd->span;
-
- for_each_cpu_mask(i, sibling_map)
- spin_lock(&cpu_rq(i)->lock);
- /*
- * We clear this CPU from the mask. This both simplifies the
- * inner loop and keps this_rq locked when we exit:
- */
- cpu_clear(this_cpu, sibling_map);
-
- for_each_cpu_mask(i, sibling_map) {
+ for_each_cpu_mask(i, sd->span) {
runqueue_t *smt_rq = cpu_rq(i);
+ if (i == this_cpu)
+ continue;
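+ /* Only trylock the sibling runqueue; if it is contended, just skip it. */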
+ if (unlikely(!spin_trylock(&smt_rq->lock)))
+ continue;
+
wakeup_busy_runqueue(smt_rq);
+ spin_unlock(&smt_rq->lock);
}
-
- for_each_cpu_mask(i, sibling_map)
- spin_unlock(&cpu_rq(i)->lock);
- /*
- * We exit with this_cpu's rq still held and IRQs
- * still disabled:
- */
}
/*
@@ -2740,52 +3055,46 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
return p->time_slice * (100 - sd->per_cpu_gain) / 100;
}
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+/*
+ * To minimise lock contention and to avoid having to drop this_rq's
+ * runqueue lock, we only trylock the sibling runqueues and bypass those
+ * runqueues if we fail to acquire their lock. As we only trylock, the
+ * normal locking order does not need to be obeyed.
+ */
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
{
struct sched_domain *tmp, *sd = NULL;
- cpumask_t sibling_map;
- prio_array_t *array;
int ret = 0, i;
- task_t *p;
- for_each_domain(this_cpu, tmp)
- if (tmp->flags & SD_SHARE_CPUPOWER)
+ /* kernel/rt threads do not participate in dependent sleeping */
+ if (!p->mm || rt_task(p))
+ return 0;
+
+ for_each_domain(this_cpu, tmp) {
+ if (tmp->flags & SD_SHARE_CPUPOWER) {
sd = tmp;
+ break;
+ }
+ }
if (!sd)
return 0;
- /*
- * The same locking rules and details apply as for
- * wake_sleeping_dependent():
- */
- spin_unlock(&this_rq->lock);
- sibling_map = sd->span;
- for_each_cpu_mask(i, sibling_map)
- spin_lock(&cpu_rq(i)->lock);
- cpu_clear(this_cpu, sibling_map);
+ for_each_cpu_mask(i, sd->span) {
+ runqueue_t *smt_rq;
+ task_t *smt_curr;
- /*
- * Establish next task to be run - it might have gone away because
- * we released the runqueue lock above:
- */
- if (!this_rq->nr_running)
- goto out_unlock;
- array = this_rq->active;
- if (!array->nr_active)
- array = this_rq->expired;
- BUG_ON(!array->nr_active);
+ if (i == this_cpu)
+ continue;
- p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
- task_t, run_list);
+ smt_rq = cpu_rq(i);
+ if (unlikely(!spin_trylock(&smt_rq->lock)))
+ continue;
- for_each_cpu_mask(i, sibling_map) {
- runqueue_t *smt_rq = cpu_rq(i);
- task_t *smt_curr = smt_rq->curr;
+ smt_curr = smt_rq->curr;
- /* Kernel threads do not participate in dependent sleeping */
- if (!p->mm || !smt_curr->mm || rt_task(p))
- goto check_smt_task;
+ if (!smt_curr->mm)
+ goto unlock;
/*
* If a user task with lower static priority than the
@@ -2803,49 +3112,24 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
if ((jiffies % DEF_TIMESLICE) >
(sd->per_cpu_gain * DEF_TIMESLICE / 100))
ret = 1;
- } else
+ } else {
if (smt_curr->static_prio < p->static_prio &&
!TASK_PREEMPTS_CURR(p, smt_rq) &&
smt_slice(smt_curr, sd) > task_timeslice(p))
ret = 1;
-
-check_smt_task:
- if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
- rt_task(smt_curr))
- continue;
- if (!p->mm) {
- wakeup_busy_runqueue(smt_rq);
- continue;
- }
-
- /*
- * Reschedule a lower priority task on the SMT sibling for
- * it to be put to sleep, or wake it up if it has been put to
- * sleep for priority reasons to see if it should run now.
- */
- if (rt_task(p)) {
- if ((jiffies % DEF_TIMESLICE) >
- (sd->per_cpu_gain * DEF_TIMESLICE / 100))
- resched_task(smt_curr);
- } else {
- if (TASK_PREEMPTS_CURR(p, smt_rq) &&
- smt_slice(p, sd) > task_timeslice(smt_curr))
- resched_task(smt_curr);
- else
- wakeup_busy_runqueue(smt_rq);
}
+unlock:
+ spin_unlock(&smt_rq->lock);
}
-out_unlock:
- for_each_cpu_mask(i, sibling_map)
- spin_unlock(&cpu_rq(i)->lock);
return ret;
}
#else
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static inline void wake_sleeping_dependent(int this_cpu)
{
}
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq,
+ task_t *p)
{
return 0;
}
@@ -2967,32 +3251,13 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
if (unlikely(!rq->nr_running)) {
-go_idle:
idle_balance(cpu, rq);
if (!rq->nr_running) {
next = rq->idle;
rq->expired_timestamp = 0;
- wake_sleeping_dependent(cpu, rq);
- /*
- * wake_sleeping_dependent() might have released
- * the runqueue, so break out if we got new
- * tasks meanwhile:
- */
- if (!rq->nr_running)
- goto switch_tasks;
- }
- } else {
- if (dependent_sleeper(cpu, rq)) {
- next = rq->idle;
+ wake_sleeping_dependent(cpu);
goto switch_tasks;
}
- /*
- * dependent_sleeper() releases and reacquires the runqueue
- * lock, hence go into the idle loop if the rq went
- * empty meanwhile:
- */
- if (unlikely(!rq->nr_running))
- goto go_idle;
}
array = rq->active;
@@ -3030,6 +3295,8 @@ go_idle:
}
}
next->sleep_type = SLEEP_NORMAL;
+ if (dependent_sleeper(cpu, rq, next))
+ next = rq->idle;
switch_tasks:
if (next == rq->idle)
schedstat_inc(rq, sched_goidle);
@@ -3473,12 +3740,65 @@ long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
EXPORT_SYMBOL(sleep_on_timeout);
+#ifdef CONFIG_RT_MUTEXES
+
+/*
+ * rt_mutex_setprio - set the current priority of a task
+ * @p: task
+ * @prio: prio value (kernel-internal form)
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+ *
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
+void rt_mutex_setprio(task_t *p, int prio)
+{
+ unsigned long flags;
+ prio_array_t *array;
+ runqueue_t *rq;
+ int oldprio;
+
+ BUG_ON(prio < 0 || prio > MAX_PRIO);
+
+ rq = task_rq_lock(p, &flags);
+
+ oldprio = p->prio;
+ array = p->array;
+ if (array)
+ dequeue_task(p, array);
+ p->prio = prio;
+
+ if (array) {
+ /*
+ * If changing to an RT priority then queue it
+ * in the active array!
+ */
+ if (rt_task(p))
+ array = rq->active;
+ enqueue_task(p, array);
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+ if (task_running(rq, p)) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else if (TASK_PREEMPTS_CURR(p, rq))
+ resched_task(rq->curr);
+ }
+ task_rq_unlock(rq, &flags);
+}
+
+#endif
+
void set_user_nice(task_t *p, long nice)
{
unsigned long flags;
prio_array_t *array;
runqueue_t *rq;
- int old_prio, new_prio, delta;
+ int old_prio, delta;
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
@@ -3493,22 +3813,25 @@ void set_user_nice(task_t *p, long nice)
* it wont have any effect on scheduling until the task is
* not SCHED_NORMAL/SCHED_BATCH:
*/
- if (rt_task(p)) {
+ if (has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
array = p->array;
- if (array)
+ if (array) {
dequeue_task(p, array);
+ dec_raw_weighted_load(rq, p);
+ }
- old_prio = p->prio;
- new_prio = NICE_TO_PRIO(nice);
- delta = new_prio - old_prio;
p->static_prio = NICE_TO_PRIO(nice);
- p->prio += delta;
+ set_load_weight(p);
+ old_prio = p->prio;
+ p->prio = effective_prio(p);
+ delta = p->prio - old_prio;
if (array) {
enqueue_task(p, array);
+ inc_raw_weighted_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -3519,7 +3842,6 @@ void set_user_nice(task_t *p, long nice)
out_unlock:
task_rq_unlock(rq, &flags);
}
-
EXPORT_SYMBOL(set_user_nice);
/*
@@ -3634,16 +3956,15 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
BUG_ON(p->array);
p->policy = policy;
p->rt_priority = prio;
- if (policy != SCHED_NORMAL && policy != SCHED_BATCH) {
- p->prio = MAX_RT_PRIO-1 - p->rt_priority;
- } else {
- p->prio = p->static_prio;
- /*
- * SCHED_BATCH tasks are treated as perpetual CPU hogs:
- */
- if (policy == SCHED_BATCH)
- p->sleep_avg = 0;
- }
+ p->normal_prio = normal_prio(p);
+ /* we are holding p->pi_lock already */
+ p->prio = rt_mutex_getprio(p);
+ /*
+ * SCHED_BATCH tasks are treated as perpetual CPU hogs:
+ */
+ if (policy == SCHED_BATCH)
+ p->sleep_avg = 0;
+ set_load_weight(p);
}
/**
@@ -3662,6 +3983,8 @@ int sched_setscheduler(struct task_struct *p, int policy,
unsigned long flags;
runqueue_t *rq;
+ /* may grab non-irq protected spin_locks */
+ BUG_ON(in_interrupt());
recheck:
/* double check policy once rq lock held */
if (policy < 0)
@@ -3710,14 +4033,20 @@ recheck:
if (retval)
return retval;
/*
+ * make sure no PI-waiters arrive (or leave) while we are
+ * changing the priority of the task:
+ */
+ spin_lock_irqsave(&p->pi_lock, flags);
+ /*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
*/
- rq = task_rq_lock(p, &flags);
+ rq = __task_rq_lock(p);
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
array = p->array;
@@ -3738,7 +4067,11 @@ recheck:
} else if (TASK_PREEMPTS_CURR(p, rq))
resched_task(rq->curr);
}
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ rt_mutex_adjust_pi(p);
+
return 0;
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
@@ -3760,8 +4093,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
read_unlock_irq(&tasklist_lock);
return -ESRCH;
}
- retval = sched_setscheduler(p, policy, &lparam);
+ get_task_struct(p);
read_unlock_irq(&tasklist_lock);
+ retval = sched_setscheduler(p, policy, &lparam);
+ put_task_struct(p);
return retval;
}
@@ -4247,7 +4582,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
if (retval)
goto out_unlock;
- jiffies_to_timespec(p->policy & SCHED_FIFO ?
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
0 : task_timeslice(p), &t);
read_unlock(&tasklist_lock);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -4373,7 +4708,7 @@ void __devinit init_idle(task_t *idle, int cpu)
idle->timestamp = sched_clock();
idle->sleep_avg = 0;
idle->array = NULL;
- idle->prio = MAX_PRIO;
+ idle->prio = idle->normal_prio = MAX_PRIO;
idle->state = TASK_RUNNING;
idle->cpus_allowed = cpumask_of_cpu(cpu);
set_task_cpu(idle, cpu);
@@ -4469,13 +4804,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
*
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
+ *
+ * Returns non-zero if task was successfully migrated.
*/
-static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
runqueue_t *rq_dest, *rq_src;
+ int ret = 0;
if (unlikely(cpu_is_offline(dest_cpu)))
- return;
+ return ret;
rq_src = cpu_rq(src_cpu);
rq_dest = cpu_rq(dest_cpu);
@@ -4503,9 +4841,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
if (TASK_PREEMPTS_CURR(p, rq_dest))
resched_task(rq_dest->curr);
}
-
+ ret = 1;
out:
double_rq_unlock(rq_src, rq_dest);
+ return ret;
}
/*
@@ -4575,9 +4914,12 @@ wait_to_die:
/* Figure out where task on dead CPU should go, use force if neccessary. */
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
{
+ runqueue_t *rq;
+ unsigned long flags;
int dest_cpu;
cpumask_t mask;
+restart:
/* On same node? */
mask = node_to_cpumask(cpu_to_node(dead_cpu));
cpus_and(mask, mask, tsk->cpus_allowed);
@@ -4589,8 +4931,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
/* No more Mr. Nice Guy. */
if (dest_cpu == NR_CPUS) {
+ rq = task_rq_lock(tsk, &flags);
cpus_setall(tsk->cpus_allowed);
dest_cpu = any_online_cpu(tsk->cpus_allowed);
+ task_rq_unlock(rq, &flags);
/*
* Don't tell them about moving exiting tasks or
@@ -4602,7 +4946,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
"longer affine to cpu%d\n",
tsk->pid, tsk->comm, dead_cpu);
}
- __migrate_task(tsk, dead_cpu, dest_cpu);
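+ /*
+ * Retry with a new target if the migration did not happen, e.g.
+ * because the chosen CPU went offline meanwhile.
+ */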
+ if (!__migrate_task(tsk, dead_cpu, dest_cpu))
+ goto restart;
}
/*
@@ -4729,8 +5074,9 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
*/
-static int migration_call(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int __cpuinit migration_call(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
int cpu = (long)hcpu;
struct task_struct *p;
@@ -4800,7 +5146,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
/* Register at highest priority so that task migration (migrate_all_tasks)
* happens before everything else.
*/
-static struct notifier_block migration_notifier = {
+static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
.priority = 10
};
@@ -5601,6 +5947,7 @@ static cpumask_t sched_domain_node_span(int node)
}
#endif
+int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
/*
* At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
* can switch it on easily if needed.
@@ -5616,7 +5963,7 @@ static int cpu_to_cpu_group(int cpu)
#ifdef CONFIG_SCHED_MC
static DEFINE_PER_CPU(struct sched_domain, core_domains);
-static struct sched_group sched_group_core[NR_CPUS];
+static struct sched_group *sched_group_core_bycpu[NR_CPUS];
#endif
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -5632,7 +5979,7 @@ static int cpu_to_core_group(int cpu)
#endif
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
+static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
static int cpu_to_phys_group(int cpu)
{
#if defined(CONFIG_SCHED_MC)
@@ -5689,13 +6036,74 @@ next_sg:
}
#endif
+/* Free memory allocated for various sched_group structures */
+static void free_sched_groups(const cpumask_t *cpu_map)
+{
+ int cpu;
+#ifdef CONFIG_NUMA
+ int i;
+
+ for_each_cpu_mask(cpu, *cpu_map) {
+ struct sched_group *sched_group_allnodes
+ = sched_group_allnodes_bycpu[cpu];
+ struct sched_group **sched_group_nodes
+ = sched_group_nodes_bycpu[cpu];
+
+ if (sched_group_allnodes) {
+ kfree(sched_group_allnodes);
+ sched_group_allnodes_bycpu[cpu] = NULL;
+ }
+
+ if (!sched_group_nodes)
+ continue;
+
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ cpumask_t nodemask = node_to_cpumask(i);
+ struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+ cpus_and(nodemask, nodemask, *cpu_map);
+ if (cpus_empty(nodemask))
+ continue;
+
+ if (sg == NULL)
+ continue;
+ sg = sg->next;
+next_sg:
+ oldsg = sg;
+ sg = sg->next;
+ kfree(oldsg);
+ if (oldsg != sched_group_nodes[i])
+ goto next_sg;
+ }
+ kfree(sched_group_nodes);
+ sched_group_nodes_bycpu[cpu] = NULL;
+ }
+#endif
+ for_each_cpu_mask(cpu, *cpu_map) {
+ if (sched_group_phys_bycpu[cpu]) {
+ kfree(sched_group_phys_bycpu[cpu]);
+ sched_group_phys_bycpu[cpu] = NULL;
+ }
+#ifdef CONFIG_SCHED_MC
+ if (sched_group_core_bycpu[cpu]) {
+ kfree(sched_group_core_bycpu[cpu]);
+ sched_group_core_bycpu[cpu] = NULL;
+ }
+#endif
+ }
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
-void build_sched_domains(const cpumask_t *cpu_map)
+static int build_sched_domains(const cpumask_t *cpu_map)
{
int i;
+ struct sched_group *sched_group_phys = NULL;
+#ifdef CONFIG_SCHED_MC
+ struct sched_group *sched_group_core = NULL;
+#endif
#ifdef CONFIG_NUMA
struct sched_group **sched_group_nodes = NULL;
struct sched_group *sched_group_allnodes = NULL;
@@ -5703,11 +6111,11 @@ void build_sched_domains(const cpumask_t *cpu_map)
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
- GFP_ATOMIC);
+ sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+ GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
- return;
+ return -ENOMEM;
}
sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif
@@ -5733,7 +6141,7 @@ void build_sched_domains(const cpumask_t *cpu_map)
if (!sched_group_allnodes) {
printk(KERN_WARNING
"Can not alloc allnodes sched group\n");
- break;
+ goto error;
}
sched_group_allnodes_bycpu[i]
= sched_group_allnodes;
@@ -5754,6 +6162,18 @@ void build_sched_domains(const cpumask_t *cpu_map)
cpus_and(sd->span, sd->span, *cpu_map);
#endif
+ if (!sched_group_phys) {
+ sched_group_phys
+ = kmalloc(sizeof(struct sched_group) * NR_CPUS,
+ GFP_KERNEL);
+ if (!sched_group_phys) {
+ printk(KERN_WARNING "Can not alloc phys sched "
+ "group\n");
+ goto error;
+ }
+ sched_group_phys_bycpu[i] = sched_group_phys;
+ }
+
p = sd;
sd = &per_cpu(phys_domains, i);
group = cpu_to_phys_group(i);
@@ -5763,6 +6183,18 @@ void build_sched_domains(const cpumask_t *cpu_map)
sd->groups = &sched_group_phys[group];
#ifdef CONFIG_SCHED_MC
+ if (!sched_group_core) {
+ sched_group_core
+ = kmalloc(sizeof(struct sched_group) * NR_CPUS,
+ GFP_KERNEL);
+ if (!sched_group_core) {
+ printk(KERN_WARNING "Can not alloc core sched "
+ "group\n");
+ goto error;
+ }
+ sched_group_core_bycpu[i] = sched_group_core;
+ }
+
p = sd;
sd = &per_cpu(core_domains, i);
group = cpu_to_core_group(i);
@@ -5846,24 +6278,21 @@ void build_sched_domains(const cpumask_t *cpu_map)
domainspan = sched_domain_node_span(i);
cpus_and(domainspan, domainspan, *cpu_map);
- sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+ sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
+ if (!sg) {
+ printk(KERN_WARNING "Can not alloc domain group for "
+ "node %d\n", i);
+ goto error;
+ }
sched_group_nodes[i] = sg;
for_each_cpu_mask(j, nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j);
sd->groups = sg;
- if (sd->groups == NULL) {
- /* Turn off balancing if we have no groups */
- sd->flags = 0;
- }
- }
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", i);
- continue;
}
sg->cpu_power = 0;
sg->cpumask = nodemask;
+ sg->next = sg;
cpus_or(covered, covered, nodemask);
prev = sg;
@@ -5882,54 +6311,90 @@ void build_sched_domains(const cpumask_t *cpu_map)
if (cpus_empty(tmp))
continue;
- sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+ sg = kmalloc_node(sizeof(struct sched_group),
+ GFP_KERNEL, i);
if (!sg) {
printk(KERN_WARNING
"Can not alloc domain group for node %d\n", j);
- break;
+ goto error;
}
sg->cpu_power = 0;
sg->cpumask = tmp;
+ sg->next = prev->next;
cpus_or(covered, covered, tmp);
prev->next = sg;
prev = sg;
}
- prev->next = sched_group_nodes[i];
}
#endif
/* Calculate CPU power for physical packages and nodes */
+#ifdef CONFIG_SCHED_SMT
for_each_cpu_mask(i, *cpu_map) {
- int power;
struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
- power = SCHED_LOAD_SCALE;
- sd->groups->cpu_power = power;
+ sd->groups->cpu_power = SCHED_LOAD_SCALE;
+ }
#endif
#ifdef CONFIG_SCHED_MC
+ for_each_cpu_mask(i, *cpu_map) {
+ int power;
+ struct sched_domain *sd;
sd = &per_cpu(core_domains, i);
- power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
+ if (sched_smt_power_savings)
+ power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask);
+ else
+ power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
* SCHED_LOAD_SCALE / 10;
sd->groups->cpu_power = power;
+ }
+#endif
+ for_each_cpu_mask(i, *cpu_map) {
+ struct sched_domain *sd;
+#ifdef CONFIG_SCHED_MC
sd = &per_cpu(phys_domains, i);
+ if (i != first_cpu(sd->groups->cpumask))
+ continue;
- /*
- * This has to be < 2 * SCHED_LOAD_SCALE
- * Lets keep it SCHED_LOAD_SCALE, so that
- * while calculating NUMA group's cpu_power
- * we can simply do
- * numa_group->cpu_power += phys_group->cpu_power;
- *
- * See "only add power once for each physical pkg"
- * comment below
- */
- sd->groups->cpu_power = SCHED_LOAD_SCALE;
+ sd->groups->cpu_power = 0;
+ if (sched_mc_power_savings || sched_smt_power_savings) {
+ int j;
+
+ for_each_cpu_mask(j, sd->groups->cpumask) {
+ struct sched_domain *sd1;
+ sd1 = &per_cpu(core_domains, j);
+ /*
+ * For each core, add its power once to the
+ * group in the physical domain.
+ */
+ if (j != first_cpu(sd1->groups->cpumask))
+ continue;
+
+ if (sched_smt_power_savings)
+ sd->groups->cpu_power += sd1->groups->cpu_power;
+ else
+ sd->groups->cpu_power += SCHED_LOAD_SCALE;
+ }
+ } else
+ /*
+ * This has to be < 2 * SCHED_LOAD_SCALE
+ * Lets keep it SCHED_LOAD_SCALE, so that
+ * while calculating NUMA group's cpu_power
+ * we can simply do
+ * numa_group->cpu_power += phys_group->cpu_power;
+ *
+ * See "only add power once for each physical pkg"
+ * comment below
+ */
+ sd->groups->cpu_power = SCHED_LOAD_SCALE;
#else
+ int power;
sd = &per_cpu(phys_domains, i);
- power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
- (cpus_weight(sd->groups->cpumask)-1) / 10;
+ if (sched_smt_power_savings)
+ power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask);
+ else
+ power = SCHED_LOAD_SCALE;
sd->groups->cpu_power = power;
#endif
}
@@ -5957,13 +6422,20 @@ void build_sched_domains(const cpumask_t *cpu_map)
* Tune cache-hot values:
*/
calibrate_migration_costs(cpu_map);
+
+ return 0;
+
+error:
+ free_sched_groups(cpu_map);
+ return -ENOMEM;
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
*/
-static void arch_init_sched_domains(const cpumask_t *cpu_map)
+static int arch_init_sched_domains(const cpumask_t *cpu_map)
{
cpumask_t cpu_default_map;
+ int err;
/*
* Setup mask for cpus without special case scheduling requirements.
@@ -5972,51 +6444,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map)
*/
cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
- build_sched_domains(&cpu_default_map);
+ err = build_sched_domains(&cpu_default_map);
+
+ return err;
}
static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
{
-#ifdef CONFIG_NUMA
- int i;
- int cpu;
-
- for_each_cpu_mask(cpu, *cpu_map) {
- struct sched_group *sched_group_allnodes
- = sched_group_allnodes_bycpu[cpu];
- struct sched_group **sched_group_nodes
- = sched_group_nodes_bycpu[cpu];
-
- if (sched_group_allnodes) {
- kfree(sched_group_allnodes);
- sched_group_allnodes_bycpu[cpu] = NULL;
- }
-
- if (!sched_group_nodes)
- continue;
-
- for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
- struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
- continue;
-
- if (sg == NULL)
- continue;
- sg = sg->next;
-next_sg:
- oldsg = sg;
- sg = sg->next;
- kfree(oldsg);
- if (oldsg != sched_group_nodes[i])
- goto next_sg;
- }
- kfree(sched_group_nodes);
- sched_group_nodes_bycpu[cpu] = NULL;
- }
-#endif
+ free_sched_groups(cpu_map);
}
/*
@@ -6041,9 +6476,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
* correct sched domains
* Call with hotplug lock held
*/
-void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
{
cpumask_t change_map;
+ int err = 0;
cpus_and(*partition1, *partition1, cpu_online_map);
cpus_and(*partition2, *partition2, cpu_online_map);
@@ -6052,10 +6488,86 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
/* Detach sched domains from all of the affected cpus */
detach_destroy_domains(&change_map);
if (!cpus_empty(*partition1))
- build_sched_domains(partition1);
- if (!cpus_empty(*partition2))
- build_sched_domains(partition2);
+ err = build_sched_domains(partition1);
+ if (!err && !cpus_empty(*partition2))
+ err = build_sched_domains(partition2);
+
+ return err;
+}
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+int arch_reinit_sched_domains(void)
+{
+ int err;
+
+ lock_cpu_hotplug();
+ detach_destroy_domains(&cpu_online_map);
+ err = arch_init_sched_domains(&cpu_online_map);
+ unlock_cpu_hotplug();
+
+ return err;
+}
+
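+/*
+ * Common store handler for the sched_mc_power_savings and
+ * sched_smt_power_savings sysfs attributes: update the flag and
+ * rebuild the sched domains.
+ */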
+static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
+{
+ int ret;
+
+ if (buf[0] != '0' && buf[0] != '1')
+ return -EINVAL;
+
+ if (smt)
+ sched_smt_power_savings = (buf[0] == '1');
+ else
+ sched_mc_power_savings = (buf[0] == '1');
+
+ ret = arch_reinit_sched_domains();
+
+ return ret ? ret : count;
+}
+
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+ int err = 0;
+#ifdef CONFIG_SCHED_SMT
+ if (smt_capable())
+ err = sysfs_create_file(&cls->kset.kobj,
+ &attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+ if (!err && mc_capable())
+ err = sysfs_create_file(&cls->kset.kobj,
+ &attr_sched_mc_power_savings.attr);
+#endif
+ return err;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
+{
+ return sprintf(page, "%u\n", sched_mc_power_savings);
+}
+static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
+{
+ return sched_power_savings_store(buf, count, 0);
+}
+SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+ sched_mc_power_savings_store);
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
+{
+ return sprintf(page, "%u\n", sched_smt_power_savings);
+}
+static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
+{
+ return sched_power_savings_store(buf, count, 1);
}
+SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+ sched_smt_power_savings_store);
+#endif
+
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -6138,7 +6650,6 @@ void __init sched_init(void)
rq->push_cpu = 0;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
- rq->cpu = i;
#endif
atomic_set(&rq->nr_iowait, 0);
@@ -6153,6 +6664,7 @@ void __init sched_init(void)
}
}
+ set_load_weight(&init_task);
/*
* The boot idle thread does lazy MMU switching as well:
*/
@@ -6199,11 +6711,12 @@ void normalize_rt_tasks(void)
runqueue_t *rq;
read_lock_irq(&tasklist_lock);
- for_each_process (p) {
+ for_each_process(p) {
if (!rt_task(p))
continue;
- rq = task_rq_lock(p, &flags);
+ spin_lock_irqsave(&p->pi_lock, flags);
+ rq = __task_rq_lock(p);
array = p->array;
if (array)
@@ -6214,7 +6727,8 @@ void normalize_rt_tasks(void)
resched_task(rq->curr);
}
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
}
read_unlock_irq(&tasklist_lock);
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b3c921737e2..52adf53929f6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
spin_unlock_irqrestore(&sighand->siglock, flags);
}
+static inline int may_ptrace_stop(void)
+{
+ if (!likely(current->ptrace & PT_PTRACED))
+ return 0;
+
+ if (unlikely(current->parent == current->real_parent &&
+ (current->ptrace & PT_ATTACHED)))
+ return 0;
+
+ if (unlikely(current->signal == current->parent->signal) &&
+ unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+ return 0;
+
+ /*
+ * Are we in the middle of do_coredump?
+ * If so, and our tracer is also part of the coredump, stopping
+ * would deadlock and would be pointless because our tracer is
+ * dead, so don't allow us to stop.
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_waiters != 0. Otherwise it
+ * is safe to enter schedule().
+ */
+ if (unlikely(current->mm->core_waiters) &&
+ unlikely(current->mm == current->parent->mm))
+ return 0;
+
+ return 1;
+}
+
/*
* This must be called with current->sighand->siglock held.
*
@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
spin_unlock_irq(&current->sighand->siglock);
try_to_freeze();
read_lock(&tasklist_lock);
- if (likely(current->ptrace & PT_PTRACED) &&
- likely(current->parent != current->real_parent ||
- !(current->ptrace & PT_ATTACHED)) &&
- (likely(current->parent->signal != current->signal) ||
- !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
+ if (may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9e2f1c6e73d7..8f03e3b89b55 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -486,7 +486,7 @@ static int cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __devinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index b5c3b94e01ce..6b76caa22981 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
/*
* Create/destroy watchdog threads as CPUs come and go:
*/
-static int
+static int __devinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
@@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __devinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2c0e65819448..93a2c5398648 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -73,6 +73,7 @@ extern int printk_ratelimit_burst;
extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
+extern int compat_log;
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
int unknown_nmi_panic;
@@ -132,6 +133,10 @@ extern int acct_parm[];
extern int no_unaligned_warning;
#endif
+#ifdef CONFIG_RT_MUTEXES
+extern int max_lock_depth;
+#endif
+
static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -677,6 +682,27 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_COMPAT
+ {
+ .ctl_name = KERN_COMPAT_LOG,
+ .procname = "compat-log",
+ .data = &compat_log,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+#ifdef CONFIG_RT_MUTEXES
+ {
+ .ctl_name = KERN_MAX_LOCK_DEPTH,
+ .procname = "max_lock_depth",
+ .data = &max_lock_depth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+
{ .ctl_name = 0 }
};
@@ -917,6 +943,18 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_jiffies,
},
#endif
+#ifdef CONFIG_X86_32
+ {
+ .ctl_name = VM_VDSO_ENABLED,
+ .procname = "vdso_enabled",
+ .data = &vdso_enabled,
+ .maxlen = sizeof(vdso_enabled),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
+#endif
{ .ctl_name = 0 }
};
diff --git a/kernel/time.c b/kernel/time.c
index b00ddc71cedb..5bd489747643 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -523,6 +523,7 @@ EXPORT_SYMBOL(do_gettimeofday);
#else
+#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
* and therefore only yields usec accuracy
@@ -537,6 +538,7 @@ void getnstimeofday(struct timespec *tv)
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
+#endif
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
new file mode 100644
index 000000000000..e1dfd8e86cce
--- /dev/null
+++ b/kernel/time/Makefile
@@ -0,0 +1 @@
+obj-y += clocksource.o jiffies.o
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
new file mode 100644
index 000000000000..74eca5939bd9
--- /dev/null
+++ b/kernel/time/clocksource.c
@@ -0,0 +1,349 @@
+/*
+ * linux/kernel/time/clocksource.c
+ *
+ * This file contains the functions which manage clocksource drivers.
+ *
+ * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * TODO WishList:
+ * o Allow clocksource drivers to be unregistered
+ * o get rid of clocksource_jiffies extern
+ */
+
+#include <linux/clocksource.h>
+#include <linux/sysdev.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+/* XXX - Would like a better way for initializing curr_clocksource */
+extern struct clocksource clocksource_jiffies;
+
+/*[Clocksource internal variables]---------
+ * curr_clocksource:
+ * currently selected clocksource. Initialized to clocksource_jiffies.
+ * next_clocksource:
+ * pending next selected clocksource.
+ * clocksource_list:
+ * linked list with the registered clocksources
+ * clocksource_lock:
+ * protects manipulations to curr_clocksource and next_clocksource
+ * and the clocksource_list
+ * override_name:
+ * Name of the user-specified clocksource.
+ */
+static struct clocksource *curr_clocksource = &clocksource_jiffies;
+static struct clocksource *next_clocksource;
+static LIST_HEAD(clocksource_list);
+static DEFINE_SPINLOCK(clocksource_lock);
+static char override_name[32];
+static int finished_booting;
+
+/* clocksource_done_booting - Called near the end of bootup
+ *
+ * Hack to avoid lots of clocksource churn at boot time
+ */
+static int __init clocksource_done_booting(void)
+{
+ finished_booting = 1;
+ return 0;
+}
+
+late_initcall(clocksource_done_booting);
+
+/**
+ * clocksource_get_next - Returns the selected clocksource
+ *
+ */
+struct clocksource *clocksource_get_next(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
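+ /* Switch to a newly selected clocksource only once booting has finished. */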
+ if (next_clocksource && finished_booting) {
+ curr_clocksource = next_clocksource;
+ next_clocksource = NULL;
+ }
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+
+ return curr_clocksource;
+}
+
+/**
+ * select_clocksource - Finds the best registered clocksource.
+ *
+ * Private function. Must hold clocksource_lock when called.
+ *
+ * Looks through the list of registered clocksources, returning
+ * the one with the highest rating value. If there is a clocksource
+ * name that matches the override string, it returns that clocksource.
+ */
+static struct clocksource *select_clocksource(void)
+{
+ struct clocksource *best = NULL;
+ struct list_head *tmp;
+
+ list_for_each(tmp, &clocksource_list) {
+ struct clocksource *src;
+
+ src = list_entry(tmp, struct clocksource, list);
+ if (!best)
+ best = src;
+
+ /* check for override: */
+ if (strlen(src->name) == strlen(override_name) &&
+ !strcmp(src->name, override_name)) {
+ best = src;
+ break;
+ }
+ /* pick the highest rating: */
+ if (src->rating > best->rating)
+ best = src;
+ }
+
+ return best;
+}
+
+/**
+ * is_registered_source - Checks if clocksource is registered
+ * @c: pointer to a clocksource
+ *
+ * Private helper function. Must hold clocksource_lock when called.
+ *
+ * Returns one if the clocksource is already registered, zero otherwise.
+ */
+static int is_registered_source(struct clocksource *c)
+{
+ int len = strlen(c->name);
+ struct list_head *tmp;
+
+ list_for_each(tmp, &clocksource_list) {
+ struct clocksource *src;
+
+ src = list_entry(tmp, struct clocksource, list);
+ if (strlen(src->name) == len && !strcmp(src->name, c->name))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * clocksource_register - Used to install new clocksources
+ * @c: clocksource to be registered
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int clocksource_register(struct clocksource *c)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
+ /* check if clocksource is already registered */
+ if (is_registered_source(c)) {
+ printk("register_clocksource: Cannot register %s. "
+ "Already registered!", c->name);
+ ret = -EBUSY;
+ } else {
+ /* register it */
+ list_add(&c->list, &clocksource_list);
+ /* scan the registered clocksources, and pick the best one */
+ next_clocksource = select_clocksource();
+ }
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(clocksource_register);
+
+/**
+ * clocksource_reselect - Rescan list for next clocksource
+ *
+ * A quick helper function to be used if a clocksource changes its
+ * rating. Forces the clocksource list to be re-scanned for the best
+ * clocksource.
+ */
+void clocksource_reselect(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
+ next_clocksource = select_clocksource();
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+EXPORT_SYMBOL(clocksource_reselect);
+
+/**
+ * sysfs_show_current_clocksources - sysfs interface for current clocksource
+ * @dev: unused
+ * @buf: char buffer to be filled with clocksource list
+ *
+ * Provides sysfs interface for listing current clocksource.
+ */
+static ssize_t
+sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
+{
+ char *curr = buf;
+
+ spin_lock_irq(&clocksource_lock);
+ curr += sprintf(curr, "%s ", curr_clocksource->name);
+ spin_unlock_irq(&clocksource_lock);
+
+ curr += sprintf(curr, "\n");
+
+ return curr - buf;
+}
+
+/**
+ * sysfs_override_clocksource - interface for manually overriding clocksource
+ * @dev: unused
+ * @buf: name of override clocksource
+ * @count: length of buffer
+ *
+ * Takes input from sysfs interface for manually overriding the default
+ * clocksource selection.
+ */
+static ssize_t sysfs_override_clocksource(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ size_t ret = count;
+ /* strings from sysfs write are not 0 terminated! */
+ if (count >= sizeof(override_name))
+ return -EINVAL;
+
+ /* strip off the trailing \n: */
+ if (buf[count-1] == '\n')
+ count--;
+ if (count < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&clocksource_lock);
+
+ /* copy the name given: */
+ memcpy(override_name, buf, count);
+ override_name[count] = 0;
+
+ /* try to select it: */
+ next_clocksource = select_clocksource();
+
+ spin_unlock_irq(&clocksource_lock);
+
+ return ret;
+}
+
+/**
+ * sysfs_show_available_clocksources - sysfs interface for listing clocksource
+ * @dev: unused
+ * @buf: char buffer to be filled with clocksource list
+ *
+ * Provides sysfs interface for listing registered clocksources
+ */
+static ssize_t
+sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
+{
+ struct list_head *tmp;
+ char *curr = buf;
+
+ spin_lock_irq(&clocksource_lock);
+ list_for_each(tmp, &clocksource_list) {
+ struct clocksource *src;
+
+ src = list_entry(tmp, struct clocksource, list);
+ curr += sprintf(curr, "%s ", src->name);
+ }
+ spin_unlock_irq(&clocksource_lock);
+
+ curr += sprintf(curr, "\n");
+
+ return curr - buf;
+}
+
+/*
+ * Sysfs setup bits:
+ */
+static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources,
+ sysfs_override_clocksource);
+
+static SYSDEV_ATTR(available_clocksource, 0600,
+ sysfs_show_available_clocksources, NULL);
+
+static struct sysdev_class clocksource_sysclass = {
+ set_kset_name("clocksource"),
+};
+
+static struct sys_device device_clocksource = {
+ .id = 0,
+ .cls = &clocksource_sysclass,
+};
+
+static int __init init_clocksource_sysfs(void)
+{
+ int error = sysdev_class_register(&clocksource_sysclass);
+
+ if (!error)
+ error = sysdev_register(&device_clocksource);
+ if (!error)
+ error = sysdev_create_file(
+ &device_clocksource,
+ &attr_current_clocksource);
+ if (!error)
+ error = sysdev_create_file(
+ &device_clocksource,
+ &attr_available_clocksource);
+ return error;
+}
+
+device_initcall(init_clocksource_sysfs);
+
+/**
+ * boot_override_clocksource - boot clock override
+ * @str: override name
+ *
+ * Takes a clocksource= boot argument and uses it
+ * as the clocksource override name.
+ */
+static int __init boot_override_clocksource(char* str)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&clocksource_lock, flags);
+ if (str)
+ strlcpy(override_name, str, sizeof(override_name));
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+ return 1;
+}
+
+__setup("clocksource=", boot_override_clocksource);
+
+/**
+ * boot_override_clock - Compatibility layer for deprecated boot option
+ * @str: override name
+ *
+ * DEPRECATED! Takes a clock= boot argument and uses it
+ * as the clocksource override name
+ */
+static int __init boot_override_clock(char* str)
+{
+ if (!strcmp(str, "pmtmr")) {
+ printk("Warning: clock=pmtmr is deprecated. "
+ "Use clocksource=acpi_pm.\n");
+ return boot_override_clocksource("acpi_pm");
+ }
+ printk("Warning! clock= boot option is deprecated. "
+ "Use clocksource=xyz\n");
+ return boot_override_clocksource(str);
+}
+
+__setup("clock=", boot_override_clock);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
new file mode 100644
index 000000000000..126bb30c4afe
--- /dev/null
+++ b/kernel/time/jiffies.c
@@ -0,0 +1,73 @@
+/***********************************************************************
+* linux/kernel/time/jiffies.c
+*
+* This file contains the jiffies based clocksource.
+*
+* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+************************************************************************/
+#include <linux/clocksource.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+
+/* The Jiffies based clocksource is the lowest common
+ * denominator clock source which should function on
+ * all systems. It has the same coarse resolution as
+ * the timer interrupt frequency HZ and it suffers
+ * inaccuracies caused by missed or lost timer
+ * interrupts and the inability of the timer
+ * interrupt hardware to accurately tick at the
+ * requested HZ value. It is also not recommended
+ * for "tick-less" systems.
+ */
+#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
+
+/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
+ * conversion, the .shift value could be zero. However
+ * this would make NTP adjustments impossible as they are
+ * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
+ * shift both the numerator and denominator the same
+ * amount, and give NTP adjustments in units of 1/2^8.
+ *
+ * The value 8 is somewhat carefully chosen, as anything
+ * larger can result in overflows. NSEC_PER_JIFFY grows as
+ * HZ shrinks, so values greater than 8 overflow 32 bits when
+ * HZ=100.
+ */
+#define JIFFIES_SHIFT 8
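+
+/* A worked example of the bound above (ACTHZ is close to HZ): with
+ * HZ=100 and a shift of 8, NSEC_PER_JIFFY is (10^9 << 8) / 100,
+ * roughly 2.56e9, which still fits in 32 bits; a shift of 9 would
+ * give roughly 5.12e9 and overflow the u32.
+ */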
+
+static cycle_t jiffies_read(void)
+{
+ return (cycle_t) jiffies;
+}
+
+struct clocksource clocksource_jiffies = {
+ .name = "jiffies",
+ .rating = 0, /* lowest rating */
+ .read = jiffies_read,
+ .mask = 0xffffffff, /* 32 bits */
+ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+ .shift = JIFFIES_SHIFT,
+ .is_continuous = 0, /* tick based, not free running */
+};
+
+static int __init init_jiffies_clocksource(void)
+{
+ return clocksource_register(&clocksource_jiffies);
+}
+
+module_init(init_jiffies_clocksource);
diff --git a/kernel/timer.c b/kernel/timer.c
index eb97371b87d8..5a8960253063 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -597,7 +597,6 @@ long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
long time_precision = 1; /* clock precision (us) */
long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-static long time_phase; /* phase offset (scaled us) */
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
/* frequency offset (scaled ppm)*/
static long time_adj; /* tick adjust (scaled 1 / HZ) */
@@ -747,27 +746,14 @@ static long adjtime_adjustment(void)
}
/* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
+static void update_ntp_one_tick(void)
{
- long time_adjust_step, delta_nsec;
+ long time_adjust_step;
time_adjust_step = adjtime_adjustment();
if (time_adjust_step)
/* Reduce by this step the amount of time left */
time_adjust -= time_adjust_step;
- delta_nsec = tick_nsec + time_adjust_step * 1000;
- /*
- * Advance the phase, once it gets to one microsecond, then
- * advance the tick more.
- */
- time_phase += time_adj;
- if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
- long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
- time_phase -= ltemp << (SHIFT_SCALE - 10);
- delta_nsec += ltemp;
- }
- xtime.tv_nsec += delta_nsec;
- time_interpolator_update(delta_nsec);
/* Changes by adjtime() do not take effect till next tick. */
if (time_next_adjust != 0) {
@@ -780,36 +766,378 @@ static void update_wall_time_one_tick(void)
* Return how long ticks are at the moment, that is, how much time
* update_wall_time_one_tick will add to xtime next time we call it
* (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
- * bits to the right of the binary point.
+ * The return value is in fixed-point nanoseconds shifted by the
+ * specified number of bits to the right of the binary point.
* This function has no side-effects.
*/
u64 current_tick_length(void)
{
long delta_nsec;
+ u64 ret;
+ /* calculate the finest interval NTP will allow,
+ * i.e. a nanosecond value shifted by (SHIFT_SCALE - 10)
+ */
delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
- return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
+ ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
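+ /* time_adj is kept in (SHIFT_SCALE - 10) fixed point; rescale it
+ * to the TICK_LENGTH_SHIFT fixed point used for the return value: */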
+ ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));
+
+ return ret;
}
-/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
+/* XXX - all of this timekeeping code should be later moved to time.c */
+#include <linux/clocksource.h>
+static struct clocksource *clock; /* pointer to current clocksource */
+
+#ifdef CONFIG_GENERIC_TIME
+/**
+ * __get_nsec_offset - Returns nanoseconds since the last update_wall_time()
+ *
+ * Private function. Must hold the xtime_lock when being
+ * called. Returns the number of nanoseconds since the
+ * last call to update_wall_time() (adjusted by NTP scaling)
+ */
+static inline s64 __get_nsec_offset(void)
+{
+ cycle_t cycle_now, cycle_delta;
+ s64 ns_offset;
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ ns_offset = cyc2ns(clock, cycle_delta);
+
+ return ns_offset;
+}
+
+/**
+ * __get_realtime_clock_ts - Returns the time of day in a timespec
+ * @ts: pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec. Used by
+ * do_gettimeofday() and getnstimeofday().
*/
-static void update_wall_time(unsigned long ticks)
+static inline void __get_realtime_clock_ts(struct timespec *ts)
{
+ unsigned long seq;
+ s64 nsecs;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ *ts = xtime;
+ nsecs = __get_nsec_offset();
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ timespec_add_ns(ts, nsecs);
+}
+
+/**
+ * getnstimeofday - Returns the time of day in a timespec
+ * @ts: pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec.
+ */
+void getnstimeofday(struct timespec *ts)
+{
+ __get_realtime_clock_ts(ts);
+}
+
+EXPORT_SYMBOL(getnstimeofday);
+
+/**
+ * do_gettimeofday - Returns the time of day in a timeval
+ * @tv: pointer to the timeval to be set
+ *
+ * NOTE: Users should be converted to using getnstimeofday()
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ struct timespec now;
+
+ __get_realtime_clock_ts(&now);
+ tv->tv_sec = now.tv_sec;
+ tv->tv_usec = now.tv_nsec/1000;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+/**
+ * do_settimeofday - Sets the time of day
+ * @tv: pointer to the timespec variable containing the new time
+ *
+ * Sets the time of day to the new time, updates NTP and notifies hrtimers
+ */
+int do_settimeofday(struct timespec *tv)
+{
+ unsigned long flags;
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+ nsec -= __get_nsec_offset();
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ ntp_clear();
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/**
+ * change_clocksource - Swaps clocksources if a new one is available
+ *
+ * Accumulates current time interval and initializes new clocksource
+ */
+static int change_clocksource(void)
+{
+ struct clocksource *new;
+ cycle_t now;
+ u64 nsec;
+ new = clocksource_get_next();
+ if (clock != new) {
+ now = clocksource_read(new);
+ nsec = __get_nsec_offset();
+ timespec_add_ns(&xtime, nsec);
+
+ clock = new;
+ clock->cycle_last = now;
+ printk(KERN_INFO "Time: %s clocksource has been installed.\n",
+ clock->name);
+ return 1;
+ } else if (clock->update_callback) {
+ return clock->update_callback();
+ }
+ return 0;
+}
+#else
+#define change_clocksource() (0)
+#endif
+
+/**
+ * timekeeping_is_continuous - check to see if timekeeping is free running
+ */
+int timekeeping_is_continuous(void)
+{
+ unsigned long seq;
+ int ret;
+
do {
- ticks--;
- update_wall_time_one_tick();
- if (xtime.tv_nsec >= 1000000000) {
- xtime.tv_nsec -= 1000000000;
+ seq = read_seqbegin(&xtime_lock);
+
+ ret = clock->is_continuous;
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ return ret;
+}
+
+/*
+ * timekeeping_init - Initializes the clocksource and common timekeeping values
+ */
+void __init timekeeping_init(void)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ clock = clocksource_get_next();
+ clocksource_calculate_interval(clock, tick_nsec);
+ clock->cycle_last = clocksource_read(clock);
+ ntp_clear();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+
+
+/*
+ * timekeeping_resume - Resumes the generic timekeeping subsystem.
+ * @dev: unused
+ *
+ * This is for the generic clocksource timekeeping.
+ * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
+ * still managed by arch specific suspend/resume code.
+ */
+static int timekeeping_resume(struct sys_device *dev)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ /* restart the last cycle value */
+ clock->cycle_last = clocksource_read(clock);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ return 0;
+}
+
+/* sysfs resume/suspend bits for timekeeping */
+static struct sysdev_class timekeeping_sysclass = {
+ .resume = timekeeping_resume,
+ set_kset_name("timekeeping"),
+};
+
+static struct sys_device device_timer = {
+ .id = 0,
+ .cls = &timekeeping_sysclass,
+};
+
+static int __init timekeeping_init_device(void)
+{
+ int error = sysdev_class_register(&timekeeping_sysclass);
+ if (!error)
+ error = sysdev_register(&device_timer);
+ return error;
+}
+
+device_initcall(timekeeping_init_device);
+
+/*
+ * If the error is already large, we look ahead another tick,
+ * to compensate for late or lost adjustments.
+ */
+static __always_inline int clocksource_bigadjust(int sign, s64 error, s64 *interval, s64 *offset)
+{
+ int adj;
+
+ /*
+ * As soon as the machine is synchronized to the external time
+ * source this should be the common case.
+ */
+ error >>= 2;
+ if (likely(sign > 0 ? error <= *interval : error >= *interval))
+ return sign;
+
+ /*
+ * An extra look ahead dampens the effect of the current error,
+ * which can grow quite large with continuously late updates, as
+ * it would dominate the adjustment value and can lead to
+ * oscillation.
+ */
+ error += current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+ error -= clock->xtime_interval >> 1;
+
+ adj = 0;
+ while (1) {
+ error >>= 1;
+ if (sign > 0 ? error <= *interval : error >= *interval)
+ break;
+ adj++;
+ }
+
+ /*
+ * Add the current adjustments to the error and take the offset
+ * into account; the latter can leave the error barely reduced
+ * at the next tick. Check the error again if there's
+ * room for another adjustment, thus further reducing the error
+ * which would otherwise have to be corrected at the next update.
+ */
+ error = (error << 1) - *interval + *offset;
+ if (sign > 0 ? error > *interval : error < *interval)
+ adj++;
+
+ *interval <<= adj;
+ *offset <<= adj;
+ return sign << adj;
+}
+
+/*
+ * Adjust the multiplier to reduce the error value.
+ * This is optimized for the most common adjustments of -1, 0 and 1;
+ * for other values we can do a bit more work.
+ */
+static void clocksource_adjust(struct clocksource *clock, s64 offset)
+{
+ s64 error, interval = clock->cycle_interval;
+ int adj;
+
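+ /* Scale the accumulated error down to the clocksource's shift
+ * domain; the extra "- 1" keeps one more bit, which halves the
+ * threshold used by the interval comparisons below. */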
+ error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+ if (error > interval) {
+ adj = clocksource_bigadjust(1, error, &interval, &offset);
+ } else if (error < -interval) {
+ interval = -interval;
+ offset = -offset;
+ adj = clocksource_bigadjust(-1, error, &interval, &offset);
+ } else
+ return;
+
+ clock->mult += adj;
+ clock->xtime_interval += interval;
+ clock->xtime_nsec -= offset;
+ clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
+}
+
+/*
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ * Called from the timer interrupt. Must hold a write lock on xtime_lock.
+ */
+static void update_wall_time(void)
+{
+ cycle_t offset;
+
+ clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+
+#ifdef CONFIG_GENERIC_TIME
+ offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
+#else
+ offset = clock->cycle_interval;
+#endif
+
+ /* normally this loop will run just once, however in the
+ * case of lost or late ticks, it will accumulate correctly.
+ */
+ while (offset >= clock->cycle_interval) {
+ /* accumulate one interval */
+ clock->xtime_nsec += clock->xtime_interval;
+ clock->cycle_last += clock->cycle_interval;
+ offset -= clock->cycle_interval;
+
+ if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
+ clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
xtime.tv_sec++;
second_overflow();
}
- } while (ticks);
+
+ /* interpolator bits */
+ time_interpolator_update(clock->xtime_interval
+ >> clock->shift);
+ /* increment the NTP state machine */
+ update_ntp_one_tick();
+
+ /* accumulate error between NTP and clock interval */
+ clock->error += current_tick_length();
+ clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+ }
+
+ /* correct the clock when NTP error is too big */
+ clocksource_adjust(clock, offset);
+
+ /* store full nanoseconds into xtime */
+ xtime.tv_nsec = clock->xtime_nsec >> clock->shift;
+ clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+
+ /* check to see if there is a new clocksource to use */
+ if (change_clocksource()) {
+ clock->error = 0;
+ clock->xtime_nsec = 0;
+ clocksource_calculate_interval(clock, tick_nsec);
+ }
}
/*
@@ -915,10 +1243,8 @@ static inline void update_times(void)
unsigned long ticks;
ticks = jiffies - wall_jiffies;
- if (ticks) {
- wall_jiffies += ticks;
- update_wall_time(ticks);
- }
+ wall_jiffies += ticks;
+ update_wall_time();
calc_load(ticks);
}
@@ -1326,7 +1652,7 @@ static void __devinit migrate_timers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int timer_cpu_notify(struct notifier_block *self,
+static int __devinit timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1346,7 +1672,7 @@ static int timer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block timers_nb = {
+static struct notifier_block __devinitdata timers_nb = {
.notifier_call = timer_cpu_notify,
};
diff --git a/kernel/unwind.c b/kernel/unwind.c
new file mode 100644
index 000000000000..f69c804c8e62
--- /dev/null
+++ b/kernel/unwind.c
@@ -0,0 +1,918 @@
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/unwind.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/stop_machine.h>
+#include <asm/sections.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+extern char __start_unwind[], __end_unwind[];
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / FIELD_SIZEOF(struct unwind_frame_info, f), \
+ FIELD_SIZEOF(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+ UNW_REGISTER_INFO
+};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ struct unwind_table *link;
+ const char *name;
+} root_table, *last_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static struct unwind_table *find_table(unsigned long pc)
+{
+ struct unwind_table *table;
+
+ for (table = &root_table; table; table = table->link)
+ if ((pc >= table->core.pc
+ && pc < table->core.pc + table->core.range)
+ || (pc >= table->init.pc
+ && pc < table->init.pc + table->init.range))
+ break;
+
+ return table;
+}
+
+static void init_unwind_table(struct unwind_table *table,
+ const char *name,
+ const void *core_start,
+ unsigned long core_size,
+ const void *init_start,
+ unsigned long init_size,
+ const void *table_start,
+ unsigned long table_size)
+{
+ table->core.pc = (unsigned long)core_start;
+ table->core.range = core_size;
+ table->init.pc = (unsigned long)init_start;
+ table->init.range = init_size;
+ table->address = table_start;
+ table->size = table_size;
+ table->link = NULL;
+ table->name = name;
+}
+
+void __init unwind_init(void)
+{
+ init_unwind_table(&root_table, "kernel",
+ _text, _end - _text,
+ NULL, 0,
+ __start_unwind, __end_unwind - __start_unwind);
+}
+
+#ifdef CONFIG_MODULES
+
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module,
+ const void *table_start,
+ unsigned long table_size)
+{
+ struct unwind_table *table;
+
+ if (table_size <= 0)
+ return NULL;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ init_unwind_table(table, module->name,
+ module->module_core, module->core_size,
+ module->module_init, module->init_size,
+ table_start, table_size);
+
+ if (last_table)
+ last_table->link = table;
+ else
+ root_table.link = table;
+ last_table = table;
+
+ return table;
+}
+
+struct unlink_table_info
+{
+ struct unwind_table *table;
+ int init_only;
+};
+
+static int unlink_table(void *arg)
+{
+ struct unlink_table_info *info = arg;
+ struct unwind_table *table = info->table, *prev;
+
+ for (prev = &root_table; prev->link && prev->link != table; prev = prev->link)
+ ;
+
+ if (prev->link) {
+ if (info->init_only) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ info->table = NULL;
+ } else {
+ prev->link = table->link;
+ if (!prev->link)
+ last_table = prev;
+ }
+ } else
+ info->table = NULL;
+
+ return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+ struct unwind_table *table = handle;
+ struct unlink_table_info info;
+
+ if (!table || table == &root_table)
+ return;
+
+ if (init_only && table == last_table) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ return;
+ }
+
+ info.table = table;
+ info.init_only = init_only;
+ stop_machine_run(unlink_table, &info, NR_CPUS);
+
+ if (info.table)
+ kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
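+/*
+ * Decode a DWARF unsigned LEB128 value: seven payload bits per byte,
+ * least significant group first, with the high bit set on all but the
+ * last byte. *pcur is advanced; on overflow it is moved past @end so
+ * the caller can detect the error.
+ */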
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ uleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (uleb128_t)(*cur & 0x7f) << shift;
+ if (!(*cur++ & 0x80))
+ break;
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ sleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (sleb128_t)(*cur & 0x7f) << shift;
+ if (!(*cur & 0x80)) {
+ value |= -(*cur++ & 0x40) << shift;
+ break;
+ }
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end,
+ signed ptrType)
+{
+ unsigned long value = 0;
+ union {
+ const u8 *p8;
+ const u16 *p16u;
+ const s16 *p16s;
+ const u32 *p32u;
+ const s32 *p32s;
+ const unsigned long *pul;
+ } ptr;
+
+ if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+ return 0;
+ ptr.p8 = *pLoc;
+ switch(ptrType & DW_EH_PE_FORM) {
+ case DW_EH_PE_data2:
+ if (end < (const void *)(ptr.p16u + 1))
+ return 0;
+ if(ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p16s++);
+ else
+ value = get_unaligned(ptr.p16u++);
+ break;
+ case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+ if (end < (const void *)(ptr.p32u + 1))
+ return 0;
+ if(ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p32s++);
+ else
+ value = get_unaligned(ptr.p32u++);
+ break;
+ case DW_EH_PE_data8:
+ BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+ BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+ case DW_EH_PE_native:
+ if (end < (const void *)(ptr.pul + 1))
+ return 0;
+ value = get_unaligned(ptr.pul++);
+ break;
+ case DW_EH_PE_leb128:
+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+ value = ptrType & DW_EH_PE_signed
+ ? get_sleb128(&ptr.p8, end)
+ : get_uleb128(&ptr.p8, end);
+ if ((const void *)ptr.p8 > end)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ switch(ptrType & DW_EH_PE_ADJUST) {
+ case DW_EH_PE_abs:
+ break;
+ case DW_EH_PE_pcrel:
+ value += (unsigned long)*pLoc;
+ break;
+ default:
+ return 0;
+ }
+ if ((ptrType & DW_EH_PE_indirect)
+ && __get_user(value, (unsigned long *)value))
+ return 0;
+ *pLoc = ptr.p8;
+
+ return value;
+}
+
+static signed fde_pointer_type(const u32 *cie)
+{
+ const u8 *ptr = (const u8 *)(cie + 2);
+ unsigned version = *ptr;
+
+ if (version != 1)
+ return -1; /* unsupported */
+ if (*++ptr) {
+ const char *aug;
+ const u8 *end = (const u8 *)(cie + 1) + *cie;
+ uleb128_t len;
+
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr != 'z')
+ return -1;
+ /* check if augmentation string is nul-terminated */
+ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL)
+ return -1;
+ ++ptr; /* skip terminator */
+ get_uleb128(&ptr, end); /* skip code alignment */
+ get_sleb128(&ptr, end); /* skip data alignment */
+ /* skip return address column */
+ version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end);
+ len = get_uleb128(&ptr, end); /* augmentation length */
+ if (ptr + len < ptr || ptr + len > end)
+ return -1;
+ end = ptr + len;
+ while (*++aug) {
+ if (ptr >= end)
+ return -1;
+ switch(*aug) {
+ case 'L':
+ ++ptr;
+ break;
+ case 'P': {
+ signed ptrType = *ptr++;
+
+ if (!read_pointer(&ptr, end, ptrType) || ptr > end)
+ return -1;
+ }
+ break;
+ case 'R':
+ return *ptr;
+ default:
+ return -1;
+ }
+ }
+ }
+ return DW_EH_PE_native|DW_EH_PE_abs;
+}
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+ state->loc += delta * state->codeAlign;
+
+ return delta > 0;
+}
+
+static void set_rule(uleb128_t reg,
+ enum item_location where,
+ uleb128_t value,
+ struct unwind_state *state)
+{
+ if (reg < ARRAY_SIZE(state->regs)) {
+ state->regs[reg].where = where;
+ state->regs[reg].value = value;
+ }
+}
+
+static int processCFI(const u8 *start,
+ const u8 *end,
+ unsigned long targetLoc,
+ signed ptrType,
+ struct unwind_state *state)
+{
+ union {
+ const u8 *p8;
+ const u16 *p16;
+ const u32 *p32;
+ } ptr;
+ int result = 1;
+
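+ /* For an FDE, first replay the CIE's initial instructions to
+ * establish the default register rules before processing the
+ * FDE's own instructions. */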
+ if (start != state->cieStart) {
+ state->loc = state->org;
+ result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state);
+ if (targetLoc == 0 && state->label == NULL)
+ return result;
+ }
+ for (ptr.p8 = start; result && ptr.p8 < end; ) {
+ switch(*ptr.p8 >> 6) {
+ uleb128_t value;
+
+ case 0:
+ switch(*ptr.p8++) {
+ case DW_CFA_nop:
+ break;
+ case DW_CFA_set_loc:
+ if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0)
+ result = 0;
+ break;
+ case DW_CFA_advance_loc1:
+ result = ptr.p8 < end && advance_loc(*ptr.p8++, state);
+ break;
+ case DW_CFA_advance_loc2:
+ result = ptr.p8 <= end + 2
+ && advance_loc(*ptr.p16++, state);
+ break;
+ case DW_CFA_advance_loc4:
+ result = ptr.p8 <= end + 4
+ && advance_loc(*ptr.p32++, state);
+ break;
+ case DW_CFA_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value, get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_offset_extended_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory, get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value, get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_restore_extended:
+ case DW_CFA_undefined:
+ case DW_CFA_same_value:
+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state);
+ break;
+ case DW_CFA_register:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Register,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_remember_state:
+ if (ptr.p8 == state->label) {
+ state->label = NULL;
+ return 1;
+ }
+ if (state->stackDepth >= MAX_STACK_DEPTH)
+ return 0;
+ state->stack[state->stackDepth++] = ptr.p8;
+ break;
+ case DW_CFA_restore_state:
+ if (state->stackDepth) {
+ const uleb128_t loc = state->loc;
+ const u8 *label = state->label;
+
+ state->label = state->stack[state->stackDepth - 1];
+ memcpy(&state->cfa, &badCFA, sizeof(state->cfa));
+ memset(state->regs, 0, sizeof(state->regs));
+ state->stackDepth = 0;
+ result = processCFI(start, end, 0, ptrType, state);
+ state->loc = loc;
+ state->label = label;
+ } else
+ return 0;
+ break;
+ case DW_CFA_def_cfa:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ /*nobreak*/
+ case DW_CFA_def_cfa_offset:
+ state->cfa.offs = get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_def_cfa_sf:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ /*nobreak*/
+ case DW_CFA_def_cfa_offset_sf:
+ state->cfa.offs = get_sleb128(&ptr.p8, end)
+ * state->dataAlign;
+ break;
+ case DW_CFA_def_cfa_register:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ break;
+ /*todo case DW_CFA_def_cfa_expression: */
+ /*todo case DW_CFA_expression: */
+ /*todo case DW_CFA_val_expression: */
+ case DW_CFA_GNU_args_size:
+ get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Memory,
+ (uleb128_t)0 - get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_GNU_window_save:
+ default:
+ result = 0;
+ break;
+ }
+ break;
+ case 1:
+ result = advance_loc(*ptr.p8++ & 0x3f, state);
+ break;
+ case 2:
+ value = *ptr.p8++ & 0x3f;
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
+ break;
+ case 3:
+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+ break;
+ }
+ if (ptr.p8 > end)
+ result = 0;
+ if (result && targetLoc != 0 && targetLoc < state->loc)
+ return 1;
+ }
+
+ return result
+ && ptr.p8 == end
+ && (targetLoc == 0
+ || (/*todo While in theory this should apply, gcc in practice omits
+ everything past the function prolog, and hence the location
+ never reaches the end of the function.
+ targetLoc < state->loc &&*/ state->label == NULL));
+}
+
+/* Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error. */
+int unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ struct unwind_table *table;
+ struct unwind_state state;
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+ if ((table = find_table(UNW_PC(frame))) != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ unsigned long tableSize = table->size;
+
+ for (fde = table->address;
+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde,
+ fde += 1 + *fde / sizeof(*fde)) {
+ if (!*fde || (*fde & (sizeof(*fde) - 1)))
+ break;
+ if (!fde[1])
+ continue; /* this is a CIE */
+ if ((fde[1] & (sizeof(*fde) - 1))
+ || fde[1] > (unsigned long)(fde + 1)
+ - (unsigned long)table->address)
+ continue; /* this is not a valid FDE */
+ cie = fde + 1 - fde[1] / sizeof(*fde);
+ if (*cie <= sizeof(*cie) + 4
+ || *cie >= fde[1] - sizeof(*fde)
+ || (*cie & (sizeof(*cie) - 1))
+ || cie[1]
+ || (ptrType = fde_pointer_type(cie)) < 0) {
+ cie = NULL; /* this is not a (valid) CIE */
+ continue;
+ }
+ ptr = (const u8 *)(fde + 2);
+ startLoc = read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType);
+ endLoc = startLoc
+ + read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType & DW_EH_PE_indirect
+ ? ptrType
+ : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed));
+ if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc)
+ break;
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ if ((state.version = *ptr) != 1)
+ cie = NULL; /* unsupported version */
+ else if (*++ptr) {
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr == 'z') {
+ /* check for ignorable (or already handled)
+ * nul-terminated augmentation string */
+ while (++ptr < end && *ptr)
+ if (strchr("LPR", *ptr) == NULL)
+ break;
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ ++ptr;
+ }
+ if (cie != NULL) {
+ /* get code alignment factor */
+ state.codeAlign = get_uleb128(&ptr, end);
+ /* get data alignment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else {
+ retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z')
+ ptr += get_uleb128(&ptr, end);
+ if (ptr > end
+ || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width != sizeof(unsigned long))
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ }
+ if (cie == NULL || fde == NULL) {
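+ /* No usable DWARF CFI found for this PC: fall back to a simple
+ * frame pointer walk if CONFIG_FRAME_POINTER is enabled,
+ * otherwise give up with -ENXIO. */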
+#ifdef CONFIG_FRAME_POINTER
+ unsigned long top, bottom;
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+ top = STACK_TOP(frame->task);
+ bottom = STACK_BOTTOM(frame->task);
+# if FRAME_RETADDR_OFFSET < 0
+ if (UNW_SP(frame) < top
+ && UNW_FP(frame) <= UNW_SP(frame)
+ && bottom < UNW_FP(frame)
+# else
+ if (UNW_SP(frame) > top
+ && UNW_FP(frame) >= UNW_SP(frame)
+ && bottom > UNW_FP(frame)
+# endif
+ && !((UNW_SP(frame) | UNW_FP(frame))
+ & (sizeof(unsigned long) - 1))) {
+ unsigned long link;
+
+ if (!__get_user(link,
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_LINK_OFFSET))
+# if FRAME_RETADDR_OFFSET < 0
+ && link > bottom && link < UNW_FP(frame)
+# else
+ && link > UNW_FP(frame) && link < bottom
+# endif
+ && !(link & (sizeof(link) - 1))
+ && !__get_user(UNW_PC(frame),
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_RETADDR_OFFSET))) {
+ UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET
+# if FRAME_RETADDR_OFFSET < 0
+ -
+# else
+ +
+# endif
+ sizeof(UNW_PC(frame));
+ UNW_FP(frame) = link;
+ return 0;
+ }
+ }
+#endif
+ return -ENXIO;
+ }
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+ /* process instructions */
+ if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state)
+ || state.loc > endLoc
+ || state.regs[retAddrReg].where == Nowhere
+ || state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long))
+ return -EIO;
+ /* update frame */
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ startLoc = min((unsigned long)UNW_SP(frame), cfa);
+ endLoc = max((unsigned long)UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+#ifndef CONFIG_64BIT
+# define CASES CASE(8); CASE(16); CASE(32)
+#else
+# define CASES CASE(8); CASE(16); CASE(32); CASE(64)
+#endif
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ return -EIO;
+ }
+ switch(state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width > reg_info[state.regs[i].value].width)
+ return -EIO;
+ switch(reg_info[state.regs[i].value].width) {
+#define CASE(n) \
+ case sizeof(u##n): \
+ state.regs[i].value = FRAME_REG(state.regs[i].value, \
+ const u##n); \
+ break
+ CASES;
+#undef CASE
+ default:
+ return -EIO;
+ }
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i))
+ continue;
+ switch(state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch(reg_info[i].width) {
+#define CASE(n) case sizeof(u##n): \
+ FRAME_REG(i, u##n) = state.regs[i].value; \
+ break
+ CASES;
+#undef CASE
+ default:
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long))
+ return -EIO;
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory: {
+ unsigned long addr = cfa + state.regs[i].value
+ * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc)
+ return -EIO;
+ switch(reg_info[i].width) {
+#define CASE(n) case sizeof(u##n): \
+ __get_user(FRAME_REG(i, u##n), (u##n *)addr); \
+ break
+ CASES;
+#undef CASE
+ default:
+ return -EIO;
+ }
+ }
+ break;
+ }
+ }
+
+ return 0;
+#undef CASES
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(unwind);
+
+int unwind_init_frame_info(struct unwind_frame_info *info,
+ struct task_struct *tsk,
+ /*const*/ struct pt_regs *regs)
+{
+ info->task = tsk;
+ arch_unw_init_frame_info(info, regs);
+
+ return 0;
+}
+EXPORT_SYMBOL(unwind_init_frame_info);
+
+/*
+ * Prepare to unwind a blocked task.
+ */
+int unwind_init_blocked(struct unwind_frame_info *info,
+ struct task_struct *tsk)
+{
+ info->task = tsk;
+ arch_unw_init_blocked(info);
+
+ return 0;
+}
+EXPORT_SYMBOL(unwind_init_blocked);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+int unwind_init_running(struct unwind_frame_info *info,
+ asmlinkage int (*callback)(struct unwind_frame_info *,
+ void *arg),
+ void *arg)
+{
+ info->task = current;
+
+ return arch_unwind_init_running(info, callback, arg);
+}
+EXPORT_SYMBOL(unwind_init_running);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+int unwind_to_user(struct unwind_frame_info *info)
+{
+ while (!arch_unw_user_mode(info)) {
+ int err = unwind(info);
+
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(unwind_to_user);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 565cf7a1febd..59f0b42bd89e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -559,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
}
/* We're holding the cpucontrol mutex here */
-static int workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/lib/Kconfig b/lib/Kconfig
index 3de93357f5ab..f6299342b882 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -86,4 +86,10 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
tristate
+#
+# plist support is select'ed if needed
+#
+config PLIST
+ boolean
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ccb0c1fdf1b5..e4fcbd12cf6e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -23,6 +23,22 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config UNUSED_SYMBOLS
+ bool "Enable unused/obsolete exported symbols"
+ default y if X86
+ help
+ Unused but exported symbols make the kernel needlessly bigger. For
+ that reason most of these unused exports will soon be removed. This
+ option is provided temporarily as a transition period in case some
+ external kernel module needs one of these symbols anyway. If you
+ encounter such a case in your module, consider whether you are
+ actually using the right API. (Rationale: since nobody in the kernel
+ is using this in a module, there is a pretty good chance it is
+ actually the wrong interface to use.) If you really need the symbol,
+ please send a mail to the linux kernel mailing list mentioning the
+ symbol, why you really need it, and what your plan is for merging
+ your module into the mainline kernel.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -107,6 +123,24 @@ config DEBUG_MUTEXES
This allows mutex semantics violations and mutex related deadlocks
(lockups) to be detected and reported automatically.
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_PI_LIST
+ bool
+ default y
+ depends on DEBUG_RT_MUTEXES
+
+config RT_MUTEX_TESTER
+ bool "Built-in scriptable tester for rt-mutexes"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This option enables an rt-mutex tester.
+
config DEBUG_SPINLOCK
bool "Spinlock debugging"
depends on DEBUG_KERNEL
@@ -188,14 +222,22 @@ config FRAME_POINTER
config UNWIND_INFO
bool "Compile the kernel with frame unwind information"
- depends on !IA64
- depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850)
+ depends on !IA64 && !PARISC
+ depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
help
If you say Y here the resulting kernel image will be slightly larger
but not slower, and it will give very useful debugging information.
If you don't debug the kernel, you can say N, but we may not be able
to solve problems without frame unwind information or frame pointers.
+config STACK_UNWIND
+ bool "Stack unwind support"
+ depends on UNWIND_INFO
+ depends on X86
+ help
+ This enables more precise stack traces, omitting all unrelated
+ occurrences of pointers into kernel code from the dump.
+
config FORCED_INLINING
bool "Force gcc to inline functions marked 'inline'"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 79358ad1f113..10c13c9d7824 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,7 @@ lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
diff --git a/lib/idr.c b/lib/idr.c
index de19030a999b..4d096819511a 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/module.h>
#endif
+#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
@@ -398,6 +399,48 @@ void *idr_find(struct idr *idp, int id)
}
EXPORT_SYMBOL(idr_find);
+/**
+ * idr_replace - replace pointer for given id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: lookup key
+ *
+ * Replace the pointer registered with an id and return the old value.
+ * A -ENOENT return indicates that @id was not found.
+ * A -EINVAL return indicates that @id was not within valid constraints.
+ *
+ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+ int n;
+ struct idr_layer *p, *old_p;
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+
+ id &= MAX_ID_MASK;
+
+ if (id >= (1 << n))
+ return ERR_PTR(-EINVAL);
+
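+ /* walk down the radix tree, consuming IDR_BITS of the id per
+ * level, until we reach the leaf layer that holds the pointer */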
+ n -= IDR_BITS;
+ while ((n > 0) && p) {
+ p = p->ary[(id >> n) & IDR_MASK];
+ n -= IDR_BITS;
+ }
+
+ n = id & IDR_MASK;
+ if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+ return ERR_PTR(-ENOENT);
+
+ old_p = p->ary[n];
+ p->ary[n] = ptr;
+
+ return old_p;
+}
+EXPORT_SYMBOL(idr_replace);
+
static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
unsigned long flags)
{
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cb5490ec00f2..e713e86811ae 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -14,7 +14,7 @@
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
diff --git a/lib/plist.c b/lib/plist.c
new file mode 100644
index 000000000000..3074a02272f3
--- /dev/null
+++ b/lib/plist.c
@@ -0,0 +1,118 @@
+/*
+ * lib/plist.c
+ *
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This file contains the add / del functions which are considered to
+ * be too large to inline. See include/linux/plist.h for further
+ * information.
+ */
+
+#include <linux/plist.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_PI_LIST
+
+static void plist_check_prev_next(struct list_head *t, struct list_head *p,
+ struct list_head *n)
+{
+ if (n->prev != p || p->next != n) {
+ printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
+ printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
+ printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
+ WARN_ON(1);
+ }
+}
+
+static void plist_check_list(struct list_head *top)
+{
+ struct list_head *prev = top, *next = top->next;
+
+ plist_check_prev_next(top, prev, next);
+ while (next != top) {
+ prev = next;
+ next = prev->next;
+ plist_check_prev_next(top, prev, next);
+ }
+}
+
+static void plist_check_head(struct plist_head *head)
+{
+ WARN_ON(!head->lock);
+ if (head->lock)
+ WARN_ON_SMP(!spin_is_locked(head->lock));
+ plist_check_list(&head->prio_list);
+ plist_check_list(&head->node_list);
+}
+
+#else
+# define plist_check_head(h) do { } while (0)
+#endif
+
+/**
+ * plist_add - add @node to @head
+ *
+ * @node: &struct plist_node pointer
+ * @head: &struct plist_head pointer
+ */
+void plist_add(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+
+ plist_check_head(head);
+ WARN_ON(!plist_node_empty(node));
+
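+ /* prio_list holds the first node of each priority level, lowest
+ * prio value (highest priority) first. The new node joins
+ * prio_list only when it opens a new level, but always joins
+ * node_list in priority order. */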
+ list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
+ if (node->prio < iter->prio)
+ goto lt_prio;
+ else if (node->prio == iter->prio) {
+ iter = list_entry(iter->plist.prio_list.next,
+ struct plist_node, plist.prio_list);
+ goto eq_prio;
+ }
+ }
+
+lt_prio:
+ list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
+eq_prio:
+ list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+
+ plist_check_head(head);
+}
+
+/**
+ * plist_del - Remove @node from the plist.
+ *
+ * @node: &struct plist_node pointer - entry to be removed
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_del(struct plist_node *node, struct plist_head *head)
+{
+ plist_check_head(head);
+
+ if (!list_empty(&node->plist.prio_list)) {
+ struct plist_node *next = plist_first(&node->plist);
+
+ list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
+ list_del_init(&node->plist.prio_list);
+ }
+
+ list_del_init(&node->plist.node_list);
+
+ plist_check_head(head);
+}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 797428afd111..bed7229378f2 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -489,7 +489,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
if (str < end)
*str = '\0';
else
- *end = '\0';
+ end[-1] = '\0';
}
/* the trailing null byte doesn't count towards the total */
return str-buf;
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 02a16eacb72d..d84560c076d8 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -63,10 +63,10 @@
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm->avail_out >= 258 for each loop to avoid checking for
output space.
+
+ - @start: inflate()'s starting value for strm->avail_out
*/
-void inflate_fast(strm, start)
-z_streamp strm;
-unsigned start; /* inflate()'s starting value for strm->avail_out */
+void inflate_fast(z_streamp strm, unsigned start)
{
struct inflate_state *state;
unsigned char *in; /* local strm->next_in */
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index 62343c53bf7e..3fe6ce5b53e5 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -8,15 +8,6 @@
#define MAXBITS 15
-const char inflate_copyright[] =
- " inflate 1.2.3 Copyright 1995-2005 Mark Adler ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
/*
Build a set of tables to decode the provided canonical Huffman code.
The code lengths are lens[0..codes-1]. The result starts at *table,
@@ -29,13 +20,8 @@ const char inflate_copyright[] =
table index bits. It will differ if the request is greater than the
longest code or if it is less than the shortest code.
*/
-int zlib_inflate_table(type, lens, codes, table, bits, work)
-codetype type;
-unsigned short *lens;
-unsigned codes;
-code **table;
-unsigned *bits;
-unsigned short *work;
+int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
+ code **table, unsigned *bits, unsigned short *work)
{
unsigned len; /* a code's length in bits */
unsigned sym; /* index of code symbols */
diff --git a/mm/Kconfig b/mm/Kconfig
index 66e65ab39426..8f5b45615f7b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -115,7 +115,8 @@ config SPARSEMEM_EXTREME
# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
- depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND
+ depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on (IA64 || X86 || PPC64)
comment "Memory hotplug is currently incompatible with Software Suspend"
depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
@@ -145,3 +146,9 @@ config MIGRATION
while the virtual addresses are not changed. This is useful for
example on NUMA systems to put pages nearer to the processors accessing
the page.
+
+config RESOURCES_64BIT
+ bool "64 bit Memory and IO resources (EXPERIMENTAL)" if (!64BIT && EXPERIMENTAL)
+ default 64BIT
+ help
+ This option allows memory and IO resources to be 64 bit.
diff --git a/mm/filemap.c b/mm/filemap.c
index 9c7334bafda8..648f2c0c8e18 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2069,7 +2069,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
{
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
- struct address_space_operations *a_ops = mapping->a_ops;
+ const struct address_space_operations *a_ops = mapping->a_ops;
struct inode *inode = mapping->host;
long status = 0;
struct page *page;
@@ -2095,14 +2095,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
do {
unsigned long index;
unsigned long offset;
- unsigned long maxlen;
size_t copied;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
index = pos >> PAGE_CACHE_SHIFT;
bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
+
+ /* Limit the size of the copy to the caller's write size */
+ bytes = min(bytes, count);
+
+ /*
+ * Limit the size of the copy to that of the current segment,
+ * because fault_in_pages_readable() doesn't know how to walk
+ * segments.
+ */
+ bytes = min(bytes, cur_iov->iov_len - iov_base);
/*
* Bring in the user page that we will copy from _first_.
@@ -2110,10 +2117,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* same page as we're writing to, without it being marked
* up-to-date.
*/
- maxlen = cur_iov->iov_len - iov_base;
- if (maxlen > bytes)
- maxlen = bytes;
- fault_in_pages_readable(buf, maxlen);
+ fault_in_pages_readable(buf, bytes);
page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
if (!page) {
@@ -2121,6 +2125,12 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
break;
}
+ if (unlikely(bytes == 0)) {
+ status = 0;
+ copied = 0;
+ goto zero_length_segment;
+ }
+
status = a_ops->prepare_write(file, page, offset, offset+bytes);
if (unlikely(status)) {
loff_t isize = i_size_read(inode);
@@ -2150,7 +2160,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
page_cache_release(page);
continue;
}
- if (likely(copied > 0)) {
+zero_length_segment:
+ if (likely(copied >= 0)) {
if (!status)
status = copied;
@@ -2215,7 +2226,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos)
{
struct file *file = iocb->ki_filp;
- struct address_space * mapping = file->f_mapping;
+ const struct address_space * mapping = file->f_mapping;
size_t ocount; /* original count */
size_t count; /* after file limit checks */
struct inode *inode = mapping->host;
diff --git a/mm/filemap.h b/mm/filemap.h
index 536979fb4ba7..3f2a343c6015 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -88,7 +88,7 @@ filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
const struct iovec *iov = *iovp;
size_t base = *basep;
- while (bytes) {
+ do {
int copy = min(bytes, iov->iov_len - base);
bytes -= copy;
@@ -97,7 +97,7 @@ filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
iov++;
base = 0;
}
- }
+ } while (bytes);
*iovp = iov;
*basep = base;
}
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index b960ac8e5918..b4fd0d7c9bfb 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -273,7 +273,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
size_t count, loff_t pos, loff_t *ppos)
{
struct address_space * mapping = filp->f_mapping;
- struct address_space_operations *a_ops = mapping->a_ops;
+ const struct address_space_operations *a_ops = mapping->a_ops;
struct inode *inode = mapping->host;
long status = 0;
struct page *page;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 841a077d5aeb..ea4038838b0a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -21,6 +21,7 @@
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
+#include <linux/ioport.h>
#include <asm/tlbflush.h>
@@ -126,6 +127,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
unsigned long i;
unsigned long flags;
unsigned long onlined_pages = 0;
+ struct resource res;
+ u64 section_end;
+ unsigned long start_pfn;
struct zone *zone;
int need_zonelists_rebuild = 0;
@@ -148,10 +152,27 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
if (!populated_zone(zone))
need_zonelists_rebuild = 1;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pfn_to_page(pfn + i);
- online_page(page);
- onlined_pages++;
+ res.start = (u64)pfn << PAGE_SHIFT;
+ res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
+ res.flags = IORESOURCE_MEM; /* we just need system ram */
+ section_end = res.end;
+
+ while (find_next_system_ram(&res) >= 0) {
+ start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+ nr_pages = (unsigned long)
+ ((res.end + 1 - res.start) >> PAGE_SHIFT);
+
+ if (PageReserved(pfn_to_page(start_pfn))) {
+			/* pages in this region are not onlined yet */
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pfn_to_page(start_pfn + i);
+ online_page(page);
+ onlined_pages++;
+ }
+ }
+
+ res.start = res.end + 1;
+ res.end = section_end;
}
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
@@ -163,3 +184,100 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
vm_total_pages = nr_free_pagecache_pages();
return 0;
}
+
+static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+{
+ struct pglist_data *pgdat;
+ unsigned long zones_size[MAX_NR_ZONES] = {0};
+ unsigned long zholes_size[MAX_NR_ZONES] = {0};
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+
+ pgdat = arch_alloc_nodedata(nid);
+ if (!pgdat)
+ return NULL;
+
+ arch_refresh_nodedata(nid, pgdat);
+
+ /* we can use NODE_DATA(nid) from here */
+
+	/* init node's zones as empty zones, we don't have any present pages. */
+ free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
+
+ return pgdat;
+}
+
+static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
+{
+ arch_refresh_nodedata(nid, NULL);
+ arch_free_nodedata(pgdat);
+ return;
+}
+
+/* add this memory to iomem resource */
+static void register_memory_resource(u64 start, u64 size)
+{
+ struct resource *res;
+
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ BUG_ON(!res);
+
+ res->name = "System RAM";
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = IORESOURCE_MEM;
+ if (request_resource(&iomem_resource, res) < 0) {
+ printk("System RAM resource %llx - %llx cannot be added\n",
+ (unsigned long long)res->start, (unsigned long long)res->end);
+ kfree(res);
+ }
+}
+
+
+
+int add_memory(int nid, u64 start, u64 size)
+{
+ pg_data_t *pgdat = NULL;
+ int new_pgdat = 0;
+ int ret;
+
+ if (!node_online(nid)) {
+ pgdat = hotadd_new_pgdat(nid, start);
+ if (!pgdat)
+ return -ENOMEM;
+ new_pgdat = 1;
+ ret = kswapd_run(nid);
+ if (ret)
+ goto error;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, start, size);
+
+ if (ret < 0)
+ goto error;
+
+	/* we online the node here; we can't roll back from this point */
+ node_set_online(nid);
+
+ if (new_pgdat) {
+ ret = register_one_node(nid);
+ /*
+		 * If the sysfs file for the new node can't be created, cpus
+		 * on the node can't be hot-added. There is no way to roll
+		 * back now, so catch the failure with BUG_ON(), reluctantly.
+ */
+ BUG_ON(ret);
+ }
+
+	/* register this memory as a resource */
+ register_memory_resource(start, size);
+
+ return ret;
+error:
+ /* rollback pgdat allocation and others */
+ if (new_pgdat)
+ rollback_node_hotadd(nid, pgdat);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(add_memory);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 73e0f23b7f51..6b9740bbf4c0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1821,7 +1821,7 @@ static inline void check_huge_range(struct vm_area_struct *vma,
int show_numa_map(struct seq_file *m, void *v)
{
- struct task_struct *task = m->private;
+ struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
struct numa_maps *md;
struct file *file = vma->vm_file;
@@ -1837,7 +1837,7 @@ int show_numa_map(struct seq_file *m, void *v)
return 0;
mpol_to_str(buffer, sizeof(buffer),
- get_vma_policy(task, vma, vma->vm_start));
+ get_vma_policy(priv->task, vma, vma->vm_start));
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
@@ -1891,7 +1891,7 @@ out:
kfree(md);
if (m->count < m->size)
- m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+ m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
return 0;
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8ccf6f1b1473..4ec7026c7bab 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -516,14 +516,14 @@ static void set_ratelimit(void)
ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
-static int
+static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
set_ratelimit();
return 0;
}
-static struct notifier_block ratelimit_nb = {
+static struct notifier_block __cpuinitdata ratelimit_nb = {
.notifier_call = ratelimit_handler,
.next = NULL,
};
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6c1174fcf52c..084a2de7e52a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,7 @@ static inline void rmv_page_order(struct page *page)
* satisfies the following equation:
* P = B & ~(1 << O)
*
- * Assumption: *_mem_map is contigious at least up to MAX_ORDER
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
*/
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
@@ -446,8 +446,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
arch_free_page(page, order);
if (!PageHighMem(page))
- mutex_debug_check_no_locks_freed(page_address(page),
- PAGE_SIZE<<order);
+ debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE<<order);
for (i = 0 ; i < (1 << order) ; ++i)
reserved += free_pages_check(page + i);
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -2031,7 +2031,7 @@ static int pageset_cpuup_callback(struct notifier_block *nfb,
return ret;
}
-static struct notifier_block pageset_notifier =
+static struct notifier_block __cpuinitdata pageset_notifier =
{ &pageset_cpuup_callback, NULL, 0 };
void __init setup_per_cpu_pageset(void)
diff --git a/mm/readahead.c b/mm/readahead.c
index e39e416860d7..aa7ec424656a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -390,8 +390,8 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
* Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
* is set wait till the read completes. Otherwise attempt to read without
* blocking.
- * Returns 1 meaning 'success' if read is succesfull without switching off
- * readhaead mode. Otherwise return failure.
+ * Returns 1 meaning 'success' if read is successful without switching off
+ * readahead mode. Otherwise return failure.
*/
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/mm/shmem.c b/mm/shmem.c
index 38bc3334f263..ea64c07cbe72 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -174,7 +174,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
}
static struct super_operations shmem_ops;
-static struct address_space_operations shmem_aops;
+static const struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
@@ -2162,7 +2162,7 @@ static void destroy_inodecache(void)
printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}
-static struct address_space_operations shmem_aops = {
+static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
diff --git a/mm/slab.c b/mm/slab.c
index 98ac20bc0de9..233e39d14caf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -89,6 +89,7 @@
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
@@ -106,6 +107,7 @@
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
+#include <linux/rtmutex.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -492,17 +494,6 @@ struct kmem_cache {
#endif
#if DEBUG
-/*
- * Magic nums for obj red zoning.
- * Placed in the first word before and the first word after an obj.
- */
-#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
-#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
-
-/* ...and for poisoning */
-#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
-#define POISON_FREE 0x6b /* for use-after-free poisoning */
-#define POISON_END 0xa5 /* end-byte of poisoning */
/*
* memory layout of objects:
@@ -1083,7 +1074,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
#endif
-static int cpuup_callback(struct notifier_block *nfb,
+static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1265,7 +1256,9 @@ bad:
return NOTIFY_BAD;
}
-static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata cpucache_notifier = {
+ &cpuup_callback, NULL, 0
+};
/*
* swap the static kmem_list3 with kmalloced memory
@@ -3405,7 +3398,7 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
- mutex_debug_check_no_locks_freed(objp, obj_size(c));
+ debug_check_no_locks_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
diff --git a/mm/sparse.c b/mm/sparse.c
index e0a3fe48aa37..c7a2b3a0e46b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -45,7 +45,7 @@ static struct mem_section *sparse_index_alloc(int nid)
static int sparse_index_init(unsigned long section_nr, int nid)
{
- static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(index_init_lock);
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
struct mem_section *section;
int ret = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 03ae2076f92f..990868afc1c6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -86,8 +86,7 @@ int rotate_reclaimable_page(struct page *page)
zone = page_zone(page);
spin_lock_irqsave(&zone->lru_lock, flags);
if (PageLRU(page) && !PageActive(page)) {
- list_del(&page->lru);
- list_add_tail(&page->lru, &zone->inactive_list);
+ list_move_tail(&page->lru, &zone->inactive_list);
inc_page_state(pgrotated);
}
if (!test_clear_page_writeback(page))
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e0e1583f32c2..7535211bb495 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -24,7 +24,7 @@
* vmscan's shrink_list, to make sync_page look nicer, and to allow
* future use of radix_tree tags in the swap cache.
*/
-static struct address_space_operations swap_aops = {
+static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
.sync_page = block_sync_page,
.set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72babac71dea..eeacb0d695c3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -34,6 +34,7 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
+#include <linux/kthread.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -1223,7 +1224,6 @@ static int kswapd(void *p)
};
cpumask_t cpumask;
- daemonize("kswapd%d", pgdat->node_id);
cpumask = node_to_cpumask(pgdat->node_id);
if (!cpus_empty(cpumask))
set_cpus_allowed(tsk, cpumask);
@@ -1450,7 +1450,7 @@ out:
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
pg_data_t *pgdat;
@@ -1468,20 +1468,35 @@ static int cpu_callback(struct notifier_block *nfb,
}
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * This kswapd start function will be called by init and node-hot-add.
+ * On node-hot-add, kswapd will be moved to proper cpus if cpus are hot-added.
+ */
+int kswapd_run(int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int ret = 0;
+
+ if (pgdat->kswapd)
+ return 0;
+
+ pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+ if (IS_ERR(pgdat->kswapd)) {
+ /* failure at boot is fatal */
+ BUG_ON(system_state == SYSTEM_BOOTING);
+		printk("Failed to start kswapd on node %d\n", nid);
+ ret = -1;
+ }
+ return ret;
+}
+
static int __init kswapd_init(void)
{
- pg_data_t *pgdat;
+ int nid;
swap_setup();
- for_each_online_pgdat(pgdat) {
- pid_t pid;
-
- pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
- BUG_ON(pid < 0);
- read_lock(&tasklist_lock);
- pgdat->kswapd = find_task_by_pid(pid);
- read_unlock(&tasklist_lock);
- }
+ for_each_online_node(nid)
+ kswapd_run(nid);
hotcpu_notifier(cpu_callback, 0);
return 0;
}
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index a48a5d580408..5fe77df00186 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1113,10 +1113,9 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
- unsigned char *ip;
-
uint32_t dst_ip = msg->content.in_info.in_dst_ip;
in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
+
dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip));
ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry);
if(entry == NULL){
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 74368f79ee5d..b81fad893328 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -480,12 +480,8 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len);
- if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
- tty_buffer_request_room(tty, skb->len);
- tty_insert_flip_string(tty, skb->data, skb->len);
- tty_flip_buffer_push(tty);
- } else
- tty->ldisc.receive_buf(tty, skb->data, NULL, skb->len);
+ tty_insert_flip_string(tty, skb->data, skb->len);
+ tty_flip_buffer_push(tty);
kfree_skb(skb);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index ea2469398bd5..f1c52cbd6ef7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -230,7 +230,7 @@ extern void netdev_unregister_sysfs(struct net_device *);
* For efficiency
*/
-int netdev_nit;
+static int netdev_nit;
/*
* Add a protocol ID to the list. Now that the input handler is
@@ -1325,9 +1325,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
nskb->next = NULL;
rc = dev->hard_start_xmit(nskb, dev);
if (unlikely(rc)) {
+ nskb->next = skb->next;
skb->next = nskb;
return rc;
}
+ if (unlikely(netif_queue_stopped(dev) && skb->next))
+ return NETDEV_TX_BUSY;
} while (skb->next);
skb->destructor = DEV_GSO_CB(skb)->destructor;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9cb781830380..471da451cd48 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -54,6 +54,7 @@ static atomic_t trapped;
sizeof(struct iphdr) + sizeof(struct ethhdr))
static void zap_completion_queue(void);
+static void arp_reply(struct sk_buff *skb);
static void queue_process(void *p)
{
@@ -153,6 +154,22 @@ static void poll_napi(struct netpoll *np)
}
}
+static void service_arp_queue(struct netpoll_info *npi)
+{
+ struct sk_buff *skb;
+
+ if (unlikely(!npi))
+ return;
+
+ skb = skb_dequeue(&npi->arp_tx);
+
+ while (skb != NULL) {
+ arp_reply(skb);
+ skb = skb_dequeue(&npi->arp_tx);
+ }
+ return;
+}
+
void netpoll_poll(struct netpoll *np)
{
if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
@@ -163,6 +180,8 @@ void netpoll_poll(struct netpoll *np)
if (np->dev->poll)
poll_napi(np);
+ service_arp_queue(np->dev->npinfo);
+
zap_completion_queue();
}
@@ -279,14 +298,10 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
* network drivers do not expect to be called if the queue is
* stopped.
*/
- if (netif_queue_stopped(np->dev)) {
- netif_tx_unlock(np->dev);
- netpoll_poll(np);
- udelay(50);
- continue;
- }
+ status = NETDEV_TX_BUSY;
+ if (!netif_queue_stopped(np->dev))
+ status = np->dev->hard_start_xmit(skb, np->dev);
- status = np->dev->hard_start_xmit(skb, np->dev);
netif_tx_unlock(np->dev);
/* success */
@@ -446,7 +461,9 @@ int __netpoll_rx(struct sk_buff *skb)
int proto, len, ulen;
struct iphdr *iph;
struct udphdr *uh;
- struct netpoll *np = skb->dev->npinfo->rx_np;
+ struct netpoll_info *npi = skb->dev->npinfo;
+ struct netpoll *np = npi->rx_np;
+
if (!np)
goto out;
@@ -456,7 +473,7 @@ int __netpoll_rx(struct sk_buff *skb)
/* check if netpoll clients need ARP */
if (skb->protocol == __constant_htons(ETH_P_ARP) &&
atomic_read(&trapped)) {
- arp_reply(skb);
+ skb_queue_tail(&npi->arp_tx, skb);
return 1;
}
@@ -651,6 +668,7 @@ int netpoll_setup(struct netpoll *np)
npinfo->poll_owner = -1;
npinfo->tries = MAX_RETRIES;
spin_lock_init(&npinfo->rx_lock);
+ skb_queue_head_init(&npinfo->arp_tx);
} else
npinfo = ndev->npinfo;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e5044ba3ab6..6edbb90cbcec 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1739,12 +1739,15 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state)
{
+ unsigned int ret;
+
config->get_next_block = skb_ts_get_next_block;
config->finish = skb_ts_finish;
skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
- return textsearch_find(config, state);
+ ret = textsearch_find(config, state);
+ return (ret <= to - from ? ret : UINT_MAX);
}
/**
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0e029c4e2903..c04176be7ed1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2166,7 +2166,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
if (!pskb_may_pull(skb, thlen))
goto out;
- oldlen = ~htonl(skb->len);
+ oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
segs = skb_segment(skb, sg);
@@ -2174,7 +2174,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
goto out;
len = skb_shinfo(skb)->gso_size;
- delta = csum_add(oldlen, htonl(thlen + len));
+ delta = htonl(oldlen + (thlen + len));
skb = segs;
th = skb->h.th;
@@ -2183,10 +2183,10 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
do {
th->fin = th->psh = 0;
- if (skb->ip_summed == CHECKSUM_NONE) {
- th->check = csum_fold(csum_partial(
- skb->h.raw, thlen, csum_add(skb->csum, delta)));
- }
+ th->check = ~csum_fold(th->check + delta);
+ if (skb->ip_summed != CHECKSUM_HW)
+ th->check = csum_fold(csum_partial(skb->h.raw, thlen,
+ skb->csum));
seq += len;
skb = skb->next;
@@ -2196,11 +2196,11 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
th->cwr = 0;
} while (skb->next);
- if (skb->ip_summed == CHECKSUM_NONE) {
- delta = csum_add(oldlen, htonl(skb->tail - skb->h.raw));
- th->check = csum_fold(csum_partial(
- skb->h.raw, thlen, csum_add(skb->csum, delta)));
- }
+ delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+ th->check = ~csum_fold(th->check + delta);
+ if (skb->ip_summed != CHECKSUM_HW)
+ th->check = csum_fold(csum_partial(skb->h.raw, thlen,
+ skb->csum));
out:
return segs;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8a777932786d..e728980160d2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -349,7 +349,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
(strict & RT6_SELECT_F_REACHABLE) &&
last && last != rt0) {
/* no entries matched; do round-robin */
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
*head = rt0->u.next;
rt0->u.next = last->u.next;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b3b9097c87c7..c11737f472d6 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -725,15 +725,17 @@ void nr_link_failed(ax25_cb *ax25, int reason)
struct nr_node *nr_node = NULL;
spin_lock_bh(&nr_neigh_list_lock);
- nr_neigh_for_each(s, node, &nr_neigh_list)
+ nr_neigh_for_each(s, node, &nr_neigh_list) {
if (s->ax25 == ax25) {
nr_neigh_hold(s);
nr_neigh = s;
break;
}
+ }
spin_unlock_bh(&nr_neigh_list_lock);
- if (nr_neigh == NULL) return;
+ if (nr_neigh == NULL)
+ return;
nr_neigh->ax25 = NULL;
ax25_cb_put(ax25);
@@ -743,11 +745,13 @@ void nr_link_failed(ax25_cb *ax25, int reason)
return;
}
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each(nr_node, node, &nr_node_list)
+ nr_node_for_each(nr_node, node, &nr_node_list) {
nr_node_lock(nr_node);
- if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh)
+ if (nr_node->which < nr_node->count &&
+ nr_node->routes[nr_node->which].neighbour == nr_neigh)
nr_node->which++;
nr_node_unlock(nr_node);
+ }
spin_unlock_bh(&nr_node_list_lock);
nr_neigh_put(nr_neigh);
}
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
index c4aeb7d40266..d07122b57e0d 100644
--- a/net/rxrpc/call.c
+++ b/net/rxrpc/call.c
@@ -1098,8 +1098,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
call->app_ready_seq = pmsg->seq;
call->app_ready_qty += pmsg->dsize;
- list_del_init(&pmsg->link);
- list_add_tail(&pmsg->link, &call->app_readyq);
+ list_move_tail(&pmsg->link, &call->app_readyq);
}
/* see if we've got the last packet yet */
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 0e0a4553499f..573b572f8f91 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -402,8 +402,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn)
/* move to graveyard queue */
_debug("burying connection: {%08x}", ntohl(conn->conn_id));
- list_del(&conn->link);
- list_add_tail(&conn->link, &peer->conn_graveyard);
+ list_move_tail(&conn->link, &peer->conn_graveyard);
rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index 1aadd026d354..cea4eb5e2497 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -160,8 +160,7 @@ void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
msg = list_entry(_p, struct rxrpc_message, link);
if (msg->trans == trans) {
- list_del(&msg->link);
- list_add_tail(&msg->link, &tmp);
+ list_move_tail(&msg->link, &tmp);
atomic_dec(&rxrpc_krxsecd_qcount);
}
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 129e2bd36aff..b8714a87b34c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -169,7 +169,7 @@ gss_import_sec_context_kerberos(const void *p,
}
ctx_id->internal_ctx_id = ctx;
- dprintk("RPC: Succesfully imported new context.\n");
+ dprintk("RPC: Successfully imported new context.\n");
return 0;
out_err_free_key2:
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index f43311221a72..2f312164d6d5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,7 +70,7 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(krb5_seq_lock);
u32
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 5bf11ccba7cd..3d0432aa45c1 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -201,7 +201,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
ctx_id->internal_ctx_id = ctx;
- dprintk("Succesfully imported new spkm context.\n");
+ dprintk("Successfully imported new spkm context.\n");
return 0;
out_err_free_key2:
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2c4ecbe50082..1bb75703f384 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -49,13 +49,19 @@
#include "name_table.h"
#include "bcast.h"
-
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
#define BCLINK_LOG_BUF_SIZE 0
+/*
+ * Loss rate for incoming broadcast frames; used to test retransmission code.
+ * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
+ */
+
+#define TIPC_BCAST_LOSS_RATE 0
+
/**
* struct bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
@@ -75,7 +81,14 @@ struct bcbearer_pair {
* @bearer: (non-standard) broadcast bearer structure
* @media: (non-standard) broadcast media structure
* @bpairs: array of bearer pairs
- * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used by tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines. Concurrent access is
+ * prevented through use of the spinlock "bc_lock".
*/
struct bcbearer {
@@ -83,6 +96,8 @@ struct bcbearer {
struct media media;
struct bcbearer_pair bpairs[MAX_BEARERS];
struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+ struct node_map remains;
+ struct node_map remains_new;
};
/**
@@ -102,7 +117,7 @@ struct bclink {
static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
-static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bc_lock);
char tipc_bclink_name[] = "multicast-link";
@@ -165,21 +180,18 @@ static int bclink_ack_allowed(u32 n)
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
*
- * Called with 'node' locked, bc_lock unlocked
+ * Called with bc_lock locked
*/
static void bclink_retransmit_pkt(u32 after, u32 to)
{
struct sk_buff *buf;
- spin_lock_bh(&bc_lock);
buf = bcl->first_out;
while (buf && less_eq(buf_seqno(buf), after)) {
buf = buf->next;
}
- if (buf != NULL)
- tipc_link_retransmit(bcl, buf, mod(to - after));
- spin_unlock_bh(&bc_lock);
+ tipc_link_retransmit(bcl, buf, mod(to - after));
}
/**
@@ -346,8 +358,10 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
for (; buf; buf = buf->next) {
u32 seqno = buf_seqno(buf);
- if (mod(seqno - prev) != 1)
+ if (mod(seqno - prev) != 1) {
buf = NULL;
+ break;
+ }
if (seqno == gap_after)
break;
prev = seqno;
@@ -399,7 +413,10 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
*/
void tipc_bclink_recv_pkt(struct sk_buff *buf)
-{
+{
+#if (TIPC_BCAST_LOSS_RATE)
+ static int rx_count = 0;
+#endif
struct tipc_msg *msg = buf_msg(buf);
struct node* node = tipc_node_find(msg_prevnode(msg));
u32 next_in;
@@ -420,9 +437,13 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
+ spin_lock_bh(&bc_lock);
bcl->stats.recv_nacks++;
+ bcl->owner->next = node; /* remember requestor */
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
+ bcl->owner->next = NULL;
+ spin_unlock_bh(&bc_lock);
} else {
tipc_bclink_peek_nack(msg_destnode(msg),
msg_bcast_tag(msg),
@@ -433,6 +454,14 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
return;
}
+#if (TIPC_BCAST_LOSS_RATE)
+ if (++rx_count == TIPC_BCAST_LOSS_RATE) {
+ rx_count = 0;
+ buf_discard(buf);
+ return;
+ }
+#endif
+
tipc_node_lock(node);
receive:
deferred = node->bclink.deferred_head;
@@ -531,12 +560,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
{
static int send_count = 0;
- struct node_map *remains;
- struct node_map *remains_new;
- struct node_map *remains_tmp;
int bp_index;
int swap_time;
- int err;
/* Prepare buffer for broadcasting (if first time trying to send it) */
@@ -557,9 +582,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
/* Send buffer over bearers until all targets reached */
- remains = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
- remains_new = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
- *remains = tipc_cltr_bcast_nodes;
+ bcbearer->remains = tipc_cltr_bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -568,8 +591,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (!p)
break; /* no more bearers to try */
- tipc_nmap_diff(remains, &p->nodes, remains_new);
- if (remains_new->count == remains->count)
+ tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+ if (bcbearer->remains_new.count == bcbearer->remains.count)
continue; /* bearer pair doesn't add anything */
if (!p->publ.blocked &&
@@ -587,27 +610,17 @@ swap:
bcbearer->bpairs[bp_index].primary = s;
bcbearer->bpairs[bp_index].secondary = p;
update:
- if (remains_new->count == 0) {
- err = TIPC_OK;
- goto out;
- }
+ if (bcbearer->remains_new.count == 0)
+ return TIPC_OK;
- /* swap map */
- remains_tmp = remains;
- remains = remains_new;
- remains_new = remains_tmp;
+ bcbearer->remains = bcbearer->remains_new;
}
/* Unable to reach all targets */
bcbearer->bearer.publ.blocked = 1;
bcl->stats.bearer_congs++;
- err = ~TIPC_OK;
-
- out:
- kfree(remains_new);
- kfree(remains);
- return err;
+ return ~TIPC_OK;
}
/**
@@ -765,7 +778,7 @@ int tipc_bclink_init(void)
bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
nomem:
- warn("Memory squeeze; Failed to create multicast link\n");
+ warn("Multicast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
kfree(bclink);
@@ -783,7 +796,7 @@ int tipc_bclink_init(void)
memset(bclink, 0, sizeof(struct bclink));
INIT_LIST_HEAD(&bcl->waiting_ports);
bcl->next_out_no = 1;
- bclink->node.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&bclink->node.lock);
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 0e3be2ab3307..b243d9d495f0 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -180,7 +180,7 @@ static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
if (!item->next) {
item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
if (!item->next) {
- warn("Memory squeeze: multicast destination port list is incomplete\n");
+ warn("Incomplete multicast delivery, no memory\n");
return;
}
item->next->next = NULL;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e213a8e54855..7ef17a449cfd 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -112,39 +112,42 @@ int tipc_register_media(u32 media_type,
goto exit;
if (!media_name_valid(name)) {
- warn("Media registration error: illegal name <%s>\n", name);
+ warn("Media <%s> rejected, illegal name\n", name);
goto exit;
}
if (!bcast_addr) {
- warn("Media registration error: no broadcast address supplied\n");
+ warn("Media <%s> rejected, no broadcast address\n", name);
goto exit;
}
if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
(bearer_priority > TIPC_MAX_LINK_PRI)) {
- warn("Media registration error: priority %u\n", bearer_priority);
+ warn("Media <%s> rejected, illegal priority (%u)\n", name,
+ bearer_priority);
goto exit;
}
if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
(link_tolerance > TIPC_MAX_LINK_TOL)) {
- warn("Media registration error: tolerance %u\n", link_tolerance);
+ warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
+ link_tolerance);
goto exit;
}
media_id = media_count++;
if (media_id >= MAX_MEDIA) {
- warn("Attempt to register more than %u media\n", MAX_MEDIA);
+ warn("Media <%s> rejected, media limit reached (%u)\n", name,
+ MAX_MEDIA);
media_count--;
goto exit;
}
for (i = 0; i < media_id; i++) {
if (media_list[i].type_id == media_type) {
- warn("Attempt to register second media with type %u\n",
+ warn("Media <%s> rejected, duplicate type (%u)\n", name,
media_type);
media_count--;
goto exit;
}
if (!strcmp(name, media_list[i].name)) {
- warn("Attempt to re-register media name <%s>\n", name);
+ warn("Media <%s> rejected, duplicate name\n", name);
media_count--;
goto exit;
}
@@ -283,6 +286,9 @@ static struct bearer *bearer_find(const char *name)
struct bearer *b_ptr;
u32 i;
+ if (tipc_mode != TIPC_NET_MODE)
+ return NULL;
+
for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
return b_ptr;
@@ -475,26 +481,33 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
u32 i;
int res = -EINVAL;
- if (tipc_mode != TIPC_NET_MODE)
+ if (tipc_mode != TIPC_NET_MODE) {
+ warn("Bearer <%s> rejected, not supported in standalone mode\n",
+ name);
return -ENOPROTOOPT;
-
- if (!bearer_name_validate(name, &b_name) ||
- !tipc_addr_domain_valid(bcast_scope) ||
- !in_scope(bcast_scope, tipc_own_addr))
+ }
+ if (!bearer_name_validate(name, &b_name)) {
+ warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
-
+ }
+ if (!tipc_addr_domain_valid(bcast_scope) ||
+ !in_scope(bcast_scope, tipc_own_addr)) {
+ warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
+ return -EINVAL;
+ }
if ((priority < TIPC_MIN_LINK_PRI ||
priority > TIPC_MAX_LINK_PRI) &&
- (priority != TIPC_MEDIA_LINK_PRI))
+ (priority != TIPC_MEDIA_LINK_PRI)) {
+ warn("Bearer <%s> rejected, illegal priority\n", name);
return -EINVAL;
+ }
write_lock_bh(&tipc_net_lock);
- if (!tipc_bearers)
- goto failed;
m_ptr = media_find(b_name.media_name);
if (!m_ptr) {
- warn("No media <%s>\n", b_name.media_name);
+ warn("Bearer <%s> rejected, media <%s> not registered\n", name,
+ b_name.media_name);
goto failed;
}
@@ -510,23 +523,24 @@ restart:
continue;
}
if (!strcmp(name, tipc_bearers[i].publ.name)) {
- warn("Bearer <%s> already enabled\n", name);
+ warn("Bearer <%s> rejected, already enabled\n", name);
goto failed;
}
if ((tipc_bearers[i].priority == priority) &&
(++with_this_prio > 2)) {
if (priority-- == 0) {
- warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
- name, priority + 1, priority);
+ warn("Bearer <%s> rejected, duplicate priority\n",
+ name);
goto failed;
}
- warn("Third bearer <%s> with priority %u, lowering to %u\n",
+ warn("Bearer <%s> priority adjustment required %u->%u\n",
name, priority + 1, priority);
goto restart;
}
}
if (bearer_id >= MAX_BEARERS) {
- warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
+ warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
+ name, MAX_BEARERS);
goto failed;
}
@@ -536,7 +550,7 @@ restart:
strcpy(b_ptr->publ.name, name);
res = m_ptr->enable_bearer(&b_ptr->publ);
if (res) {
- warn("Failed to enable bearer <%s>\n", name);
+ warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
goto failed;
}
@@ -552,7 +566,7 @@ restart:
b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
bcast_scope, 2);
}
- b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&b_ptr->publ.lock);
write_unlock_bh(&tipc_net_lock);
info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
name, addr_string_fill(addr_string, bcast_scope), priority);
@@ -573,9 +587,6 @@ int tipc_block_bearer(const char *name)
struct link *l_ptr;
struct link *temp_l_ptr;
- if (tipc_mode != TIPC_NET_MODE)
- return -ENOPROTOOPT;
-
read_lock_bh(&tipc_net_lock);
b_ptr = bearer_find(name);
if (!b_ptr) {
@@ -584,6 +595,7 @@ int tipc_block_bearer(const char *name)
return -EINVAL;
}
+ info("Blocking bearer <%s>\n", name);
spin_lock_bh(&b_ptr->publ.lock);
b_ptr->publ.blocked = 1;
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -595,7 +607,6 @@ int tipc_block_bearer(const char *name)
}
spin_unlock_bh(&b_ptr->publ.lock);
read_unlock_bh(&tipc_net_lock);
- info("Blocked bearer <%s>\n", name);
return TIPC_OK;
}
@@ -611,15 +622,13 @@ static int bearer_disable(const char *name)
struct link *l_ptr;
struct link *temp_l_ptr;
- if (tipc_mode != TIPC_NET_MODE)
- return -ENOPROTOOPT;
-
b_ptr = bearer_find(name);
if (!b_ptr) {
warn("Attempt to disable unknown bearer <%s>\n", name);
return -EINVAL;
}
+ info("Disabling bearer <%s>\n", name);
tipc_disc_stop_link_req(b_ptr->link_req);
spin_lock_bh(&b_ptr->publ.lock);
b_ptr->link_req = NULL;
@@ -635,7 +644,6 @@ static int bearer_disable(const char *name)
tipc_link_delete(l_ptr);
}
spin_unlock_bh(&b_ptr->publ.lock);
- info("Disabled bearer <%s>\n", name);
memset(b_ptr, 0, sizeof(struct bearer));
return TIPC_OK;
}
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1aed81584e96..1dcb6940e338 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -60,8 +60,10 @@ struct cluster *tipc_cltr_create(u32 addr)
int alloc;
c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
- if (c_ptr == NULL)
+ if (c_ptr == NULL) {
+ warn("Cluster creation failure, no memory\n");
return NULL;
+ }
memset(c_ptr, 0, sizeof(*c_ptr));
c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
@@ -70,30 +72,32 @@ struct cluster *tipc_cltr_create(u32 addr)
else
max_nodes = tipc_max_nodes + 1;
alloc = sizeof(void *) * (max_nodes + 1);
+
c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
if (c_ptr->nodes == NULL) {
+ warn("Cluster creation failure, no memory for node area\n");
kfree(c_ptr);
return NULL;
}
- memset(c_ptr->nodes, 0, alloc);
+ memset(c_ptr->nodes, 0, alloc);
+
if (in_own_cluster(addr))
tipc_local_nodes = c_ptr->nodes;
c_ptr->highest_slave = LOWEST_SLAVE - 1;
c_ptr->highest_node = 0;
z_ptr = tipc_zone_find(tipc_zone(addr));
- if (z_ptr == NULL) {
+ if (!z_ptr) {
z_ptr = tipc_zone_create(addr);
}
- if (z_ptr != NULL) {
- tipc_zone_attach_cluster(z_ptr, c_ptr);
- c_ptr->owner = z_ptr;
- }
- else {
+ if (!z_ptr) {
+ kfree(c_ptr->nodes);
kfree(c_ptr);
- c_ptr = NULL;
+ return NULL;
}
+ tipc_zone_attach_cluster(z_ptr, c_ptr);
+ c_ptr->owner = z_ptr;
return c_ptr;
}
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 48b5de2dbe60..285e1bc2d880 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -63,7 +63,7 @@ struct manager {
static struct manager mng = { 0};
-static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(config_lock);
static const void *req_tlv_area; /* request message TLV area */
static int req_tlv_space; /* request message TLV area size */
@@ -291,13 +291,22 @@ static struct sk_buff *cfg_set_own_addr(void)
if (!tipc_addr_node_valid(addr))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (node address)");
- if (tipc_own_addr)
+ if (tipc_mode == TIPC_NET_MODE)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change node address once assigned)");
+ tipc_own_addr = addr;
+
+ /*
+ * Must release all spinlocks before calling start_net() because
+	 * the Linux version of TIPC calls eth_media_start(), which calls
+	 * register_netdevice_notifier(), which may block!
+ *
+ * Temporarily releasing the lock should be harmless for non-Linux TIPC,
+	 * but the Linux version of eth_media_start() should really be reworked
+ * so that it can be called with spinlocks held.
+ */
spin_unlock_bh(&config_lock);
- tipc_core_stop_net();
- tipc_own_addr = addr;
tipc_core_start_net();
spin_lock_bh(&config_lock);
return tipc_cfg_reply_none();
@@ -350,50 +359,21 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
static struct sk_buff *cfg_set_max_ports(void)
{
- int orig_mode;
u32 value;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
+ if (value == tipc_max_ports)
+ return tipc_cfg_reply_none();
if (value != delimit(value, 127, 65535))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max ports must be 127-65535)");
-
- if (value == tipc_max_ports)
- return tipc_cfg_reply_none();
-
- if (atomic_read(&tipc_user_count) > 2)
+ if (tipc_mode != TIPC_NOT_RUNNING)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change max ports while TIPC users exist)");
-
- spin_unlock_bh(&config_lock);
- orig_mode = tipc_get_mode();
- if (orig_mode == TIPC_NET_MODE)
- tipc_core_stop_net();
- tipc_core_stop();
+ " (cannot change max ports while TIPC is active)");
tipc_max_ports = value;
- tipc_core_start();
- if (orig_mode == TIPC_NET_MODE)
- tipc_core_start_net();
- spin_lock_bh(&config_lock);
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *set_net_max(int value, int *parameter)
-{
- int orig_mode;
-
- if (value != *parameter) {
- orig_mode = tipc_get_mode();
- if (orig_mode == TIPC_NET_MODE)
- tipc_core_stop_net();
- *parameter = value;
- if (orig_mode == TIPC_NET_MODE)
- tipc_core_start_net();
- }
-
return tipc_cfg_reply_none();
}
@@ -405,10 +385,16 @@ static struct sk_buff *cfg_set_max_zones(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
+ if (value == tipc_max_zones)
+ return tipc_cfg_reply_none();
if (value != delimit(value, 1, 255))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max zones must be 1-255)");
- return set_net_max(value, &tipc_max_zones);
+ if (tipc_mode == TIPC_NET_MODE)
+ return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change max zones once TIPC has joined a network)");
+ tipc_max_zones = value;
+ return tipc_cfg_reply_none();
}
static struct sk_buff *cfg_set_max_clusters(void)
@@ -419,8 +405,8 @@ static struct sk_buff *cfg_set_max_clusters(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
- if (value != 1)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ if (value != delimit(value, 1, 1))
+ return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max clusters fixed at 1)");
return tipc_cfg_reply_none();
}
@@ -433,10 +419,16 @@ static struct sk_buff *cfg_set_max_nodes(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
+ if (value == tipc_max_nodes)
+ return tipc_cfg_reply_none();
if (value != delimit(value, 8, 2047))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max nodes must be 8-2047)");
- return set_net_max(value, &tipc_max_nodes);
+ if (tipc_mode == TIPC_NET_MODE)
+ return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change max nodes once TIPC has joined a network)");
+ tipc_max_nodes = value;
+ return tipc_cfg_reply_none();
}
static struct sk_buff *cfg_set_max_slaves(void)
@@ -461,15 +453,16 @@ static struct sk_buff *cfg_set_netid(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
+ if (value == tipc_net_id)
+ return tipc_cfg_reply_none();
if (value != delimit(value, 1, 9999))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network id must be 1-9999)");
-
- if (tipc_own_addr)
+ if (tipc_mode == TIPC_NET_MODE)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change network id once part of network)");
-
- return set_net_max(value, &tipc_net_id);
+ " (cannot change network id once TIPC has joined a network)");
+ tipc_net_id = value;
+ return tipc_cfg_reply_none();
}
struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
@@ -649,7 +642,7 @@ static void cfg_named_msg_event(void *userdata,
if ((size < sizeof(*req_hdr)) ||
(size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
(ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
- warn("discarded invalid configuration message\n");
+ warn("Invalid configuration message discarded\n");
return;
}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3d0a8ee4e1d3..5003acb15919 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -2,7 +2,7 @@
* net/tipc/core.c: TIPC module code
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@ void tipc_socket_stop(void);
int tipc_netlink_start(void);
void tipc_netlink_stop(void);
-#define MOD_NAME "tipc_start: "
+#define TIPC_MOD_VER "1.6.1"
#ifndef CONFIG_TIPC_ZONES
#define CONFIG_TIPC_ZONES 3
@@ -198,7 +198,7 @@ static int __init tipc_init(void)
tipc_max_publications = 10000;
tipc_max_subscriptions = 2000;
tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
- tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
+ tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
@@ -224,6 +224,7 @@ module_exit(tipc_exit);
MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(TIPC_MOD_VER);
/* Native TIPC API for kernel-space applications (see tipc.h) */
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1f2e8b27a13f..86f54f3512f1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
* net/tipc/core.h: Include file for TIPC global declarations
*
* Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,10 +111,6 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#else
-#ifndef DBG_OUTPUT
-#define DBG_OUTPUT NULL
-#endif
-
/*
* TIPC debug support not included:
* - system messages are printed to system console
@@ -129,6 +125,19 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#define msg_dbg(msg,txt) do {} while (0)
#define dump(fmt,arg...) do {} while (0)
+
+/*
+ * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
+ * the null print buffer. This ensures that any system or debug messages
+ * that are generated without using the above macros are handled correctly.
+ */
+
+#undef TIPC_OUTPUT
+#define TIPC_OUTPUT TIPC_CONS
+
+#undef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+
#endif
@@ -309,7 +318,7 @@ static inline struct sk_buff *buf_acquire(u32 size)
* buf_discard - frees a TIPC message buffer
* @skb: message buffer
*
- * Frees a new buffer. If passed NULL, just returns.
+ * Frees a message buffer. If passed NULL, just returns.
*/
static inline void buf_discard(struct sk_buff *skb)
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 26ef95d5fe38..55130655e1ed 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -41,7 +41,7 @@
#define MAX_STRING 512
static char print_string[MAX_STRING];
-static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(print_lock);
static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
struct print_buf *TIPC_CONS = &cons_buf;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 92601385e5f5..2b8441203120 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -2,7 +2,7 @@
* net/tipc/discover.c
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -176,7 +176,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
n_ptr = tipc_node_create(orig);
}
if (n_ptr == NULL) {
- warn("Memory squeeze; Failed to create node\n");
return;
}
spin_lock_bh(&n_ptr->lock);
@@ -191,10 +190,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
}
addr = &link->media_addr;
if (memcmp(addr, &media_addr, sizeof(*addr))) {
- char addr_string[16];
-
- warn("New bearer address for %s\n",
- addr_string_fill(addr_string, orig));
+ warn("Resetting link <%s>, peer interface address changed\n",
+ link->name);
memcpy(addr, &media_addr, sizeof(*addr));
tipc_link_reset(link);
}
@@ -270,8 +267,8 @@ static void disc_timeout(struct link_req *req)
/* leave timer interval "as is" if already at a "normal" rate */
} else {
req->timer_intv *= 2;
- if (req->timer_intv > TIPC_LINK_REQ_SLOW)
- req->timer_intv = TIPC_LINK_REQ_SLOW;
+ if (req->timer_intv > TIPC_LINK_REQ_FAST)
+ req->timer_intv = TIPC_LINK_REQ_FAST;
if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
(req->bearer->nodes.count))
req->timer_intv = TIPC_LINK_REQ_SLOW;
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 7a252785f727..682da4a28041 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
* Copyright (c) 2001-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -98,17 +98,19 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
u32 size;
if (likely(eb_ptr->bearer)) {
- size = msg_size((struct tipc_msg *)buf->data);
- skb_trim(buf, size);
- if (likely(buf->len == size)) {
- buf->next = NULL;
- tipc_recv_msg(buf, eb_ptr->bearer);
- } else {
- kfree_skb(buf);
+ if (likely(!dev->promiscuity) ||
+ !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
+ !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
+ size = msg_size((struct tipc_msg *)buf->data);
+ skb_trim(buf, size);
+ if (likely(buf->len == size)) {
+ buf->next = NULL;
+ tipc_recv_msg(buf, eb_ptr->bearer);
+ return TIPC_OK;
+ }
}
- } else {
- kfree_skb(buf);
}
+ kfree_skb(buf);
return TIPC_OK;
}
@@ -125,8 +127,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find device with specified name */
- while (dev && dev->name &&
- (memcmp(dev->name, driver_name, strlen(dev->name)))) {
+ while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
dev = dev->next;
}
if (!dev)
@@ -252,7 +253,9 @@ int tipc_eth_media_start(void)
if (eth_started)
return -EINVAL;
- memset(&bcast_addr, 0xff, sizeof(bcast_addr));
+ bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
+ memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
+
memset(eth_bearers, 0, sizeof(eth_bearers));
res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 966f70a1b608..ae6ddf00a1aa 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -44,7 +44,7 @@ struct queue_item {
static kmem_cache_t *tipc_queue_item_cache;
static struct list_head signal_queue_head;
-static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(qitem_lock);
static int handler_enabled = 0;
static void process_signal_queue(unsigned long dummy);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 784b24b6d102..d64658053746 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -419,7 +419,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
if (!l_ptr) {
- warn("Memory squeeze; Failed to create link\n");
+ warn("Link creation failed, no memory\n");
return NULL;
}
memset(l_ptr, 0, sizeof(*l_ptr));
@@ -469,7 +469,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
if (!pb) {
kfree(l_ptr);
- warn("Memory squeeze; Failed to create link\n");
+ warn("Link creation failed, no memory for print buffer\n");
return NULL;
}
tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
@@ -574,7 +574,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
break;
list_del_init(&p_ptr->wait_list);
p_ptr->congested_link = NULL;
- assert(p_ptr->wakeup);
spin_lock_bh(p_ptr->publ.lock);
p_ptr->publ.congested = 0;
p_ptr->wakeup(&p_ptr->publ);
@@ -691,6 +690,7 @@ void tipc_link_reset(struct link *l_ptr)
struct sk_buff *buf;
u32 prev_state = l_ptr->state;
u32 checkpoint = l_ptr->next_in_no;
+ int was_active_link = tipc_link_is_active(l_ptr);
msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
@@ -712,7 +712,7 @@ void tipc_link_reset(struct link *l_ptr)
tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
dbg_link_dump();
#endif
- if (tipc_node_has_active_links(l_ptr->owner) &&
+ if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
l_ptr->owner->permit_changeover) {
l_ptr->reset_checkpoint = checkpoint;
l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -755,7 +755,7 @@ void tipc_link_reset(struct link *l_ptr)
static void link_activate(struct link *l_ptr)
{
- l_ptr->next_in_no = 1;
+ l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
tipc_node_link_up(l_ptr->owner, l_ptr);
tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
link_send_event(tipc_cfg_link_event, l_ptr, 1);
@@ -820,6 +820,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
break;
case RESET_MSG:
dbg_link("RES -> RR\n");
+ info("Resetting link <%s>, requested by peer\n",
+ l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -844,6 +846,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
break;
case RESET_MSG:
dbg_link("RES -> RR\n");
+ info("Resetting link <%s>, requested by peer "
+ "while probing\n", l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -875,6 +879,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
} else { /* Link has failed */
dbg_link("-> RU (%u probes unanswered)\n",
l_ptr->fsm_msg_cnt);
+ warn("Resetting link <%s>, peer not responding\n",
+ l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
@@ -1050,7 +1056,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
msg_dbg(msg, "TIPC: Congestion, throwing away\n");
buf_discard(buf);
if (imp > CONN_MANAGER) {
- warn("Resetting <%s>, send queue full", l_ptr->name);
+ warn("Resetting link <%s>, send queue full", l_ptr->name);
tipc_link_reset(l_ptr);
}
return dsz;
@@ -1135,9 +1141,13 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
if (n_ptr) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
- dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
if (l_ptr) {
+ dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
res = tipc_link_send_buf(l_ptr, buf);
+ } else {
+ dbg("Attempt to send msg to unreachable node:\n");
+ msg_dbg(buf_msg(buf),">>>");
+ buf_discard(buf);
}
tipc_node_unlock(n_ptr);
} else {
@@ -1242,8 +1252,6 @@ int tipc_link_send_sections_fast(struct port *sender,
int res;
u32 selector = msg_origport(hdr) & 1;
- assert(destaddr != tipc_own_addr);
-
again:
/*
* Try building message using port's max_pkt hint.
@@ -1604,40 +1612,121 @@ void tipc_link_push_queue(struct link *l_ptr)
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
}
+static void link_reset_all(unsigned long addr)
+{
+ struct node *n_ptr;
+ char addr_string[16];
+ u32 i;
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find((u32)addr);
+ if (!n_ptr) {
+ read_unlock_bh(&tipc_net_lock);
+ return; /* node no longer exists */
+ }
+
+ tipc_node_lock(n_ptr);
+
+ warn("Resetting all links to %s\n",
+ addr_string_fill(addr_string, n_ptr->addr));
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (n_ptr->links[i]) {
+ link_print(n_ptr->links[i], TIPC_OUTPUT,
+ "Resetting link\n");
+ tipc_link_reset(n_ptr->links[i]);
+ }
+ }
+
+ tipc_node_unlock(n_ptr);
+ read_unlock_bh(&tipc_net_lock);
+}
+
+static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+
+ warn("Retransmission failure on link <%s>\n", l_ptr->name);
+ tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>");
+
+ if (l_ptr->addr) {
+
+ /* Handle failure on standard link */
+
+ link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
+ tipc_link_reset(l_ptr);
+
+ } else {
+
+ /* Handle failure on broadcast link */
+
+ struct node *n_ptr;
+ char addr_string[16];
+
+ tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
+ tipc_printf(TIPC_OUTPUT, "Outstanding acks: %u\n", (u32)TIPC_SKB_CB(buf)->handle);
+
+ n_ptr = l_ptr->owner->next;
+ tipc_node_lock(n_ptr);
+
+ addr_string_fill(addr_string, n_ptr->addr);
+ tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
+ tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
+ tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
+ tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
+ tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
+ tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
+ tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
+
+ tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
+
+ tipc_node_unlock(n_ptr);
+
+ l_ptr->stale_count = 0;
+ }
+}
+
void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
u32 retransmits)
{
struct tipc_msg *msg;
+ if (!buf)
+ return;
+
+ msg = buf_msg(buf);
+
dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
- if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
- msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
- dbg_print_link(l_ptr, " ");
- l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
- l_ptr->retransm_queue_size = retransmits;
- return;
+ if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
+ if (!skb_cloned(buf)) {
+ msg_dbg(msg, ">NO_RETR->BCONG>");
+ dbg_print_link(l_ptr, " ");
+ l_ptr->retransm_queue_head = msg_seqno(msg);
+ l_ptr->retransm_queue_size = retransmits;
+ return;
+ } else {
+ /* Don't retransmit if driver already has the buffer */
+ }
+ } else {
+ /* Detect repeated retransmit failures on uncongested bearer */
+
+ if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+ if (++l_ptr->stale_count > 100) {
+ link_retransmit_failure(l_ptr, buf);
+ return;
+ }
+ } else {
+ l_ptr->last_retransmitted = msg_seqno(msg);
+ l_ptr->stale_count = 1;
+ }
}
+
while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
msg = buf_msg(buf);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- /* Catch if retransmissions fail repeatedly: */
- if (l_ptr->last_retransmitted == msg_seqno(msg)) {
- if (++l_ptr->stale_count > 100) {
- tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
- info("...Retransmitted %u times\n",
- l_ptr->stale_count);
- link_print(l_ptr, TIPC_CONS, "Resetting Link\n");
- tipc_link_reset(l_ptr);
- break;
- }
- } else {
- l_ptr->stale_count = 0;
- }
- l_ptr->last_retransmitted = msg_seqno(msg);
-
msg_dbg(buf_msg(buf), ">RETR>");
buf = buf->next;
retransmits--;
@@ -1650,6 +1739,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
return;
}
}
+
l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
@@ -1720,6 +1810,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
link_recv_non_seq(buf);
continue;
}
+
+ if (unlikely(!msg_short(msg) &&
+ (msg_destnode(msg) != tipc_own_addr)))
+ goto cont;
+
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
goto cont;
@@ -2140,7 +2235,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
if (msg_linkprio(msg) &&
(msg_linkprio(msg) != l_ptr->priority)) {
- warn("Changing prio <%s>: %u->%u\n",
+ warn("Resetting link <%s>, priority change %u->%u\n",
l_ptr->name, l_ptr->priority, msg_linkprio(msg));
l_ptr->priority = msg_linkprio(msg);
tipc_link_reset(l_ptr); /* Enforce change to take effect */
@@ -2209,17 +2304,22 @@ void tipc_link_tunnel(struct link *l_ptr,
u32 length = msg_size(msg);
tunnel = l_ptr->owner->active_links[selector & 1];
- if (!tipc_link_is_up(tunnel))
+ if (!tipc_link_is_up(tunnel)) {
+ warn("Link changeover error, "
+ "tunnel link no longer available\n");
return;
+ }
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
buf = buf_acquire(length + INT_H_SIZE);
- if (!buf)
+ if (!buf) {
+ warn("Link changeover error, "
+ "unable to send tunnel msg\n");
return;
+ }
memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
msg_dbg(buf_msg(buf), ">SEND>");
- assert(tunnel);
tipc_link_send_buf(tunnel, buf);
}
@@ -2235,23 +2335,27 @@ void tipc_link_changeover(struct link *l_ptr)
u32 msgcount = l_ptr->out_queue_size;
struct sk_buff *crs = l_ptr->first_out;
struct link *tunnel = l_ptr->owner->active_links[0];
- int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
struct tipc_msg tunnel_hdr;
+ int split_bundles;
if (!tunnel)
return;
- if (!l_ptr->owner->permit_changeover)
+ if (!l_ptr->owner->permit_changeover) {
+ warn("Link changeover error, "
+ "peer did not permit changeover\n");
return;
+ }
msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
+ dbg("Link changeover requires %u tunnel messages\n", msgcount);
+
if (!l_ptr->first_out) {
struct sk_buff *buf;
- assert(!msgcount);
buf = buf_acquire(INT_H_SIZE);
if (buf) {
memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@@ -2261,10 +2365,15 @@ void tipc_link_changeover(struct link *l_ptr)
msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
tipc_link_send_buf(tunnel, buf);
} else {
- warn("Memory squeeze; link changeover failed\n");
+ warn("Link changeover error, "
+ "unable to send changeover msg\n");
}
return;
}
+
+ split_bundles = (l_ptr->owner->active_links[0] !=
+ l_ptr->owner->active_links[1]);
+
while (crs) {
struct tipc_msg *msg = buf_msg(crs);
@@ -2310,7 +2419,8 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
outbuf = buf_acquire(length + INT_H_SIZE);
if (outbuf == NULL) {
- warn("Memory squeeze; buffer duplication failed\n");
+ warn("Link changeover error, "
+ "unable to send duplicate msg\n");
return;
}
memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@@ -2364,11 +2474,15 @@ static int link_recv_changeover_msg(struct link **l_ptr,
u32 msg_count = msg_msgcnt(tunnel_msg);
dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
- assert(dest_link != *l_ptr);
if (!dest_link) {
msg_dbg(tunnel_msg, "NOLINK/<REC<");
goto exit;
}
+ if (dest_link == *l_ptr) {
+ err("Unexpected changeover message on link <%s>\n",
+ (*l_ptr)->name);
+ goto exit;
+ }
dbg("%c<-%c:", dest_link->b_ptr->net_plane,
(*l_ptr)->b_ptr->net_plane);
*l_ptr = dest_link;
@@ -2381,7 +2495,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
}
*buf = buf_extract(tunnel_buf,INT_H_SIZE);
if (*buf == NULL) {
- warn("Memory squeeze; failed to extract msg\n");
+ warn("Link changeover error, duplicate msg dropped\n");
goto exit;
}
msg_dbg(tunnel_msg, "TNL<REC<");
@@ -2393,13 +2507,17 @@ static int link_recv_changeover_msg(struct link **l_ptr,
if (tipc_link_is_up(dest_link)) {
msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
+ info("Resetting link <%s>, changeover initiated by peer\n",
+ dest_link->name);
tipc_link_reset(dest_link);
dest_link->exp_msg_count = msg_count;
+ dbg("Expecting %u tunnelled messages\n", msg_count);
if (!msg_count)
goto exit;
} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
dest_link->exp_msg_count = msg_count;
+ dbg("Expecting %u tunnelled messages\n", msg_count);
if (!msg_count)
goto exit;
}
@@ -2407,6 +2525,8 @@ static int link_recv_changeover_msg(struct link **l_ptr,
/* Receive original message */
if (dest_link->exp_msg_count == 0) {
+ warn("Link switchover error, "
+ "got too many tunnelled messages\n");
msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
dbg_print_link(dest_link, "LINK:");
goto exit;
@@ -2422,7 +2542,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
buf_discard(tunnel_buf);
return 1;
} else {
- warn("Memory squeeze; dropped incoming msg\n");
+ warn("Link changeover error, original msg dropped\n");
}
}
exit:
@@ -2444,13 +2564,8 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
while (msgcount--) {
obuf = buf_extract(buf, pos);
if (obuf == NULL) {
- char addr_string[16];
-
- warn("Buffer allocation failure;\n");
- warn(" incoming message(s) from %s lost\n",
- addr_string_fill(addr_string,
- msg_orignode(buf_msg(buf))));
- return;
+ warn("Link unable to unbundle message(s)\n");
+ break;
};
pos += align(msg_size(buf_msg(obuf)));
msg_dbg(buf_msg(obuf), " /");
@@ -2508,7 +2623,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
}
fragm = buf_acquire(fragm_sz + INT_H_SIZE);
if (fragm == NULL) {
- warn("Memory squeeze; failed to fragment msg\n");
+ warn("Link unable to fragment message\n");
dsz = -ENOMEM;
goto exit;
}
@@ -2623,7 +2738,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
set_fragm_size(pbuf,fragm_sz);
set_expected_frags(pbuf,exp_fragm_cnt - 1);
} else {
- warn("Memory squeeze; got no defragmenting buffer\n");
+ warn("Link unable to reassemble fragmented message\n");
}
buf_discard(fbuf);
return 0;
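A minimal illustrative sketch (not part of the patch) of the stale-retransmission check that the link.c hunks above introduce; the struct link fields are the ones shown in the diff, while the helper name is an assumption made here for clarity:

	/* Illustrative helper: report when the same message has been
	 * retransmitted more than 100 times on an uncongested bearer,
	 * at which point the caller resets the link.
	 */
	static int link_retransmit_is_stale(struct link *l_ptr, u32 seqno)
	{
		if (l_ptr->last_retransmitted == seqno) {
			if (++l_ptr->stale_count > 100)
				return 1;
		} else {
			l_ptr->last_retransmitted = seqno;
			l_ptr->stale_count = 1;
		}
		return 0;
	}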
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a3bbc891f959..f0b063bcc2a9 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -127,7 +127,7 @@ void tipc_named_publish(struct publication *publ)
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
- warn("Memory squeeze; failed to distribute publication\n");
+ warn("Publication distribution failure\n");
return;
}
@@ -151,7 +151,7 @@ void tipc_named_withdraw(struct publication *publ)
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
- warn("Memory squeeze; failed to distribute withdrawal\n");
+ warn("Withdrawl distribution failure\n");
return;
}
@@ -174,7 +174,6 @@ void tipc_named_node_up(unsigned long node)
u32 rest;
u32 max_item_buf;
- assert(in_own_cluster(node));
read_lock_bh(&tipc_nametbl_lock);
max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
max_item_buf *= ITEM_SIZE;
@@ -185,8 +184,8 @@ void tipc_named_node_up(unsigned long node)
left = (rest <= max_item_buf) ? rest : max_item_buf;
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
- if (buf == NULL) {
- warn("Memory Squeeze; could not send publication\n");
+ if (!buf) {
+ warn("Bulk publication distribution failure\n");
goto exit;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
@@ -221,15 +220,24 @@ exit:
static void node_is_down(struct publication *publ)
{
struct publication *p;
+
write_lock_bh(&tipc_nametbl_lock);
dbg("node_is_down: withdrawing %u, %u, %u\n",
publ->type, publ->lower, publ->upper);
publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
- assert(p == publ);
write_unlock_bh(&tipc_nametbl_lock);
- kfree(publ);
+
+ if (p != publ) {
+ err("Unable to remove publication from failed node\n"
+ "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
+ publ->type, publ->lower, publ->node, publ->ref, publ->key);
+ }
+
+ if (p) {
+ kfree(p);
+ }
}
/**
@@ -275,9 +283,15 @@ void tipc_named_recv(struct sk_buff *buf)
if (publ) {
tipc_nodesub_unsubscribe(&publ->subscr);
kfree(publ);
+ } else {
+ err("Unable to remove publication by node 0x%x\n"
+ "(type=%u, lower=%u, ref=%u, key=%u)\n",
+ msg_orignode(msg),
+ ntohl(item->type), ntohl(item->lower),
+ ntohl(item->ref), ntohl(item->key));
}
} else {
- warn("tipc_named_recv: unknown msg\n");
+ warn("Unrecognized name table message received\n");
}
item++;
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d129422fc5c2..a6926ff07bcc 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -71,7 +71,7 @@ struct sub_seq {
* @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
* sub-sequences are sorted in ascending order
* @alloc: number of sub-sequences currently in array
- * @first_free: upper bound of highest sub-sequence + 1
+ * @first_free: array index of first unused sub-sequence entry
* @ns_list: links to adjacent name sequences in hash chain
* @subscriptions: list of subscriptions for this 'type'
* @lock: spinlock controlling access to name sequence structure
@@ -101,7 +101,7 @@ struct name_table {
static struct name_table table = { NULL } ;
static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
-rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(tipc_nametbl_lock);
static int hash(int x)
@@ -120,7 +120,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
struct publication *publ =
(struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
- warn("Memory squeeze; failed to create publication\n");
+ warn("Publication creation failure, no memory\n");
return NULL;
}
@@ -165,17 +165,17 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
struct sub_seq *sseq = tipc_subseq_alloc(1);
if (!nseq || !sseq) {
- warn("Memory squeeze; failed to create name sequence\n");
+ warn("Name sequence creation failed, no memory\n");
kfree(nseq);
kfree(sseq);
return NULL;
}
memset(nseq, 0, sizeof(*nseq));
- nseq->lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&nseq->lock);
nseq->type = type;
nseq->sseqs = sseq;
- dbg("tipc_nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
+ dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
nseq, type, nseq->sseqs, nseq->first_free);
nseq->alloc = 1;
INIT_HLIST_NODE(&nseq->ns_list);
@@ -253,16 +253,16 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
struct sub_seq *sseq;
int created_subseq = 0;
- assert(nseq->first_free <= nseq->alloc);
sseq = nameseq_find_subseq(nseq, lower);
- dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
+ dbg("nameseq_ins: for seq %p, {%u,%u}, found sseq %p\n",
nseq, type, lower, sseq);
if (sseq) {
/* Lower end overlaps existing entry => need an exact match */
if ((sseq->lower != lower) || (sseq->upper != upper)) {
- warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+ warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
} else {
@@ -277,25 +277,27 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
- warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+ warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
/* Ensure there is space for new sub-sequence */
if (nseq->first_free == nseq->alloc) {
- struct sub_seq *sseqs = nseq->sseqs;
- nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2);
- if (nseq->sseqs != NULL) {
- memcpy(nseq->sseqs, sseqs,
- nseq->alloc * sizeof (struct sub_seq));
- kfree(sseqs);
- dbg("Allocated %u sseqs\n", nseq->alloc);
- nseq->alloc *= 2;
- } else {
- warn("Memory squeeze; failed to create sub-sequence\n");
+ struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
+
+ if (!sseqs) {
+ warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
return NULL;
}
+ dbg("Allocated %u more sseqs\n", nseq->alloc);
+ memcpy(sseqs, nseq->sseqs,
+ nseq->alloc * sizeof(struct sub_seq));
+ kfree(nseq->sseqs);
+ nseq->sseqs = sseqs;
+ nseq->alloc *= 2;
}
dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
@@ -311,7 +313,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
sseq->upper = upper;
created_subseq = 1;
}
- dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
+ dbg("inserting {%u,%u,%u} from <0x%x:%u> into sseq %p(%u,%u) of seq %p\n",
type, lower, upper, node, port, sseq,
sseq->lower, sseq->upper, nseq);
@@ -320,7 +322,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
publ = publ_create(type, lower, upper, scope, node, port, key);
if (!publ)
return NULL;
- dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
+ dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
publ, node, publ->node, publ->subscr.node);
if (!sseq->zone_list)
@@ -367,45 +369,47 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/**
* tipc_nameseq_remove_publ -
+ *
+ * NOTE: There may be cases where TIPC is asked to remove a publication
+ * that is not in the name table. For example, if another node issues a
+ * publication for a name sequence that overlaps an existing name sequence,
+ * the publication will not be recorded, which means the publication won't
+ * be found when the name sequence is later withdrawn by that node.
+ * A failed withdraw request simply returns a failure indication and lets the
+ * caller issue any error or warning messages associated with such a problem.
*/
static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
u32 node, u32 ref, u32 key)
{
struct publication *publ;
+ struct publication *curr;
struct publication *prev;
struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
struct sub_seq *free;
struct subscription *s, *st;
int removed_subseq = 0;
- assert(nseq);
-
- if (!sseq) {
- int i;
-
- warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
- assert(nseq->sseqs);
- dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
- nseq->sseqs, nseq, nseq->alloc,
- nseq->first_free);
- for (i = 0; i < nseq->first_free; i++) {
- dbg("Subseq %u(%x): lower = %u,upper = %u\n",
- i, &nseq->sseqs[i], nseq->sseqs[i].lower,
- nseq->sseqs[i].upper);
- }
+ if (!sseq)
return NULL;
- }
- dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
+
+ dbg("tipc_nameseq_remove_publ: seq: %p, sseq %p, {%u,%u}, key %u\n",
nseq, sseq, nseq->type, inst, key);
+ /* Remove publication from zone scope list */
+
prev = sseq->zone_list;
publ = sseq->zone_list->zone_list_next;
while ((publ->key != key) || (publ->ref != ref) ||
(publ->node && (publ->node != node))) {
prev = publ;
publ = publ->zone_list_next;
- assert(prev != sseq->zone_list);
+ if (prev == sseq->zone_list) {
+
+ /* Prevent endless loop if publication not found */
+
+ return NULL;
+ }
}
if (publ != sseq->zone_list)
prev->zone_list_next = publ->zone_list_next;
@@ -416,14 +420,24 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
sseq->zone_list = NULL;
}
+ /* Remove publication from cluster scope list, if present */
+
if (in_own_cluster(node)) {
prev = sseq->cluster_list;
- publ = sseq->cluster_list->cluster_list_next;
- while ((publ->key != key) || (publ->ref != ref) ||
- (publ->node && (publ->node != node))) {
- prev = publ;
- publ = publ->cluster_list_next;
- assert(prev != sseq->cluster_list);
+ curr = sseq->cluster_list->cluster_list_next;
+ while (curr != publ) {
+ prev = curr;
+ curr = curr->cluster_list_next;
+ if (prev == sseq->cluster_list) {
+
+ /* Prevent endless loop for malformed list */
+
+ err("Unable to de-list cluster publication\n"
+ "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
+ publ->type, publ->lower, publ->node,
+ publ->ref, publ->key);
+ goto end_cluster;
+ }
}
if (publ != sseq->cluster_list)
prev->cluster_list_next = publ->cluster_list_next;
@@ -434,15 +448,26 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
sseq->cluster_list = NULL;
}
}
+end_cluster:
+
+ /* Remove publication from node scope list, if present */
if (node == tipc_own_addr) {
prev = sseq->node_list;
- publ = sseq->node_list->node_list_next;
- while ((publ->key != key) || (publ->ref != ref) ||
- (publ->node && (publ->node != node))) {
- prev = publ;
- publ = publ->node_list_next;
- assert(prev != sseq->node_list);
+ curr = sseq->node_list->node_list_next;
+ while (curr != publ) {
+ prev = curr;
+ curr = curr->node_list_next;
+ if (prev == sseq->node_list) {
+
+ /* Prevent endless loop for malformed list */
+
+ err("Unable to de-list node publication\n"
+ "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
+ publ->type, publ->lower, publ->node,
+ publ->ref, publ->key);
+ goto end_node;
+ }
}
if (publ != sseq->node_list)
prev->node_list_next = publ->node_list_next;
@@ -453,22 +478,18 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
sseq->node_list = NULL;
}
}
- assert(!publ->node || (publ->node == node));
- assert(publ->ref == ref);
- assert(publ->key == key);
+end_node:
- /*
- * Contract subseq list if no more publications:
- */
- if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
+ /* Contract subseq list if no more publications for that subseq */
+
+ if (!sseq->zone_list) {
free = &nseq->sseqs[nseq->first_free--];
memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
removed_subseq = 1;
}
- /*
- * Any subscriptions waiting ?
- */
+ /* Notify any waiting subscriptions */
+
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscr_report_overlap(s,
publ->lower,
@@ -478,6 +499,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
publ->node,
removed_subseq);
}
+
return publ;
}
@@ -530,7 +552,7 @@ static struct name_seq *nametbl_find_seq(u32 type)
seq_head = &table.types[hash(type)];
hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
if (ns->type == type) {
- dbg("found %x\n", ns);
+ dbg("found %p\n", ns);
return ns;
}
}
@@ -543,22 +565,21 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
{
struct name_seq *seq = nametbl_find_seq(type);
- dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
+ dbg("tipc_nametbl_insert_publ: {%u,%u,%u} found %p\n", type, lower, upper, seq);
if (lower > upper) {
- warn("Failed to publish illegal <%u,%u,%u>\n",
+ warn("Failed to publish illegal {%u,%u,%u}\n",
type, lower, upper);
return NULL;
}
- dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
+ dbg("Publishing {%u,%u,%u} from 0x%x\n", type, lower, upper, node);
if (!seq) {
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
- dbg("tipc_nametbl_insert_publ: created %x\n", seq);
+ dbg("tipc_nametbl_insert_publ: created %p\n", seq);
}
if (!seq)
return NULL;
- assert(seq->type == type);
return tipc_nameseq_insert_publ(seq, type, lower, upper,
scope, node, port, key);
}
@@ -572,7 +593,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
if (!seq)
return NULL;
- dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
+ dbg("Withdrawing {%u,%u} from 0x%x\n", type, lower, node);
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
if (!seq->first_free && list_empty(&seq->subscriptions)) {
@@ -738,12 +759,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
struct publication *publ;
if (table.local_publ_count >= tipc_max_publications) {
- warn("Failed publish: max %u local publication\n",
+ warn("Publication failed, local publication limit reached (%u)\n",
tipc_max_publications);
return NULL;
}
if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
- warn("Failed to publish reserved name <%u,%u,%u>\n",
+ warn("Publication failed, reserved name {%u,%u,%u}\n",
type, lower, upper);
return NULL;
}
@@ -767,10 +788,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
struct publication *publ;
- dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
+ dbg("tipc_nametbl_withdraw: {%u,%u}, key=%u\n", type, lower, key);
write_lock_bh(&tipc_nametbl_lock);
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
- if (publ) {
+ if (likely(publ)) {
table.local_publ_count--;
if (publ->scope != TIPC_NODE_SCOPE)
tipc_named_withdraw(publ);
@@ -780,6 +801,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
return 1;
}
write_unlock_bh(&tipc_nametbl_lock);
+ err("Unable to remove local publication\n"
+ "(type=%u, lower=%u, ref=%u, key=%u)\n",
+ type, lower, ref, key);
return 0;
}
@@ -787,8 +811,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
-void
-tipc_nametbl_subscribe(struct subscription *s)
+void tipc_nametbl_subscribe(struct subscription *s)
{
u32 type = s->seq.type;
struct name_seq *seq;
@@ -800,11 +823,13 @@ tipc_nametbl_subscribe(struct subscription *s)
}
if (seq){
spin_lock_bh(&seq->lock);
- dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n",
+ dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
seq, type, s->seq.lower, s->seq.upper);
- assert(seq->type == type);
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
+ } else {
+ warn("Failed to create subscription for {%u,%u,%u}\n",
+ s->seq.type, s->seq.lower, s->seq.upper);
}
write_unlock_bh(&tipc_nametbl_lock);
}
@@ -813,8 +838,7 @@ tipc_nametbl_subscribe(struct subscription *s)
* tipc_nametbl_unsubscribe - remove a subscription object from name table
*/
-void
-tipc_nametbl_unsubscribe(struct subscription *s)
+void tipc_nametbl_unsubscribe(struct subscription *s)
{
struct name_seq *seq;
@@ -1049,35 +1073,20 @@ int tipc_nametbl_init(void)
void tipc_nametbl_stop(void)
{
- struct hlist_head *seq_head;
- struct hlist_node *seq_node;
- struct hlist_node *tmp;
- struct name_seq *seq;
u32 i;
if (!table.types)
return;
+ /* Verify name table is empty, then release it */
+
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < tipc_nametbl_size; i++) {
- seq_head = &table.types[i];
- hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
- struct sub_seq *sseq = seq->sseqs;
-
- for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
- struct publication *publ = sseq->zone_list;
- assert(publ);
- do {
- struct publication *next =
- publ->zone_list_next;
- kfree(publ);
- publ = next;
- }
- while (publ != sseq->zone_list);
- }
- }
+ if (!hlist_empty(&table.types[i]))
+ err("tipc_nametbl_stop(): hash chain %u is non-null\n", i);
}
kfree(table.types);
table.types = NULL;
write_unlock_bh(&tipc_nametbl_lock);
}
+
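A minimal sketch (illustrative only, not taken from the patch) of the bounded circular-list walk that replaces the old assert() in tipc_nameseq_remove_publ(); field and variable names follow the name_table.c diff above:

	/* Search the circular zone list for the matching publication and
	 * bail out gracefully if the walk wraps around without a match,
	 * instead of asserting.
	 */
	prev = sseq->zone_list;
	publ = sseq->zone_list->zone_list_next;
	while ((publ->key != key) || (publ->ref != ref) ||
	       (publ->node && (publ->node != node))) {
		prev = publ;
		publ = publ->zone_list_next;
		if (prev == sseq->zone_list)
			return NULL;	/* not found: report failure to caller */
	}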
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f7c8223ddf7d..e5a359ab4930 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -115,7 +115,7 @@
* - A local spin_lock protecting the queue of subscriber events.
*/
-rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(tipc_net_lock);
struct network tipc_net = { NULL };
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0d5db06e203f..861322b935da 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -61,34 +61,37 @@ struct node *tipc_node_create(u32 addr)
struct node **curr_node;
n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
- if (n_ptr != NULL) {
- memset(n_ptr, 0, sizeof(*n_ptr));
- n_ptr->addr = addr;
- n_ptr->lock = SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&n_ptr->nsub);
-
- c_ptr = tipc_cltr_find(addr);
- if (c_ptr == NULL)
- c_ptr = tipc_cltr_create(addr);
- if (c_ptr != NULL) {
- n_ptr->owner = c_ptr;
- tipc_cltr_attach_node(c_ptr, n_ptr);
- n_ptr->last_router = -1;
-
- /* Insert node into ordered list */
- for (curr_node = &tipc_nodes; *curr_node;
- curr_node = &(*curr_node)->next) {
- if (addr < (*curr_node)->addr) {
- n_ptr->next = *curr_node;
- break;
- }
- }
- (*curr_node) = n_ptr;
- } else {
- kfree(n_ptr);
- n_ptr = NULL;
- }
- }
+ if (!n_ptr) {
+ warn("Node creation failed, no memory\n");
+ return NULL;
+ }
+
+ c_ptr = tipc_cltr_find(addr);
+ if (!c_ptr) {
+ c_ptr = tipc_cltr_create(addr);
+ }
+ if (!c_ptr) {
+ kfree(n_ptr);
+ return NULL;
+ }
+
+ memset(n_ptr, 0, sizeof(*n_ptr));
+ n_ptr->addr = addr;
+ spin_lock_init(&n_ptr->lock);
+ INIT_LIST_HEAD(&n_ptr->nsub);
+ n_ptr->owner = c_ptr;
+ tipc_cltr_attach_node(c_ptr, n_ptr);
+ n_ptr->last_router = -1;
+
+ /* Insert node into ordered list */
+ for (curr_node = &tipc_nodes; *curr_node;
+ curr_node = &(*curr_node)->next) {
+ if (addr < (*curr_node)->addr) {
+ n_ptr->next = *curr_node;
+ break;
+ }
+ }
+ (*curr_node) = n_ptr;
return n_ptr;
}
@@ -122,6 +125,8 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
{
struct link **active = &n_ptr->active_links[0];
+ n_ptr->working_links++;
+
info("Established link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
@@ -132,7 +137,7 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
return;
}
if (l_ptr->priority < active[0]->priority) {
- info("Link is standby\n");
+ info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
tipc_link_send_duplicate(active[0], l_ptr);
@@ -140,8 +145,9 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
active[0] = l_ptr;
return;
}
- info("Link <%s> on network plane %c becomes standby\n",
- active[0]->name, active[0]->b_ptr->net_plane);
+ info("Old link <%s> becomes standby\n", active[0]->name);
+ if (active[1] != active[0])
+ info("Old link <%s> becomes standby\n", active[1]->name);
active[0] = active[1] = l_ptr;
}
@@ -181,6 +187,8 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
{
struct link **active;
+ n_ptr->working_links--;
+
if (!tipc_link_is_active(l_ptr)) {
info("Lost standby link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
@@ -210,8 +218,7 @@ int tipc_node_has_active_links(struct node *n_ptr)
int tipc_node_has_redundant_links(struct node *n_ptr)
{
- return (tipc_node_has_active_links(n_ptr) &&
- (n_ptr->active_links[0] != n_ptr->active_links[1]));
+ return (n_ptr->working_links > 1);
}
static int tipc_node_has_active_routes(struct node *n_ptr)
@@ -234,7 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
u32 bearer_id = l_ptr->b_ptr->identity;
char addr_string[16];
- assert(bearer_id < MAX_BEARERS);
if (n_ptr->link_cnt >= 2) {
char addr_string[16];
@@ -249,7 +255,7 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
n_ptr->link_cnt++;
return n_ptr;
}
- err("Attempt to establish second link on <%s> to <%s> \n",
+ err("Attempt to establish second link on <%s> to %s \n",
l_ptr->b_ptr->publ.name,
addr_string_fill(addr_string, l_ptr->addr));
}
@@ -314,7 +320,7 @@ static void node_established_contact(struct node *n_ptr)
struct cluster *c_ptr;
dbg("node_established_contact:-> %x\n", n_ptr->addr);
- if (!tipc_node_has_active_routes(n_ptr)) {
+ if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 781126e084ae..a07cc79ea637 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -51,6 +51,7 @@
* @nsub: list of "node down" subscriptions monitoring node
* @active_links: pointers to active links to node
* @links: pointers to all links to node
+ * @working_links: number of working links to node (both active and standby)
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @routers: bitmap (used for multicluster communication)
@@ -76,6 +77,7 @@ struct node {
struct link *active_links[2];
struct link *links[MAX_BEARERS];
int link_cnt;
+ int working_links;
int permit_changeover;
u32 routers[512/32];
int last_router;
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index cff4068cc755..cc3fff3dec4f 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -47,18 +47,19 @@
void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down)
{
- node_sub->node = NULL;
- if (addr == tipc_own_addr)
+ if (addr == tipc_own_addr) {
+ node_sub->node = NULL;
return;
- if (!tipc_addr_node_valid(addr)) {
- warn("node_subscr with illegal %x\n", addr);
+ }
+
+ node_sub->node = tipc_node_find(addr);
+ if (!node_sub->node) {
+ warn("Node subscription rejected, unknown node 0x%x\n", addr);
return;
}
-
node_sub->handle_node_down = handle_down;
node_sub->usr_handle = usr_handle;
- node_sub->node = tipc_node_find(addr);
- assert(node_sub->node);
+
tipc_node_lock(node_sub->node);
list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
tipc_node_unlock(node_sub->node);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 67e96cb1e825..3251c8d8e53c 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -57,8 +57,8 @@
static struct sk_buff *msg_queue_head = NULL;
static struct sk_buff *msg_queue_tail = NULL;
-spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(tipc_port_list_lock);
+static DEFINE_SPINLOCK(queue_lock);
static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
@@ -168,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
struct port_list *item = dp;
int cnt = 0;
- assert(buf);
msg = buf_msg(buf);
/* Create destination port list, if one wasn't supplied */
@@ -196,7 +195,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
if (b == NULL) {
- warn("Buffer allocation failure\n");
+ warn("Unable to deliver multicast message(s)\n");
msg_dbg(msg, "LOST:");
goto exit;
}
@@ -228,14 +227,14 @@ u32 tipc_createport_raw(void *usr_handle,
u32 ref;
p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
- if (p_ptr == NULL) {
- warn("Memory squeeze; failed to create port\n");
+ if (!p_ptr) {
+ warn("Port creation failed, no memory\n");
return 0;
}
memset(p_ptr, 0, sizeof(*p_ptr));
ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
if (!ref) {
- warn("Reference Table Exhausted\n");
+ warn("Port creation failed, reference table exhausted\n");
kfree(p_ptr);
return 0;
}
@@ -810,18 +809,20 @@ static void port_dispatcher_sigh(void *dummy)
void *usr_handle;
int connected;
int published;
+ u32 message_type;
struct sk_buff *next = buf->next;
struct tipc_msg *msg = buf_msg(buf);
u32 dref = msg_destport(msg);
+ message_type = msg_type(msg);
+ if (message_type > TIPC_DIRECT_MSG)
+ goto reject; /* Unsupported message type */
+
p_ptr = tipc_port_lock(dref);
- if (!p_ptr) {
- /* Port deleted while msg in queue */
- tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
- buf = next;
- continue;
- }
+ if (!p_ptr)
+ goto reject; /* Port deleted while msg in queue */
+
orig.ref = msg_origport(msg);
orig.node = msg_orignode(msg);
up_ptr = p_ptr->user_port;
@@ -832,7 +833,7 @@ static void port_dispatcher_sigh(void *dummy)
if (unlikely(msg_errcode(msg)))
goto err;
- switch (msg_type(msg)) {
+ switch (message_type) {
case TIPC_CONN_MSG:{
tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
@@ -874,6 +875,7 @@ static void port_dispatcher_sigh(void *dummy)
&orig);
break;
}
+ case TIPC_MCAST_MSG:
case TIPC_NAMED_MSG:{
tipc_named_msg_event cb = up_ptr->named_msg_cb;
@@ -886,7 +888,8 @@ static void port_dispatcher_sigh(void *dummy)
goto reject;
dseq.type = msg_nametype(msg);
dseq.lower = msg_nameinst(msg);
- dseq.upper = dseq.lower;
+ dseq.upper = (message_type == TIPC_NAMED_MSG)
+ ? dseq.lower : msg_nameupper(msg);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_importance(msg),
@@ -899,7 +902,7 @@ static void port_dispatcher_sigh(void *dummy)
buf = next;
continue;
err:
- switch (msg_type(msg)) {
+ switch (message_type) {
case TIPC_CONN_MSG:{
tipc_conn_shutdown_event cb =
@@ -931,6 +934,7 @@ err:
msg_data_sz(msg), msg_errcode(msg), &orig);
break;
}
+ case TIPC_MCAST_MSG:
case TIPC_NAMED_MSG:{
tipc_named_msg_err_event cb =
up_ptr->named_err_cb;
@@ -940,7 +944,8 @@ err:
break;
dseq.type = msg_nametype(msg);
dseq.lower = msg_nameinst(msg);
- dseq.upper = dseq.lower;
+ dseq.upper = (message_type == TIPC_NAMED_MSG)
+ ? dseq.lower : msg_nameupper(msg);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_errcode(msg), &dseq);
@@ -1054,7 +1059,8 @@ int tipc_createport(u32 user_ref,
u32 ref;
up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
- if (up_ptr == NULL) {
+ if (!up_ptr) {
+ warn("Port creation failed, no memory\n");
return -ENOMEM;
}
ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
@@ -1165,8 +1171,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- if (!p_ptr->publ.published)
- goto exit;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
@@ -1193,7 +1197,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
if (list_empty(&p_ptr->publications))
p_ptr->publ.published = 0;
-exit:
tipc_port_unlock(p_ptr);
return res;
}
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 33bbf5095094..596d3c8ff750 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -63,7 +63,7 @@
struct ref_table tipc_ref_table = { NULL };
-static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ref_table_lock);
/**
* tipc_ref_table_init - create reference table for objects
@@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
index_mask = sz - 1;
for (i = sz - 1; i >= 0; i--) {
table[i].object = NULL;
- table[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&table[i].lock);
table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
}
tipc_ref_table.entries = table;
@@ -127,7 +127,14 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
u32 next_plus_upper;
u32 reference = 0;
- assert(tipc_ref_table.entries && object);
+ if (!object) {
+ err("Attempt to acquire reference to non-existent object\n");
+ return 0;
+ }
+ if (!tipc_ref_table.entries) {
+ err("Reference table not found during acquisition attempt\n");
+ return 0;
+ }
write_lock_bh(&ref_table_lock);
if (tipc_ref_table.first_free) {
@@ -162,15 +169,28 @@ void tipc_ref_discard(u32 ref)
u32 index;
u32 index_mask;
- assert(tipc_ref_table.entries);
- assert(ref != 0);
+ if (!ref) {
+ err("Attempt to discard reference 0\n");
+ return;
+ }
+ if (!tipc_ref_table.entries) {
+ err("Reference table not found during discard attempt\n");
+ return;
+ }
write_lock_bh(&ref_table_lock);
index_mask = tipc_ref_table.index_mask;
index = ref & index_mask;
entry = &(tipc_ref_table.entries[index]);
- assert(entry->object != 0);
- assert(entry->data.reference == ref);
+
+ if (!entry->object) {
+ err("Attempt to discard reference to non-existent object\n");
+ goto exit;
+ }
+ if (entry->data.reference != ref) {
+ err("Attempt to discard non-existent reference\n");
+ goto exit;
+ }
/* mark entry as unused */
entry->object = NULL;
@@ -184,6 +204,7 @@ void tipc_ref_discard(u32 ref)
/* increment upper bits of entry to invalidate subsequent references */
entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
+exit:
write_unlock_bh(&ref_table_lock);
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 648a734e6044..32d778448a00 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -169,12 +169,6 @@ static int tipc_create(struct socket *sock, int protocol)
struct sock *sk;
u32 ref;
- if ((sock->type != SOCK_STREAM) &&
- (sock->type != SOCK_SEQPACKET) &&
- (sock->type != SOCK_DGRAM) &&
- (sock->type != SOCK_RDM))
- return -EPROTOTYPE;
-
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
@@ -199,6 +193,9 @@ static int tipc_create(struct socket *sock, int protocol)
sock->ops = &msg_ops;
sock->state = SS_READY;
break;
+ default:
+ tipc_deleteport(ref);
+ return -EPROTOTYPE;
}
sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
@@ -426,7 +423,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
return -EFAULT;
- if ((ntohs(hdr.tcm_type) & 0xC000) & (!capable(CAP_NET_ADMIN)))
+ if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
return -EACCES;
return 0;
@@ -437,7 +434,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
* @iocb: (unused)
* @sock: socket structure
* @m: message to send
- * @total_len: (unused)
+ * @total_len: length of message
*
* Message must have a destination specified explicitly.
* Used for SOCK_RDM and SOCK_DGRAM messages,
@@ -458,7 +455,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
if (unlikely(!dest))
return -EDESTADDRREQ;
- if (unlikely(dest->family != AF_TIPC))
+ if (unlikely((m->msg_namelen < sizeof(*dest)) ||
+ (dest->family != AF_TIPC)))
return -EINVAL;
needs_conn = (sock->state != SS_READY);
@@ -470,6 +468,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
if ((tsock->p->published) ||
((sock->type == SOCK_STREAM) && (total_len != 0)))
return -EOPNOTSUPP;
+ if (dest->addrtype == TIPC_ADDR_NAME) {
+ tsock->p->conn_type = dest->addr.name.name.type;
+ tsock->p->conn_instance = dest->addr.name.name.instance;
+ }
}
if (down_interruptible(&tsock->sem))
@@ -538,7 +540,7 @@ exit:
* @iocb: (unused)
* @sock: socket structure
* @m: message to send
- * @total_len: (unused)
+ * @total_len: length of message
*
* Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
*
@@ -561,15 +563,15 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
return -ERESTARTSYS;
}
- if (unlikely(sock->state != SS_CONNECTED)) {
- if (sock->state == SS_DISCONNECTING)
- res = -EPIPE;
- else
- res = -ENOTCONN;
- goto exit;
- }
-
do {
+ if (unlikely(sock->state != SS_CONNECTED)) {
+ if (sock->state == SS_DISCONNECTING)
+ res = -EPIPE;
+ else
+ res = -ENOTCONN;
+ goto exit;
+ }
+
res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
if (likely(res != -ELINKCONG)) {
exit:
@@ -597,7 +599,8 @@ exit:
*
* Used for SOCK_STREAM data.
*
- * Returns the number of bytes sent on success, or errno otherwise
+ * Returns the number of bytes sent on success (or partial success),
+ * or errno if no data sent
*/
@@ -611,6 +614,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
char __user *curr_start;
int curr_left;
int bytes_to_send;
+ int bytes_sent;
int res;
if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
@@ -633,11 +637,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
* of small iovec entries into send_packet().
*/
- my_msg = *m;
- curr_iov = my_msg.msg_iov;
- curr_iovlen = my_msg.msg_iovlen;
+ curr_iov = m->msg_iov;
+ curr_iovlen = m->msg_iovlen;
my_msg.msg_iov = &my_iov;
my_msg.msg_iovlen = 1;
+ bytes_sent = 0;
while (curr_iovlen--) {
curr_start = curr_iov->iov_base;
@@ -648,16 +652,18 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
? curr_left : TIPC_MAX_USER_MSG_SIZE;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
- if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
- return res;
+ if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
+ return bytes_sent ? bytes_sent : res;
+ }
curr_left -= bytes_to_send;
curr_start += bytes_to_send;
+ bytes_sent += bytes_to_send;
}
curr_iov++;
}
- return total_len;
+ return bytes_sent;
}
/**
@@ -727,6 +733,7 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
u32 anc_data[3];
u32 err;
u32 dest_type;
+ int has_name;
int res;
if (likely(m->msg_controllen == 0))
@@ -738,10 +745,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
if (unlikely(err)) {
anc_data[0] = err;
anc_data[1] = msg_data_sz(msg);
- if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
+ if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
return res;
if (anc_data[1] &&
- (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1],
+ (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
msg_data(msg))))
return res;
}
@@ -751,25 +758,28 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
switch (dest_type) {
case TIPC_NAMED_MSG:
+ has_name = 1;
anc_data[0] = msg_nametype(msg);
anc_data[1] = msg_namelower(msg);
anc_data[2] = msg_namelower(msg);
break;
case TIPC_MCAST_MSG:
+ has_name = 1;
anc_data[0] = msg_nametype(msg);
anc_data[1] = msg_namelower(msg);
anc_data[2] = msg_nameupper(msg);
break;
case TIPC_CONN_MSG:
+ has_name = (tport->conn_type != 0);
anc_data[0] = tport->conn_type;
anc_data[1] = tport->conn_instance;
anc_data[2] = tport->conn_instance;
break;
default:
- anc_data[0] = 0;
+ has_name = 0;
}
- if (anc_data[0] &&
- (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
+ if (has_name &&
+ (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
return res;
return 0;
@@ -960,7 +970,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
restart:
if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
(flags & MSG_DONTWAIT))) {
- res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
+ res = -EWOULDBLOCK;
goto exit;
}
@@ -1051,7 +1061,7 @@ restart:
exit:
up(&tsock->sem);
- return res ? res : sz_copied;
+ return sz_copied ? sz_copied : res;
}
/**
@@ -1236,7 +1246,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
if (sock->state == SS_READY)
return -EOPNOTSUPP;
- /* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
+ /* Issue Posix-compliant error code if socket is in the wrong state */
+
if (sock->state == SS_LISTENING)
return -EOPNOTSUPP;
if (sock->state == SS_CONNECTING)
@@ -1244,13 +1255,20 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
if (sock->state != SS_UNCONNECTED)
return -EISCONN;
- if ((dst->family != AF_TIPC) ||
- ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID)))
+ /*
+ * Reject connection attempt using multicast address
+ *
+ * Note: send_msg() validates the rest of the address fields,
+ * so there's no need to do it here
+ */
+
+ if (dst->addrtype == TIPC_ADDR_MCAST)
return -EINVAL;
/* Send a 'SYN-' to destination */
m.msg_name = dest;
+ m.msg_namelen = destlen;
if ((res = send_msg(NULL, sock, &m, 0)) < 0) {
sock->state = SS_DISCONNECTING;
return res;
@@ -1269,10 +1287,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
msg = buf_msg(buf);
res = auto_connect(sock, tsock, msg);
if (!res) {
- if (dst->addrtype == TIPC_ADDR_NAME) {
- tsock->p->conn_type = dst->addr.name.name.type;
- tsock->p->conn_instance = dst->addr.name.name.instance;
- }
if (!msg_data_sz(msg))
advance_queue(tsock);
}
@@ -1386,7 +1400,7 @@ exit:
/**
* shutdown - shutdown socket connection
* @sock: socket structure
- * @how: direction to close (always treated as read + write)
+ * @how: direction to close (unused; always treated as read + write)
*
* Terminates connection (if necessary), then purges socket's receive queue.
*
@@ -1469,7 +1483,8 @@ restart:
* Returns 0 on success, errno otherwise
*/
-static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
+static int setsockopt(struct socket *sock,
+ int lvl, int opt, char __user *ov, int ol)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
u32 value;
@@ -1525,7 +1540,8 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
* Returns 0 on success, errno otherwise
*/
-static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
+static int getsockopt(struct socket *sock,
+ int lvl, int opt, char __user *ov, int *ol)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
int len;
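A hypothetical user-space caller, sketched only to illustrate the partial-result semantics introduced by the send_stream()/recv_stream() changes above: a short return value now reports the bytes actually transferred, so the caller resumes from that offset (sock_fd, buf and len are assumed names; needs <sys/socket.h> and <errno.h>):

	ssize_t sent;
	size_t total = 0;

	while (total < len) {
		sent = send(sock_fd, (const char *)buf + total, len - total, 0);
		if (sent < 0) {
			if (errno == EINTR)
				continue;	/* interrupted, try again */
			break;			/* hard error; 'total' bytes were still sent */
		}
		total += sent;
	}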
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index c5f026c7fd38..e19b4bcd67ec 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -266,7 +266,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
/* Refuse subscription if global limit exceeded */
if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
- warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
+ warn("Subscription rejected, subscription limit reached (%u)\n",
+ tipc_max_subscriptions);
subscr_terminate(subscriber);
return;
}
@@ -274,8 +275,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
- if (sub == NULL) {
- warn("Memory squeeze; ignoring subscription\n");
+ if (!sub) {
+ warn("Subscription rejected, no memory\n");
subscr_terminate(subscriber);
return;
}
@@ -298,8 +299,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
if ((((sub->filter != TIPC_SUB_PORTS)
&& (sub->filter != TIPC_SUB_SERVICE)))
|| (sub->seq.lower > sub->seq.upper)) {
- warn("Rejecting illegal subscription %u,%u,%u\n",
- sub->seq.type, sub->seq.lower, sub->seq.upper);
+ warn("Subscription rejected, illegal request\n");
kfree(sub);
subscr_terminate(subscriber);
return;
@@ -387,7 +387,7 @@ static void subscr_named_msg_event(void *usr_handle,
dbg("subscr_named_msg_event: orig = %x own = %x,\n",
orig->node, tipc_own_addr);
if (size && (size != sizeof(struct tipc_subscr))) {
- warn("Received tipc_subscr of invalid size\n");
+ warn("Subscriber rejected, invalid subscription size\n");
return;
}
@@ -395,7 +395,7 @@ static void subscr_named_msg_event(void *usr_handle,
subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
- warn("Memory squeeze; ignoring subscriber setup\n");
+ warn("Subscriber rejected, no memory\n");
return;
}
memset(subscriber, 0, sizeof(struct subscriber));
@@ -403,7 +403,7 @@ static void subscr_named_msg_event(void *usr_handle,
INIT_LIST_HEAD(&subscriber->subscriber_list);
subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
if (subscriber->ref == 0) {
- warn("Failed to acquire subscriber reference\n");
+ warn("Subscriber rejected, reference table exhausted\n");
kfree(subscriber);
return;
}
@@ -422,7 +422,7 @@ static void subscr_named_msg_event(void *usr_handle,
NULL,
&subscriber->port_ref);
if (subscriber->port_ref == 0) {
- warn("Memory squeeze; failed to create subscription port\n");
+ warn("Subscriber rejected, unable to create port\n");
tipc_ref_discard(subscriber->ref);
kfree(subscriber);
return;
@@ -457,7 +457,7 @@ int tipc_subscr_start(void)
int res = -1;
memset(&topsrv, 0, sizeof (topsrv));
- topsrv.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&topsrv.lock);
INIT_LIST_HEAD(&topsrv.subscriber_list);
spin_lock_bh(&topsrv.lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 3f3f933976e9..1e3ae57c7228 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -67,7 +67,7 @@ struct tipc_user {
static struct tipc_user *users = NULL;
static u32 next_free_user = MAX_USERID + 1;
-static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(reg_lock);
/**
* reg_init - create TIPC user registry (but don't activate it)
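The lock-initialization conversions repeated throughout this patch (net.c, name_table.c, node.c, port.c, ref.c, subscr.c and user_reg.c above) all follow the same pattern; a small stand-alone sketch with illustrative names:

	/* Old style, removed by this patch:
	 *	spinlock_t some_lock = SPIN_LOCK_UNLOCKED;
	 * New style: */
	static DEFINE_SPINLOCK(example_lock);	/* statically defined spinlock */
	static DEFINE_RWLOCK(example_rwlock);	/* statically defined rwlock */

	struct example {
		spinlock_t lock;		/* embedded lock ... */
	};

	static void example_init(struct example *e)
	{
		spin_lock_init(&e->lock);	/* ... initialized at runtime */
	}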
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 2803e1b4f170..316c4872ff5b 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -44,19 +44,24 @@
struct _zone *tipc_zone_create(u32 addr)
{
- struct _zone *z_ptr = NULL;
+ struct _zone *z_ptr;
u32 z_num;
- if (!tipc_addr_domain_valid(addr))
+ if (!tipc_addr_domain_valid(addr)) {
+ err("Zone creation failed, invalid domain 0x%x\n", addr);
return NULL;
+ }
z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
- if (z_ptr != NULL) {
- memset(z_ptr, 0, sizeof(*z_ptr));
- z_num = tipc_zone(addr);
- z_ptr->addr = tipc_addr(z_num, 0, 0);
- tipc_net.zones[z_num] = z_ptr;
+ if (!z_ptr) {
+ warn("Zone creation failed, insufficient memory\n");
+ return NULL;
}
+
+ memset(z_ptr, 0, sizeof(*z_ptr));
+ z_num = tipc_zone(addr);
+ z_ptr->addr = tipc_addr(z_num, 0, 0);
+ tipc_net.zones[z_num] = z_ptr;
return z_ptr;
}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index e48e60da3040..02a7eea5fdbc 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -8,7 +8,7 @@ PHONY := __build
__build:
# Read .config if it exist, otherwise ignore
--include .config
+-include include/config/auto.conf
include scripts/Kbuild.include
@@ -140,6 +140,15 @@ cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $<
%.i: %.c FORCE
$(call if_changed_dep,cc_i_c)
+quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
+cmd_cc_symtypes_c = \
+ $(CPP) -D__GENKSYMS__ $(c_flags) $< \
+ | $(GENKSYMS) -T $@ >/dev/null; \
+ test -s $@ || rm -f $@
+
+%.symtypes : %.c FORCE
+ $(call if_changed_dep,cc_symtypes_c)
+
# C (.c) files
# The C file is compiled and updated dependency information is generated.
# (See cmd_cc_o_c + relevant part of rule_cc_o_c)
@@ -166,7 +175,8 @@ cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
cmd_modversions = \
if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
$(CPP) -D__GENKSYMS__ $(c_flags) $< \
- | $(GENKSYMS) -a $(ARCH) \
+ | $(GENKSYMS) $(if $(KBUILD_SYMTYPES), \
+ -T $(@D)/$(@F:.o=.symtypes)) -a $(ARCH) \
> $(@D)/.tmp_$(@F:.o=.ver); \
\
$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 2d519704b8fd..2b066d12af2c 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -33,8 +33,8 @@
__hostprogs := $(sort $(hostprogs-y)$(hostprogs-m))
# hostprogs-y := tools/build may have been specified. Retrieve directory
-obj-dirs += $(foreach f,$(__hostprogs), $(if $(dir $(f)),$(dir $(f))))
-obj-dirs := $(strip $(sort $(filter-out ./,$(obj-dirs))))
+host-objdirs := $(foreach f,$(__hostprogs), $(if $(dir $(f)),$(dir $(f))))
+host-objdirs := $(strip $(sort $(filter-out ./,$(host-objdirs))))
# C code
@@ -73,7 +73,9 @@ host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
-obj-dirs := $(addprefix $(obj)/,$(obj-dirs))
+host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
+
+obj-dirs += $(host-objdirs)
#####
# Handle options to gcc. Support building with separate output directory
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 2686dd5dce8c..f0ff248f5e6f 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -17,7 +17,7 @@ __modinst: $(modules)
@:
quiet_cmd_modules_install = INSTALL $@
- cmd_modules_install = mkdir -p $(2); cp $@ $(2)
+ cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@)
# Modules built outside the kernel source tree go into extra by default
INSTALL_MOD_DIR ?= extra
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 0e056cffffdb..576cce5e387f 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -35,7 +35,7 @@
PHONY := _modpost
_modpost: __modpost
-include .config
+include include/config/auto.conf
include scripts/Kbuild.include
include scripts/Makefile.lib
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index f22e94c3a2d1..2f60070f9733 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -1,17 +1,15 @@
###
# Makefile.basic list the most basic programs used during the build process.
# The programs listed herein are what is needed to do the basic stuff,
-# such as splitting .config and fix dependency file.
+# such as fix dependency file.
# This initial step is needed to avoid files being recompiled
# when the kernel configuration changes (which is what happens when
# .config is included by the main Makefile).
# ---------------------------------------------------------------------------
# fixdep: Used to generate dependency information during build process
-# split-include: Divide all config symbols up in a number of files in
-# include/config/...
# docproc: Used in Documentation/docbook
-hostprogs-y := fixdep split-include docproc
+hostprogs-y := fixdep docproc
always := $(hostprogs-y)
# fixdep is needed to compile other host programs
diff --git a/scripts/basic/split-include.c b/scripts/basic/split-include.c
deleted file mode 100644
index 459c45276cb1..000000000000
--- a/scripts/basic/split-include.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * split-include.c
- *
- * Copyright abandoned, Michael Chastain, <mailto:mec@shout.net>.
- * This is a C version of syncdep.pl by Werner Almesberger.
- *
- * This program takes autoconf.h as input and outputs a directory full
- * of one-line include files, merging onto the old values.
- *
- * Think of the configuration options as key-value pairs. Then there
- * are five cases:
- *
- * key old value new value action
- *
- * KEY-1 VALUE-1 VALUE-1 leave file alone
- * KEY-2 VALUE-2A VALUE-2B write VALUE-2B into file
- * KEY-3 - VALUE-3 write VALUE-3 into file
- * KEY-4 VALUE-4 - write an empty file
- * KEY-5 (empty) - leave old empty file alone
- */
-
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <ctype.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#define ERROR_EXIT(strExit) \
- { \
- const int errnoSave = errno; \
- fprintf(stderr, "%s: ", str_my_name); \
- errno = errnoSave; \
- perror((strExit)); \
- exit(1); \
- }
-
-
-
-int main(int argc, const char * argv [])
-{
- const char * str_my_name;
- const char * str_file_autoconf;
- const char * str_dir_config;
-
- FILE * fp_config;
- FILE * fp_target;
- FILE * fp_find;
-
- int buffer_size;
-
- char * line;
- char * old_line;
- char * list_target;
- char * ptarget;
-
- struct stat stat_buf;
-
- /* Check arg count. */
- if (argc != 3)
- {
- fprintf(stderr, "%s: wrong number of arguments.\n", argv[0]);
- exit(1);
- }
-
- str_my_name = argv[0];
- str_file_autoconf = argv[1];
- str_dir_config = argv[2];
-
- /* Find a buffer size. */
- if (stat(str_file_autoconf, &stat_buf) != 0)
- ERROR_EXIT(str_file_autoconf);
- buffer_size = 2 * stat_buf.st_size + 4096;
-
- /* Allocate buffers. */
- if ( (line = malloc(buffer_size)) == NULL
- || (old_line = malloc(buffer_size)) == NULL
- || (list_target = malloc(buffer_size)) == NULL )
- ERROR_EXIT(str_file_autoconf);
-
- /* Open autoconfig file. */
- if ((fp_config = fopen(str_file_autoconf, "r")) == NULL)
- ERROR_EXIT(str_file_autoconf);
-
- /* Make output directory if needed. */
- if (stat(str_dir_config, &stat_buf) != 0)
- {
- if (mkdir(str_dir_config, 0755) != 0)
- ERROR_EXIT(str_dir_config);
- }
-
- /* Change to output directory. */
- if (chdir(str_dir_config) != 0)
- ERROR_EXIT(str_dir_config);
-
- /* Put initial separator into target list. */
- ptarget = list_target;
- *ptarget++ = '\n';
-
- /* Read config lines. */
- while (fgets(line, buffer_size, fp_config))
- {
- const char * str_config;
- int is_same;
- int itarget;
-
- if (line[0] != '#')
- continue;
- if ((str_config = strstr(line, "CONFIG_")) == NULL)
- continue;
-
- /* Make the output file name. */
- str_config += sizeof("CONFIG_") - 1;
- for (itarget = 0; !isspace(str_config[itarget]); itarget++)
- {
- int c = (unsigned char) str_config[itarget];
- if (isupper(c)) c = tolower(c);
- if (c == '_') c = '/';
- ptarget[itarget] = c;
- }
- ptarget[itarget++] = '.';
- ptarget[itarget++] = 'h';
- ptarget[itarget++] = '\0';
-
- /* Check for existing file. */
- is_same = 0;
- if ((fp_target = fopen(ptarget, "r")) != NULL)
- {
- fgets(old_line, buffer_size, fp_target);
- if (fclose(fp_target) != 0)
- ERROR_EXIT(ptarget);
- if (!strcmp(line, old_line))
- is_same = 1;
- }
-
- if (!is_same)
- {
- /* Auto-create directories. */
- int islash;
- for (islash = 0; islash < itarget; islash++)
- {
- if (ptarget[islash] == '/')
- {
- ptarget[islash] = '\0';
- if (stat(ptarget, &stat_buf) != 0
- && mkdir(ptarget, 0755) != 0)
- ERROR_EXIT( ptarget );
- ptarget[islash] = '/';
- }
- }
-
- /* Write the file. */
- if ((fp_target = fopen(ptarget, "w" )) == NULL)
- ERROR_EXIT(ptarget);
- fputs(line, fp_target);
- if (ferror(fp_target) || fclose(fp_target) != 0)
- ERROR_EXIT(ptarget);
- }
-
- /* Update target list */
- ptarget += itarget;
- *(ptarget-1) = '\n';
- }
-
- /*
- * Close autoconfig file.
- * Terminate the target list.
- */
- if (fclose(fp_config) != 0)
- ERROR_EXIT(str_file_autoconf);
- *ptarget = '\0';
-
- /*
- * Fix up existing files which have no new value.
- * This is Case 4 and Case 5.
- *
- * I re-read the tree and filter it against list_target.
- * This is crude. But it avoids data copies. Also, list_target
- * is compact and contiguous, so it easily fits into cache.
- *
- * Notice that list_target contains strings separated by \n,
- * with a \n before the first string and after the last.
- * fgets gives the incoming names a terminating \n.
- * So by having an initial \n, strstr will find exact matches.
- */
-
- fp_find = popen("find * -type f -name \"*.h\" -print", "r");
- if (fp_find == 0)
- ERROR_EXIT( "find" );
-
- line[0] = '\n';
- while (fgets(line+1, buffer_size, fp_find))
- {
- if (strstr(list_target, line) == NULL)
- {
- /*
- * This is an old file with no CONFIG_* flag in autoconf.h.
- */
-
- /* First strip the \n. */
- line[strlen(line)-1] = '\0';
-
- /* Grab size. */
- if (stat(line+1, &stat_buf) != 0)
- ERROR_EXIT(line);
-
- /* If file is not empty, make it empty and give it a fresh date. */
- if (stat_buf.st_size != 0)
- {
- if ((fp_target = fopen(line+1, "w")) == NULL)
- ERROR_EXIT(line);
- if (fclose(fp_target) != 0)
- ERROR_EXIT(line);
- }
- }
- }
-
- if (pclose(fp_find) != 0)
- ERROR_EXIT("find");
-
- return 0;
-}
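
The per-option header files that split-include used to generate do not go away: the same CONFIG_FOO_BAR to include/config/foo/bar.h mapping reappears in the new conf_split_config() added to scripts/kconfig/confdata.c further down in this diff. Below is a minimal standalone sketch of that shared name mapping (lowercase the symbol, turn '_' into '/', append ".h"); the symbol name used in it is only an example, not taken from the tree.

	/*
	 * Sketch of the CONFIG_* -> include/config/ path mapping shared by the
	 * removed split-include and the new conf_split_config(): lowercase the
	 * name, map '_' to '/', append ".h".
	 */
	#include <ctype.h>
	#include <stdio.h>

	static void config_to_path(const char *sym, char *out, size_t outlen)
	{
		size_t i = 0;

		while (*sym && i + 3 < outlen) {
			int c = tolower((unsigned char)*sym++);

			out[i++] = (c == '_') ? '/' : c;
		}
		snprintf(out + i, outlen - i, ".h");
	}

	int main(void)
	{
		char path[128];

		config_to_path("USB_STORAGE", path, sizeof(path));	/* example symbol */
		printf("include/config/%s\n", path);	/* include/config/usb/storage.h */
		return 0;
	}
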
diff --git a/scripts/export_report.pl b/scripts/export_report.pl
new file mode 100644
index 000000000000..9ed00d9bb0a7
--- /dev/null
+++ b/scripts/export_report.pl
@@ -0,0 +1,169 @@
+#!/usr/bin/perl -w
+#
+# (C) Copyright IBM Corporation 2006.
+# Released under GPL v2.
+# Author : Ram Pai (linuxram@us.ibm.com)
+#
+# Usage: export_report.pl -k Module.symvers [-o report_file ] -f *.mod.c
+#
+
+use Getopt::Std;
+use strict;
+
+sub numerically {
+ my $no1 = (split /\s+/, $a)[1];
+ my $no2 = (split /\s+/, $b)[1];
+ return $no1 <=> $no2;
+}
+
+sub alphabetically {
+ my ($module1, $value1) = @{$a};
+ my ($module2, $value2) = @{$b};
+ return $value1 <=> $value2 || $module2 cmp $module1;
+}
+
+sub print_depends_on {
+ my ($href) = @_;
+ print "\n";
+ while (my ($mod, $list) = each %$href) {
+ print "\t$mod:\n";
+ foreach my $sym (sort numerically @{$list}) {
+ my ($symbol, $no) = split /\s+/, $sym;
+ printf("\t\t%-25s\t%-25d\n", $symbol, $no);
+ }
+ print "\n";
+ }
+ print "\n";
+ print "~"x80 , "\n";
+}
+
+sub usage {
+ print "Usage: @_ -h -k Module.symvers [ -o outputfile ] \n",
+ "\t-f: treat all the non-option argument as .mod.c files. ",
+ "Recommend using this as the last option\n",
+ "\t-h: print detailed help\n",
+ "\t-k: the path to Module.symvers file. By default uses ",
+ "the file from the current directory\n",
+ "\t-o outputfile: output the report to outputfile\n";
+ exit 0;
+}
+
+sub collectcfiles {
+ my @file = `cat .tmp_versions/*.mod | grep '.*\.ko\$'`;
+ @file = grep {s/\.ko/.mod.c/} @file;
+ chomp @file;
+ return @file;
+}
+
+my (%SYMBOL, %MODULE, %opt, @allcfiles);
+
+if (not getopts('hk:o:f',\%opt) or defined $opt{'h'}) {
+ usage($0);
+}
+
+if (defined $opt{'f'}) {
+ @allcfiles = @ARGV;
+} else {
+ @allcfiles = collectcfiles();
+}
+
+if (not defined $opt{'k'}) {
+ $opt{'k'} = "Module.symvers";
+}
+
+unless (open(MODULE_SYMVERS, $opt{'k'})) {
+ die "Sorry, cannot open $opt{'k'}: $!\n";
+}
+
+if (defined $opt{'o'}) {
+ unless (open(OUTPUT_HANDLE, ">$opt{'o'}")) {
+ die "Sorry, cannot open $opt{'o'} $!\n";
+ }
+ select OUTPUT_HANDLE;
+}
+#
+# collect all the symbols and their attributes from the
+# Module.symvers file
+#
+while ( <MODULE_SYMVERS> ) {
+ chomp;
+ my (undef, $symbol, $module, $gpl) = split;
+ $SYMBOL { $symbol } = [ $module , "0" , $symbol, $gpl];
+}
+close(MODULE_SYMVERS);
+
+#
+# collect the usage count of each symbol.
+#
+foreach my $thismod (@allcfiles) {
+ unless (open(MODULE_MODULE, $thismod)) {
+ print "Sorry, cannot open $thismod: $!\n";
+ next;
+ }
+ my $state=0;
+ while ( <MODULE_MODULE> ) {
+ chomp;
+ if ($state eq 0) {
+ $state = 1 if ($_ =~ /static const struct modversion_info/);
+ next;
+ }
+ if ($state eq 1) {
+ $state = 2 if ($_ =~ /__attribute__\(\(section\("__versions"\)\)\)/);
+ next;
+ }
+ if ($state eq 2) {
+ if ( $_ !~ /0x[0-9a-f]{7,8},/ ) {
+ next;
+ }
+ my $sym = (split /([,"])/,)[4];
+ my ($module, $value, $symbol, $gpl) = @{$SYMBOL{$sym}};
+ $SYMBOL{ $sym } = [ $module, $value+1, $symbol, $gpl];
+ push(@{$MODULE{$thismod}} , $sym);
+ }
+ }
+ if ($state ne 2) {
+ print "WARNING:$thismod is not built with CONFIG_MODVERSION enabled\n";
+ }
+ close(MODULE_MODULE);
+}
+
+print "\tThis file reports the exported symbols usage patterns by in-tree\n",
+ "\t\t\t\tmodules\n";
+printf("%s\n\n\n","x"x80);
+printf("\t\t\t\tINDEX\n\n\n");
+printf("SECTION 1: Usage counts of all exported symbols\n");
+printf("SECTION 2: List of modules and the exported symbols they use\n");
+printf("%s\n\n\n","x"x80);
+printf("SECTION 1:\tThe exported symbols and their usage count\n\n");
+printf("%-25s\t%-25s\t%-5s\t%-25s\n", "Symbol", "Module", "Usage count",
+ "export type");
+
+#
+# print the list of unused exported symbols
+#
+foreach my $list (sort alphabetically values(%SYMBOL)) {
+ my ($module, $value, $symbol, $gpl) = @{$list};
+ printf("%-25s\t%-25s\t%-10s\t", $symbol, $module, $value);
+ if (defined $gpl) {
+ printf("%-25s\n",$gpl);
+ } else {
+ printf("\n");
+ }
+}
+printf("%s\n\n\n","x"x80);
+
+printf("SECTION 2:\n\tThis section reports export-symbol-usage of in-kernel
+modules. Each module lists the modules, and the symbols from that module that
+it uses. Each listed symbol reports the number of modules using it\n");
+
+print "~"x80 , "\n";
+while (my ($thismod, $list) = each %MODULE) {
+ my %depends;
+ $thismod =~ s/\.mod\.c/.ko/;
+ print "\t\t\t$thismod\n";
+ foreach my $symbol (@{$list}) {
+ my ($module, $value, undef, $gpl) = @{$SYMBOL{$symbol}};
+ push (@{$depends{"$module"}}, "$symbol $value");
+ }
+ print_depends_on(\%depends);
+}
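
For reference, the script's own usage text above describes the expected invocation; an invocation along the lines of perl scripts/export_report.pl -k Module.symvers -o report.txt -f *.mod.c after a modular build should produce both report sections (the output file name is only an example), and when -f is omitted the script collects the .mod.c list from .tmp_versions on its own, as collectcfiles() above shows.
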
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 5b0344e20d61..b0381823e404 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -42,7 +42,7 @@ static FILE *debugfile;
int cur_line = 1;
char *cur_filename;
-static int flag_debug, flag_dump_defs, flag_warnings;
+static int flag_debug, flag_dump_defs, flag_dump_types, flag_warnings;
static const char *arch = "";
static const char *mod_prefix = "";
@@ -50,6 +50,7 @@ static int errors;
static int nsyms;
static struct symbol *expansion_trail;
+static struct symbol *visited_symbols;
static const char *const symbol_type_name[] = {
"normal", "typedef", "enum", "struct", "union"
@@ -176,6 +177,7 @@ struct symbol *add_symbol(const char *name, enum symbol_type type,
sym->type = type;
sym->defn = defn;
sym->expansion_trail = NULL;
+ sym->visited = NULL;
sym->is_extern = is_extern;
sym->hash_next = symtab[h];
@@ -236,26 +238,11 @@ static int equal_list(struct string_list *a, struct string_list *b)
static void print_node(FILE * f, struct string_list *list)
{
- switch (list->tag) {
- case SYM_STRUCT:
- putc('s', f);
- goto printit;
- case SYM_UNION:
- putc('u', f);
- goto printit;
- case SYM_ENUM:
- putc('e', f);
- goto printit;
- case SYM_TYPEDEF:
- putc('t', f);
- goto printit;
-
- printit:
+ if (list->tag != SYM_NORMAL) {
+ putc(symbol_type_name[list->tag][0], f);
putc('#', f);
- case SYM_NORMAL:
- fputs(list->string, f);
- break;
}
+ fputs(list->string, f);
}
static void print_list(FILE * f, struct string_list *list)
@@ -287,9 +274,9 @@ static void print_list(FILE * f, struct string_list *list)
}
}
-static unsigned long expand_and_crc_list(struct string_list *list,
- unsigned long crc)
+static unsigned long expand_and_crc_sym(struct symbol *sym, unsigned long crc)
{
+ struct string_list *list = sym->defn;
struct string_list **e, **b;
struct string_list *tmp, **tmp2;
int elem = 1;
@@ -332,7 +319,7 @@ static unsigned long expand_and_crc_list(struct string_list *list,
} else {
subsym->expansion_trail = expansion_trail;
expansion_trail = subsym;
- crc = expand_and_crc_list(subsym->defn, crc);
+ crc = expand_and_crc_sym(subsym, crc);
}
break;
@@ -382,12 +369,22 @@ static unsigned long expand_and_crc_list(struct string_list *list,
} else {
subsym->expansion_trail = expansion_trail;
expansion_trail = subsym;
- crc = expand_and_crc_list(subsym->defn, crc);
+ crc = expand_and_crc_sym(subsym, crc);
}
break;
}
}
+ {
+ static struct symbol **end = &visited_symbols;
+
+ if (!sym->visited) {
+ *end = sym;
+ end = &sym->visited;
+ sym->visited = (struct symbol *)-1L;
+ }
+ }
+
return crc;
}
@@ -406,7 +403,7 @@ void export_symbol(const char *name)
expansion_trail = (struct symbol *)-1L;
- crc = expand_and_crc_list(sym->defn, 0xffffffff) ^ 0xffffffff;
+ crc = expand_and_crc_sym(sym, 0xffffffff) ^ 0xffffffff;
sym = expansion_trail;
while (sym != (struct symbol *)-1L) {
@@ -464,6 +461,7 @@ static void genksyms_usage(void)
int main(int argc, char **argv)
{
+ FILE *dumpfile = NULL;
int o;
#ifdef __GNU_LIBRARY__
@@ -473,15 +471,16 @@ int main(int argc, char **argv)
{"warnings", 0, 0, 'w'},
{"quiet", 0, 0, 'q'},
{"dump", 0, 0, 'D'},
+ {"dump-types", 1, 0, 'T'},
{"version", 0, 0, 'V'},
{"help", 0, 0, 'h'},
{0, 0, 0, 0}
};
- while ((o = getopt_long(argc, argv, "a:dwqVDk:p:",
+ while ((o = getopt_long(argc, argv, "a:dwqVDT:k:p:",
&long_opts[0], NULL)) != EOF)
#else /* __GNU_LIBRARY__ */
- while ((o = getopt(argc, argv, "a:dwqVDk:p:")) != EOF)
+ while ((o = getopt(argc, argv, "a:dwqVDT:k:p:")) != EOF)
#endif /* __GNU_LIBRARY__ */
switch (o) {
case 'a':
@@ -502,6 +501,14 @@ int main(int argc, char **argv)
case 'D':
flag_dump_defs = 1;
break;
+ case 'T':
+ flag_dump_types = 1;
+ dumpfile = fopen(optarg, "w");
+ if (!dumpfile) {
+ perror(optarg);
+ return 1;
+ }
+ break;
case 'h':
genksyms_usage();
return 0;
@@ -524,6 +531,24 @@ int main(int argc, char **argv)
yyparse();
+ if (flag_dump_types && visited_symbols) {
+ while (visited_symbols != (struct symbol *)-1L) {
+ struct symbol *sym = visited_symbols;
+
+ if (sym->type != SYM_NORMAL) {
+ putc(symbol_type_name[sym->type][0], dumpfile);
+ putc('#', dumpfile);
+ }
+ fputs(sym->name, dumpfile);
+ putc(' ', dumpfile);
+ print_list(dumpfile, sym->defn);
+ putc('\n', dumpfile);
+
+ visited_symbols = sym->visited;
+ sym->visited = NULL;
+ }
+ }
+
if (flag_debug) {
fprintf(debugfile, "Hash table occupancy %d/%d = %g\n",
nsyms, HASH_BUCKETS,
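
The genksyms change above threads every symbol visited during CRC expansion onto a list through the new 'visited' pointer, using a static tail pointer and the same (struct symbol *)-1L sentinel as the expansion trail, so that the new -T/--dump-types option can write out each visited definition afterwards. Below is a small self-contained sketch of that append-once intrusive list pattern; the struct is a simplified stand-in, not the genksyms symbol type.

	#include <stdio.h>

	struct node {
		const char *name;
		struct node *visited;	/* next visited node, or the end marker */
	};

	#define END_MARK ((struct node *)-1L)

	static struct node *visited_head;

	static void mark_visited(struct node *n)
	{
		static struct node **tail = &visited_head;

		if (n->visited)		/* already on the list */
			return;
		*tail = n;		/* overwrite the previous terminator */
		tail = &n->visited;
		n->visited = END_MARK;	/* new terminator */
	}

	int main(void)
	{
		struct node a = { "a", NULL }, b = { "b", NULL };

		mark_visited(&a);
		mark_visited(&b);
		mark_visited(&a);	/* duplicate, ignored */

		for (struct node *n = visited_head; n != END_MARK; n = n->visited)
			printf("%s\n", n->name);	/* prints "a" then "b" */
		return 0;
	}
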
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index ab6f34f38735..2668287aa498 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -41,6 +41,7 @@ struct symbol {
enum symbol_type type;
struct string_list *defn;
struct symbol *expansion_trail;
+ struct symbol *visited;
int is_extern;
};
diff --git a/scripts/genksyms/lex.c_shipped b/scripts/genksyms/lex.c_shipped
index 1218053ee960..37ba98241b96 100644
--- a/scripts/genksyms/lex.c_shipped
+++ b/scripts/genksyms/lex.c_shipped
@@ -2023,7 +2023,7 @@ repeat:
break;
default:
- abort();
+ exit(1);
}
fini:
diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l
index fe0dfeedf0ff..5e544a06678b 100644
--- a/scripts/genksyms/lex.l
+++ b/scripts/genksyms/lex.l
@@ -392,7 +392,7 @@ repeat:
break;
default:
- abort();
+ exit(1);
}
fini:
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 8012d1076876..4dcb8867b5f4 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -539,6 +539,7 @@ int main(int ac, char **av)
name = av[i];
if (!name) {
printf(_("%s: Kconfig file missing\n"), av[0]);
+ exit(1);
}
conf_parse(name);
//zconfdump(stdout);
@@ -573,7 +574,7 @@ int main(int ac, char **av)
case set_random:
name = getenv("KCONFIG_ALLCONFIG");
if (name && !stat(name, &tmpstat)) {
- conf_read_simple(name);
+ conf_read_simple(name, S_DEF_USER);
break;
}
switch (input_mode) {
@@ -584,9 +585,9 @@ int main(int ac, char **av)
default: break;
}
if (!stat(name, &tmpstat))
- conf_read_simple(name);
+ conf_read_simple(name, S_DEF_USER);
else if (!stat("all.config", &tmpstat))
- conf_read_simple("all.config");
+ conf_read_simple("all.config", S_DEF_USER);
break;
default:
break;
@@ -599,7 +600,15 @@ int main(int ac, char **av)
input_mode = ask_silent;
valid_stdin = 1;
}
- }
+ } else if (sym_change_count) {
+ name = getenv("KCONFIG_NOSILENTUPDATE");
+ if (name && *name) {
+ fprintf(stderr, _("\n*** Kernel configuration requires explicit update.\n\n"));
+ return 1;
+ }
+ } else
+ goto skip_check;
+
do {
conf_cnt = 0;
check_conf(&rootmenu);
@@ -608,5 +617,11 @@ int main(int ac, char **av)
fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n"));
return 1;
}
+skip_check:
+ if (input_mode == ask_silent && conf_write_autoconf()) {
+ fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n"));
+ return 1;
+ }
+
return 0;
}
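
Taken together, the conf.c changes above mean: when nothing in the configuration changed, the oldconfig-style consistency loop is skipped entirely (the new skip_check label); when changes are pending and KCONFIG_NOSILENTUPDATE is set in the environment, a silent update is refused with an error; and ask_silent runs now finish by calling the new conf_write_autoconf(), whose implementation follows in the confdata.c hunk below.
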
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 1b5df589f3ae..2ee48c377b66 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -5,6 +5,7 @@
#include <sys/stat.h>
#include <ctype.h>
+#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -20,19 +21,8 @@ static void conf_warning(const char *fmt, ...)
static const char *conf_filename;
static int conf_lineno, conf_warnings, conf_unsaved;
-const char conf_def_filename[] = ".config";
-
const char conf_defname[] = "arch/$ARCH/defconfig";
-const char *conf_confnames[] = {
- ".config",
- "/lib/modules/$UNAME_RELEASE/.config",
- "/etc/kernel-config",
- "/boot/config-$UNAME_RELEASE",
- conf_defname,
- NULL,
-};
-
static void conf_warning(const char *fmt, ...)
{
va_list ap;
@@ -44,6 +34,13 @@ static void conf_warning(const char *fmt, ...)
conf_warnings++;
}
+const char *conf_get_configname(void)
+{
+ char *name = getenv("KCONFIG_CONFIG");
+
+ return name ? name : ".config";
+}
+
static char *conf_expand_value(const char *in)
{
struct symbol *sym;
@@ -86,51 +83,65 @@ char *conf_get_default_confname(void)
return name;
}
-int conf_read_simple(const char *name)
+int conf_read_simple(const char *name, int def)
{
FILE *in = NULL;
char line[1024];
char *p, *p2;
struct symbol *sym;
- int i;
+ int i, def_flags;
if (name) {
in = zconf_fopen(name);
} else {
- const char **names = conf_confnames;
- while ((name = *names++)) {
- name = conf_expand_value(name);
+ struct property *prop;
+
+ name = conf_get_configname();
+ in = zconf_fopen(name);
+ if (in)
+ goto load;
+ sym_change_count++;
+ if (!sym_defconfig_list)
+ return 1;
+
+ for_all_defaults(sym_defconfig_list, prop) {
+ if (expr_calc_value(prop->visible.expr) == no ||
+ prop->expr->type != E_SYMBOL)
+ continue;
+ name = conf_expand_value(prop->expr->left.sym->name);
in = zconf_fopen(name);
if (in) {
printf(_("#\n"
- "# using defaults found in %s\n"
- "#\n"), name);
- break;
+ "# using defaults found in %s\n"
+ "#\n"), name);
+ goto load;
}
}
}
if (!in)
return 1;
+load:
conf_filename = name;
conf_lineno = 0;
conf_warnings = 0;
conf_unsaved = 0;
+ def_flags = SYMBOL_DEF << def;
for_all_symbols(i, sym) {
- sym->flags |= SYMBOL_NEW | SYMBOL_CHANGED;
+ sym->flags |= SYMBOL_CHANGED;
+ sym->flags &= ~(def_flags|SYMBOL_VALID);
if (sym_is_choice(sym))
- sym->flags &= ~SYMBOL_NEW;
- sym->flags &= ~SYMBOL_VALID;
+ sym->flags |= def_flags;
switch (sym->type) {
case S_INT:
case S_HEX:
case S_STRING:
- if (sym->user.val)
- free(sym->user.val);
+ if (sym->def[def].val)
+ free(sym->def[def].val);
default:
- sym->user.val = NULL;
- sym->user.tri = no;
+ sym->def[def].val = NULL;
+ sym->def[def].tri = no;
}
}
@@ -147,19 +158,26 @@ int conf_read_simple(const char *name)
*p++ = 0;
if (strncmp(p, "is not set", 10))
continue;
- sym = sym_find(line + 9);
- if (!sym) {
- conf_warning("trying to assign nonexistent symbol %s", line + 9);
- break;
- } else if (!(sym->flags & SYMBOL_NEW)) {
+ if (def == S_DEF_USER) {
+ sym = sym_find(line + 9);
+ if (!sym) {
+ conf_warning("trying to assign nonexistent symbol %s", line + 9);
+ break;
+ }
+ } else {
+ sym = sym_lookup(line + 9, 0);
+ if (sym->type == S_UNKNOWN)
+ sym->type = S_BOOLEAN;
+ }
+ if (sym->flags & def_flags) {
conf_warning("trying to reassign symbol %s", sym->name);
break;
}
switch (sym->type) {
case S_BOOLEAN:
case S_TRISTATE:
- sym->user.tri = no;
- sym->flags &= ~SYMBOL_NEW;
+ sym->def[def].tri = no;
+ sym->flags |= def_flags;
break;
default:
;
@@ -177,34 +195,48 @@ int conf_read_simple(const char *name)
p2 = strchr(p, '\n');
if (p2)
*p2 = 0;
- sym = sym_find(line + 7);
- if (!sym) {
- conf_warning("trying to assign nonexistent symbol %s", line + 7);
- break;
- } else if (!(sym->flags & SYMBOL_NEW)) {
+ if (def == S_DEF_USER) {
+ sym = sym_find(line + 7);
+ if (!sym) {
+ conf_warning("trying to assign nonexistent symbol %s", line + 7);
+ break;
+ }
+ } else {
+ sym = sym_lookup(line + 7, 0);
+ if (sym->type == S_UNKNOWN)
+ sym->type = S_OTHER;
+ }
+ if (sym->flags & def_flags) {
conf_warning("trying to reassign symbol %s", sym->name);
break;
}
switch (sym->type) {
case S_TRISTATE:
if (p[0] == 'm') {
- sym->user.tri = mod;
- sym->flags &= ~SYMBOL_NEW;
+ sym->def[def].tri = mod;
+ sym->flags |= def_flags;
break;
}
case S_BOOLEAN:
if (p[0] == 'y') {
- sym->user.tri = yes;
- sym->flags &= ~SYMBOL_NEW;
+ sym->def[def].tri = yes;
+ sym->flags |= def_flags;
break;
}
if (p[0] == 'n') {
- sym->user.tri = no;
- sym->flags &= ~SYMBOL_NEW;
+ sym->def[def].tri = no;
+ sym->flags |= def_flags;
break;
}
conf_warning("symbol value '%s' invalid for %s", p, sym->name);
break;
+ case S_OTHER:
+ if (*p != '"') {
+ for (p2 = p; *p2 && !isspace(*p2); p2++)
+ ;
+ sym->type = S_STRING;
+ goto done;
+ }
case S_STRING:
if (*p++ != '"')
break;
@@ -221,9 +253,10 @@ int conf_read_simple(const char *name)
}
case S_INT:
case S_HEX:
+ done:
if (sym_string_valid(sym, p)) {
- sym->user.val = strdup(p);
- sym->flags &= ~SYMBOL_NEW;
+ sym->def[def].val = strdup(p);
+ sym->flags |= def_flags;
} else {
conf_warning("symbol value '%s' invalid for %s", p, sym->name);
continue;
@@ -241,24 +274,24 @@ int conf_read_simple(const char *name)
}
if (sym && sym_is_choice_value(sym)) {
struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
- switch (sym->user.tri) {
+ switch (sym->def[def].tri) {
case no:
break;
case mod:
- if (cs->user.tri == yes) {
+ if (cs->def[def].tri == yes) {
conf_warning("%s creates inconsistent choice state", sym->name);
- cs->flags |= SYMBOL_NEW;
+ cs->flags &= ~def_flags;
}
break;
case yes:
- if (cs->user.tri != no) {
+ if (cs->def[def].tri != no) {
conf_warning("%s creates inconsistent choice state", sym->name);
- cs->flags |= SYMBOL_NEW;
+ cs->flags &= ~def_flags;
} else
- cs->user.val = sym;
+ cs->def[def].val = sym;
break;
}
- cs->user.tri = E_OR(cs->user.tri, sym->user.tri);
+ cs->def[def].tri = E_OR(cs->def[def].tri, sym->def[def].tri);
}
}
fclose(in);
@@ -273,9 +306,11 @@ int conf_read(const char *name)
struct symbol *sym;
struct property *prop;
struct expr *e;
- int i;
+ int i, flags;
+
+ sym_change_count = 0;
- if (conf_read_simple(name))
+ if (conf_read_simple(name, S_DEF_USER))
return 1;
for_all_symbols(i, sym) {
@@ -287,12 +322,12 @@ int conf_read(const char *name)
switch (sym->type) {
case S_BOOLEAN:
case S_TRISTATE:
- if (sym->user.tri != sym_get_tristate_value(sym))
+ if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym))
break;
if (!sym_is_choice(sym))
goto sym_ok;
default:
- if (!strcmp(sym->curr.val, sym->user.val))
+ if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val))
goto sym_ok;
break;
}
@@ -304,15 +339,13 @@ int conf_read(const char *name)
sym_ok:
if (sym_has_value(sym) && !sym_is_choice_value(sym)) {
if (sym->visible == no)
- sym->flags |= SYMBOL_NEW;
+ sym->flags &= ~SYMBOL_DEF_USER;
switch (sym->type) {
case S_STRING:
case S_INT:
case S_HEX:
- if (!sym_string_within_range(sym, sym->user.val)) {
- sym->flags |= SYMBOL_NEW;
- sym->flags &= ~SYMBOL_VALID;
- }
+ if (!sym_string_within_range(sym, sym->def[S_DEF_USER].val))
+ sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER);
default:
break;
}
@@ -320,19 +353,21 @@ int conf_read(const char *name)
if (!sym_is_choice(sym))
continue;
prop = sym_get_choice_prop(sym);
+ flags = sym->flags;
for (e = prop->expr; e; e = e->left.expr)
if (e->right.sym->visible != no)
- sym->flags |= e->right.sym->flags & SYMBOL_NEW;
+ flags &= e->right.sym->flags;
+ sym->flags |= flags & SYMBOL_DEF_USER;
}
- sym_change_count = conf_warnings || conf_unsaved;
+ sym_change_count += conf_warnings || conf_unsaved;
return 0;
}
int conf_write(const char *name)
{
- FILE *out, *out_h;
+ FILE *out;
struct symbol *sym;
struct menu *menu;
const char *basename;
@@ -351,7 +386,7 @@ int conf_write(const char *name)
if (!stat(name, &st) && S_ISDIR(st.st_mode)) {
strcpy(dirname, name);
strcat(dirname, "/");
- basename = conf_def_filename;
+ basename = conf_get_configname();
} else if ((slash = strrchr(name, '/'))) {
int size = slash - name + 1;
memcpy(dirname, name, size);
@@ -359,23 +394,24 @@ int conf_write(const char *name)
if (slash[1])
basename = slash + 1;
else
- basename = conf_def_filename;
+ basename = conf_get_configname();
} else
basename = name;
} else
- basename = conf_def_filename;
+ basename = conf_get_configname();
- sprintf(newname, "%s.tmpconfig.%d", dirname, (int)getpid());
- out = fopen(newname, "w");
+ sprintf(newname, "%s%s", dirname, basename);
+ env = getenv("KCONFIG_OVERWRITECONFIG");
+ if (!env || !*env) {
+ sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid());
+ out = fopen(tmpname, "w");
+ } else {
+ *tmpname = 0;
+ out = fopen(newname, "w");
+ }
if (!out)
return 1;
- out_h = NULL;
- if (!name) {
- out_h = fopen(".tmpconfig.h", "w");
- if (!out_h)
- return 1;
- file_write_dep(NULL);
- }
+
sym = sym_lookup("KERNELVERSION", 0);
sym_calc_value(sym);
time(&now);
@@ -391,16 +427,6 @@ int conf_write(const char *name)
sym_get_string_value(sym),
use_timestamp ? "# " : "",
use_timestamp ? ctime(&now) : "");
- if (out_h)
- fprintf(out_h, "/*\n"
- " * Automatically generated C config: don't edit\n"
- " * Linux kernel version: %s\n"
- "%s%s"
- " */\n"
- "#define AUTOCONF_INCLUDED\n",
- sym_get_string_value(sym),
- use_timestamp ? " * " : "",
- use_timestamp ? ctime(&now) : "");
if (!sym_change_count)
sym_clear_all_valid();
@@ -416,11 +442,6 @@ int conf_write(const char *name)
"#\n"
"# %s\n"
"#\n", str);
- if (out_h)
- fprintf(out_h, "\n"
- "/*\n"
- " * %s\n"
- " */\n", str);
} else if (!(sym->flags & SYMBOL_CHOICE)) {
sym_calc_value(sym);
if (!(sym->flags & SYMBOL_WRITE))
@@ -438,59 +459,39 @@ int conf_write(const char *name)
switch (sym_get_tristate_value(sym)) {
case no:
fprintf(out, "# CONFIG_%s is not set\n", sym->name);
- if (out_h)
- fprintf(out_h, "#undef CONFIG_%s\n", sym->name);
break;
case mod:
fprintf(out, "CONFIG_%s=m\n", sym->name);
- if (out_h)
- fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
break;
case yes:
fprintf(out, "CONFIG_%s=y\n", sym->name);
- if (out_h)
- fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
break;
}
break;
case S_STRING:
- // fix me
str = sym_get_string_value(sym);
fprintf(out, "CONFIG_%s=\"", sym->name);
- if (out_h)
- fprintf(out_h, "#define CONFIG_%s \"", sym->name);
- do {
+ while (1) {
l = strcspn(str, "\"\\");
if (l) {
fwrite(str, l, 1, out);
- if (out_h)
- fwrite(str, l, 1, out_h);
- }
- str += l;
- while (*str == '\\' || *str == '"') {
- fprintf(out, "\\%c", *str);
- if (out_h)
- fprintf(out_h, "\\%c", *str);
- str++;
+ str += l;
}
- } while (*str);
+ if (!*str)
+ break;
+ fprintf(out, "\\%c", *str++);
+ }
fputs("\"\n", out);
- if (out_h)
- fputs("\"\n", out_h);
break;
case S_HEX:
str = sym_get_string_value(sym);
if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) {
fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
- if (out_h)
- fprintf(out_h, "#define CONFIG_%s 0x%s\n", sym->name, str);
break;
}
case S_INT:
str = sym_get_string_value(sym);
fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
- if (out_h)
- fprintf(out_h, "#define CONFIG_%s %s\n", sym->name, str);
break;
}
}
@@ -510,21 +511,253 @@ int conf_write(const char *name)
}
}
fclose(out);
- if (out_h) {
- fclose(out_h);
- rename(".tmpconfig.h", "include/linux/autoconf.h");
+
+ if (*tmpname) {
+ strcat(dirname, name ? name : conf_get_configname());
+ strcat(dirname, ".old");
+ rename(newname, dirname);
+ if (rename(tmpname, newname))
+ return 1;
}
- if (!name || basename != conf_def_filename) {
- if (!name)
- name = conf_def_filename;
- sprintf(tmpname, "%s.old", name);
- rename(name, tmpname);
+
+ printf(_("#\n"
+ "# configuration written to %s\n"
+ "#\n"), newname);
+
+ sym_change_count = 0;
+
+ return 0;
+}
+
+int conf_split_config(void)
+{
+ char *name, path[128];
+ char *s, *d, c;
+ struct symbol *sym;
+ struct stat sb;
+ int res, i, fd;
+
+ name = getenv("KCONFIG_AUTOCONFIG");
+ if (!name)
+ name = "include/config/auto.conf";
+ conf_read_simple(name, S_DEF_AUTO);
+
+ if (chdir("include/config"))
+ return 1;
+
+ res = 0;
+ for_all_symbols(i, sym) {
+ sym_calc_value(sym);
+ if ((sym->flags & SYMBOL_AUTO) || !sym->name)
+ continue;
+ if (sym->flags & SYMBOL_WRITE) {
+ if (sym->flags & SYMBOL_DEF_AUTO) {
+ /*
+ * symbol has old and new value,
+ * so compare them...
+ */
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ if (sym_get_tristate_value(sym) ==
+ sym->def[S_DEF_AUTO].tri)
+ continue;
+ break;
+ case S_STRING:
+ case S_HEX:
+ case S_INT:
+ if (!strcmp(sym_get_string_value(sym),
+ sym->def[S_DEF_AUTO].val))
+ continue;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /*
+ * If there is no old value, only 'no' (unset)
+ * is allowed as new value.
+ */
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ if (sym_get_tristate_value(sym) == no)
+ continue;
+ break;
+ default:
+ break;
+ }
+ }
+ } else if (!(sym->flags & SYMBOL_DEF_AUTO))
+ /* There is neither an old nor a new value. */
+ continue;
+ /* else
+ * There is an old value, but no new value ('no' (unset)
+ * isn't saved in auto.conf, so the old value is always
+ * different from 'no').
+ */
+
+ /* Replace all '_' and append ".h" */
+ s = sym->name;
+ d = path;
+ while ((c = *s++)) {
+ c = tolower(c);
+ *d++ = (c == '_') ? '/' : c;
+ }
+ strcpy(d, ".h");
+
+ /* Assume directory path already exists. */
+ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (fd == -1) {
+ if (errno != ENOENT) {
+ res = 1;
+ break;
+ }
+ /*
+ * Create directory components,
+ * unless they exist already.
+ */
+ d = path;
+ while ((d = strchr(d, '/'))) {
+ *d = 0;
+ if (stat(path, &sb) && mkdir(path, 0755)) {
+ res = 1;
+ goto out;
+ }
+ *d++ = '/';
+ }
+ /* Try it again. */
+ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (fd == -1) {
+ res = 1;
+ break;
+ }
+ }
+ close(fd);
}
- sprintf(tmpname, "%s%s", dirname, basename);
- if (rename(newname, tmpname))
+out:
+ if (chdir("../.."))
return 1;
- sym_change_count = 0;
+ return res;
+}
+
+int conf_write_autoconf(void)
+{
+ struct symbol *sym;
+ const char *str;
+ char *name;
+ FILE *out, *out_h;
+ time_t now;
+ int i, l;
+
+ sym_clear_all_valid();
+
+ file_write_dep("include/config/auto.conf.cmd");
+
+ if (conf_split_config())
+ return 1;
+
+ out = fopen(".tmpconfig", "w");
+ if (!out)
+ return 1;
+
+ out_h = fopen(".tmpconfig.h", "w");
+ if (!out_h) {
+ fclose(out);
+ return 1;
+ }
+
+ sym = sym_lookup("KERNELVERSION", 0);
+ sym_calc_value(sym);
+ time(&now);
+ fprintf(out, "#\n"
+ "# Automatically generated make config: don't edit\n"
+ "# Linux kernel version: %s\n"
+ "# %s"
+ "#\n",
+ sym_get_string_value(sym), ctime(&now));
+ fprintf(out_h, "/*\n"
+ " * Automatically generated C config: don't edit\n"
+ " * Linux kernel version: %s\n"
+ " * %s"
+ " */\n"
+ "#define AUTOCONF_INCLUDED\n",
+ sym_get_string_value(sym), ctime(&now));
+
+ for_all_symbols(i, sym) {
+ sym_calc_value(sym);
+ if (!(sym->flags & SYMBOL_WRITE) || !sym->name)
+ continue;
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ switch (sym_get_tristate_value(sym)) {
+ case no:
+ break;
+ case mod:
+ fprintf(out, "CONFIG_%s=m\n", sym->name);
+ fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
+ break;
+ case yes:
+ fprintf(out, "CONFIG_%s=y\n", sym->name);
+ fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
+ break;
+ }
+ break;
+ case S_STRING:
+ str = sym_get_string_value(sym);
+ fprintf(out, "CONFIG_%s=\"", sym->name);
+ fprintf(out_h, "#define CONFIG_%s \"", sym->name);
+ while (1) {
+ l = strcspn(str, "\"\\");
+ if (l) {
+ fwrite(str, l, 1, out);
+ fwrite(str, l, 1, out_h);
+ str += l;
+ }
+ if (!*str)
+ break;
+ fprintf(out, "\\%c", *str);
+ fprintf(out_h, "\\%c", *str);
+ str++;
+ }
+ fputs("\"\n", out);
+ fputs("\"\n", out_h);
+ break;
+ case S_HEX:
+ str = sym_get_string_value(sym);
+ if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) {
+ fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
+ fprintf(out_h, "#define CONFIG_%s 0x%s\n", sym->name, str);
+ break;
+ }
+ case S_INT:
+ str = sym_get_string_value(sym);
+ fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
+ fprintf(out_h, "#define CONFIG_%s %s\n", sym->name, str);
+ break;
+ default:
+ break;
+ }
+ }
+ fclose(out);
+ fclose(out_h);
+
+ name = getenv("KCONFIG_AUTOHEADER");
+ if (!name)
+ name = "include/linux/autoconf.h";
+ if (rename(".tmpconfig.h", name))
+ return 1;
+ name = getenv("KCONFIG_AUTOCONFIG");
+ if (!name)
+ name = "include/config/auto.conf";
+ /*
+ * This must be the last step, kbuild has a dependency on auto.conf
+ * and this marks the successful completion of the previous steps.
+ */
+ if (rename(".tmpconfig", name))
+ return 1;
return 0;
}
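
This confdata.c rework is where the work of the removed split-include step ends up: conf_write() now only produces the .config-style file, while conf_write_autoconf() writes auto.conf and autoconf.h and calls conf_split_config() to refresh the per-symbol headers under include/config/, renaming auto.conf last so its presence marks a complete update. The output locations honour the environment variables named in the hunk (KCONFIG_CONFIG, KCONFIG_OVERWRITECONFIG, KCONFIG_AUTOCONFIG, KCONFIG_AUTOHEADER) with the defaults shown. Below is a small sketch of that environment-override lookup using those defaults; the helper name is made up for illustration.

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative only: return the override from the environment, else the
	 * in-tree default, mirroring the lookups in the hunk above. */
	static const char *path_or_default(const char *env, const char *fallback)
	{
		const char *name = getenv(env);

		return name ? name : fallback;
	}

	int main(void)
	{
		printf("config: %s\n", path_or_default("KCONFIG_CONFIG", ".config"));
		printf("auto:   %s\n",
		       path_or_default("KCONFIG_AUTOCONFIG", "include/config/auto.conf"));
		printf("header: %s\n",
		       path_or_default("KCONFIG_AUTOHEADER", "include/linux/autoconf.h"));
		return 0;
	}
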
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 30e4f9d69c2f..6f98dbfe70cf 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -145,7 +145,8 @@ static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct e
return;
}
if (e1->type == E_SYMBOL && e2->type == E_SYMBOL &&
- e1->left.sym == e2->left.sym && (e1->left.sym->flags & (SYMBOL_YES|SYMBOL_NO)))
+ e1->left.sym == e2->left.sym &&
+ (e1->left.sym == &symbol_yes || e1->left.sym == &symbol_no))
return;
if (!expr_eq(e1, e2))
return;
@@ -1012,73 +1013,73 @@ int expr_compare_type(enum expr_type t1, enum expr_type t2)
#endif
}
-void expr_print(struct expr *e, void (*fn)(void *, const char *), void *data, int prevtoken)
+void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken)
{
if (!e) {
- fn(data, "y");
+ fn(data, NULL, "y");
return;
}
if (expr_compare_type(prevtoken, e->type) > 0)
- fn(data, "(");
+ fn(data, NULL, "(");
switch (e->type) {
case E_SYMBOL:
if (e->left.sym->name)
- fn(data, e->left.sym->name);
+ fn(data, e->left.sym, e->left.sym->name);
else
- fn(data, "<choice>");
+ fn(data, NULL, "<choice>");
break;
case E_NOT:
- fn(data, "!");
+ fn(data, NULL, "!");
expr_print(e->left.expr, fn, data, E_NOT);
break;
case E_EQUAL:
- fn(data, e->left.sym->name);
- fn(data, "=");
- fn(data, e->right.sym->name);
+ fn(data, e->left.sym, e->left.sym->name);
+ fn(data, NULL, "=");
+ fn(data, e->right.sym, e->right.sym->name);
break;
case E_UNEQUAL:
- fn(data, e->left.sym->name);
- fn(data, "!=");
- fn(data, e->right.sym->name);
+ fn(data, e->left.sym, e->left.sym->name);
+ fn(data, NULL, "!=");
+ fn(data, e->right.sym, e->right.sym->name);
break;
case E_OR:
expr_print(e->left.expr, fn, data, E_OR);
- fn(data, " || ");
+ fn(data, NULL, " || ");
expr_print(e->right.expr, fn, data, E_OR);
break;
case E_AND:
expr_print(e->left.expr, fn, data, E_AND);
- fn(data, " && ");
+ fn(data, NULL, " && ");
expr_print(e->right.expr, fn, data, E_AND);
break;
case E_CHOICE:
- fn(data, e->right.sym->name);
+ fn(data, e->right.sym, e->right.sym->name);
if (e->left.expr) {
- fn(data, " ^ ");
+ fn(data, NULL, " ^ ");
expr_print(e->left.expr, fn, data, E_CHOICE);
}
break;
case E_RANGE:
- fn(data, "[");
- fn(data, e->left.sym->name);
- fn(data, " ");
- fn(data, e->right.sym->name);
- fn(data, "]");
+ fn(data, NULL, "[");
+ fn(data, e->left.sym, e->left.sym->name);
+ fn(data, NULL, " ");
+ fn(data, e->right.sym, e->right.sym->name);
+ fn(data, NULL, "]");
break;
default:
{
char buf[32];
sprintf(buf, "<unknown type %d>", e->type);
- fn(data, buf);
+ fn(data, NULL, buf);
break;
}
}
if (expr_compare_type(prevtoken, e->type) > 0)
- fn(data, ")");
+ fn(data, NULL, ")");
}
-static void expr_print_file_helper(void *data, const char *str)
+static void expr_print_file_helper(void *data, struct symbol *sym, const char *str)
{
fwrite(str, strlen(str), 1, data);
}
@@ -1088,7 +1089,7 @@ void expr_fprint(struct expr *e, FILE *out)
expr_print(e, expr_print_file_helper, out, E_NONE);
}
-static void expr_print_gstr_helper(void *data, const char *str)
+static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str)
{
str_append((struct gstr*)data, str);
}
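
expr_print() now hands its output callback the struct symbol behind each printed name (NULL for operators and punctuation), and the plain-file and gstr helpers simply ignore the extra argument. The point is presumably to let a front end decorate symbol names; the reworked qconf info view further down parses "s%p" style links, which matches that use. Below is a sketch of a callback written against the new three-argument signature; the struct definition and the markup are stand-ins for illustration, not qconf's actual code.

	#include <stdio.h>

	struct symbol { const char *name; };	/* stand-in, not kconfig's struct */

	/* Operators arrive with sym == NULL; symbol names carry their symbol, so
	 * a GUI could wrap them in links (the "s%p" form is only an example). */
	static void print_html_helper(void *data, struct symbol *sym, const char *str)
	{
		FILE *out = data;

		if (sym)
			fprintf(out, "<a href=\"s%p\">%s</a>", (void *)sym, str);
		else
			fputs(str, out);
	}

	int main(void)
	{
		struct symbol foo = { "FOO" };

		print_html_helper(stdout, &foo, foo.name);
		print_html_helper(stdout, NULL, " && ");
		print_html_helper(stdout, NULL, "BAR");
		fputs("\n", stdout);
		return 0;
	}
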
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 1b36ef18c48d..6084525f604b 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -63,12 +63,18 @@ enum symbol_type {
S_UNKNOWN, S_BOOLEAN, S_TRISTATE, S_INT, S_HEX, S_STRING, S_OTHER
};
+enum {
+ S_DEF_USER, /* main user value */
+ S_DEF_AUTO,
+};
+
struct symbol {
struct symbol *next;
char *name;
char *help;
enum symbol_type type;
- struct symbol_value curr, user;
+ struct symbol_value curr;
+ struct symbol_value def[4];
tristate visible;
int flags;
struct property *prop;
@@ -78,10 +84,7 @@ struct symbol {
#define for_all_symbols(i, sym) for (i = 0; i < 257; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER)
-#define SYMBOL_YES 0x0001
-#define SYMBOL_MOD 0x0002
-#define SYMBOL_NO 0x0004
-#define SYMBOL_CONST 0x0007
+#define SYMBOL_CONST 0x0001
#define SYMBOL_CHECK 0x0008
#define SYMBOL_CHOICE 0x0010
#define SYMBOL_CHOICEVAL 0x0020
@@ -90,10 +93,14 @@ struct symbol {
#define SYMBOL_OPTIONAL 0x0100
#define SYMBOL_WRITE 0x0200
#define SYMBOL_CHANGED 0x0400
-#define SYMBOL_NEW 0x0800
#define SYMBOL_AUTO 0x1000
#define SYMBOL_CHECKED 0x2000
#define SYMBOL_WARNED 0x8000
+#define SYMBOL_DEF 0x10000
+#define SYMBOL_DEF_USER 0x10000
+#define SYMBOL_DEF_AUTO 0x20000
+#define SYMBOL_DEF3 0x40000
+#define SYMBOL_DEF4 0x80000
#define SYMBOL_MAXLENGTH 256
#define SYMBOL_HASHSIZE 257
@@ -149,6 +156,7 @@ struct file *lookup_file(const char *name);
extern struct symbol symbol_yes, symbol_no, symbol_mod;
extern struct symbol *modules_sym;
+extern struct symbol *sym_defconfig_list;
extern int cdebug;
struct expr *expr_alloc_symbol(struct symbol *sym);
struct expr *expr_alloc_one(enum expr_type type, struct expr *ce);
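
The new per-origin storage hangs together as follows: each origin (S_DEF_USER for .config, S_DEF_AUTO for auto.conf) indexes both the sym->def[] slot holding the value and, via def_flags = SYMBOL_DEF << def in conf_read_simple(), the SYMBOL_DEF_* bit recording that the slot is populated; def[] is sized 4, with SYMBOL_DEF3/SYMBOL_DEF4 reserved for two more origins. Below is a tiny compile-time check of that correspondence, using the constants from this hunk (C11 static_assert).

	#include <assert.h>

	enum { S_DEF_USER, S_DEF_AUTO };

	#define SYMBOL_DEF	0x10000
	#define SYMBOL_DEF_USER	0x10000
	#define SYMBOL_DEF_AUTO	0x20000

	/* def_flags = SYMBOL_DEF << def picks the matching SYMBOL_DEF_* bit */
	static_assert((SYMBOL_DEF << S_DEF_USER) == SYMBOL_DEF_USER,
		      "user values populate def[S_DEF_USER]");
	static_assert((SYMBOL_DEF << S_DEF_AUTO) == SYMBOL_DEF_AUTO,
		      "auto.conf values populate def[S_DEF_AUTO]");

	int main(void)
	{
		return 0;
	}
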
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 665bd5300a19..7b0d3a93d5c0 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -114,12 +114,6 @@ const char *dbg_print_flags(int val)
bzero(buf, 256);
- if (val & SYMBOL_YES)
- strcat(buf, "yes/");
- if (val & SYMBOL_MOD)
- strcat(buf, "mod/");
- if (val & SYMBOL_NO)
- strcat(buf, "no/");
if (val & SYMBOL_CONST)
strcat(buf, "const/");
if (val & SYMBOL_CHECK)
@@ -138,8 +132,6 @@ const char *dbg_print_flags(int val)
strcat(buf, "write/");
if (val & SYMBOL_CHANGED)
strcat(buf, "changed/");
- if (val & SYMBOL_NEW)
- strcat(buf, "new/");
if (val & SYMBOL_AUTO)
strcat(buf, "auto/");
@@ -1192,9 +1184,7 @@ static gchar **fill_row(struct menu *menu)
row[COL_OPTION] =
g_strdup_printf("%s %s", menu_get_prompt(menu),
- sym ? (sym->
- flags & SYMBOL_NEW ? "(NEW)" : "") :
- "");
+ sym && sym_has_value(sym) ? "(NEW)" : "");
if (show_all && !menu_is_visible(menu))
row[COL_COLOR] = g_strdup("DarkGray");
diff --git a/scripts/kconfig/lex.zconf.c_shipped b/scripts/kconfig/lex.zconf.c_shipped
index 24e3c8cbb7ac..800f8c71c407 100644
--- a/scripts/kconfig/lex.zconf.c_shipped
+++ b/scripts/kconfig/lex.zconf.c_shipped
@@ -8,7 +8,7 @@
#define FLEX_SCANNER
#define YY_FLEX_MAJOR_VERSION 2
#define YY_FLEX_MINOR_VERSION 5
-#define YY_FLEX_SUBMINOR_VERSION 31
+#define YY_FLEX_SUBMINOR_VERSION 33
#if YY_FLEX_SUBMINOR_VERSION > 0
#define FLEX_BETA
#endif
@@ -30,7 +30,15 @@
/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
-#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
+#if __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
#include <inttypes.h>
typedef int8_t flex_int8_t;
typedef uint8_t flex_uint8_t;
@@ -134,6 +142,10 @@ typedef unsigned int flex_uint32_t;
#define YY_BUF_SIZE 16384
#endif
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
#ifndef YY_TYPEDEF_YY_BUFFER_STATE
#define YY_TYPEDEF_YY_BUFFER_STATE
typedef struct yy_buffer_state *YY_BUFFER_STATE;
@@ -267,7 +279,7 @@ int zconfleng;
/* Points to current character in buffer. */
static char *yy_c_buf_p = (char *) 0;
-static int yy_init = 1; /* whether we need to initialize */
+static int yy_init = 0; /* whether we need to initialize */
static int yy_start = 0; /* start state number */
/* Flag which is used to allow zconfwrap()'s to do buffer switches
@@ -820,6 +832,8 @@ void alloc_string(const char *str, int size)
#define YY_EXTRA_TYPE void *
#endif
+static int yy_init_globals (void );
+
/* Macros after this point can all be overridden by user definitions in
* section 1.
*/
@@ -942,9 +956,9 @@ YY_DECL
int str = 0;
int ts, i;
- if ( (yy_init) )
+ if ( !(yy_init) )
{
- (yy_init) = 0;
+ (yy_init) = 1;
#ifdef YY_USER_INIT
YY_USER_INIT;
@@ -1452,7 +1466,7 @@ static int yy_get_next_buffer (void)
else
{
- size_t num_to_read =
+ int num_to_read =
YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
while ( num_to_read <= 0 )
@@ -1969,16 +1983,16 @@ YY_BUFFER_STATE zconf_scan_buffer (char * base, yy_size_t size )
/** Setup the input buffer state to scan a string. The next call to zconflex() will
* scan from a @e copy of @a str.
- * @param yy_str a NUL-terminated string to scan
+ * @param yystr a NUL-terminated string to scan
*
* @return the newly allocated buffer state object.
* @note If you want to scan bytes that may contain NUL values, then use
* zconf_scan_bytes() instead.
*/
-YY_BUFFER_STATE zconf_scan_string (yyconst char * yy_str )
+YY_BUFFER_STATE zconf_scan_string (yyconst char * yystr )
{
- return zconf_scan_bytes(yy_str,strlen(yy_str) );
+ return zconf_scan_bytes(yystr,strlen(yystr) );
}
/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
@@ -1988,7 +2002,7 @@ YY_BUFFER_STATE zconf_scan_string (yyconst char * yy_str )
*
* @return the newly allocated buffer state object.
*/
-YY_BUFFER_STATE zconf_scan_bytes (yyconst char * bytes, int len )
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char * yybytes, int _yybytes_len )
{
YY_BUFFER_STATE b;
char *buf;
@@ -1996,15 +2010,15 @@ YY_BUFFER_STATE zconf_scan_bytes (yyconst char * bytes, int len )
int i;
/* Get memory for full buffer, including space for trailing EOB's. */
- n = len + 2;
+ n = _yybytes_len + 2;
buf = (char *) zconfalloc(n );
if ( ! buf )
YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
- for ( i = 0; i < len; ++i )
- buf[i] = bytes[i];
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
- buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
b = zconf_scan_buffer(buf,n );
if ( ! b )
@@ -2125,6 +2139,34 @@ void zconfset_debug (int bdebug )
zconf_flex_debug = bdebug ;
}
+static int yy_init_globals (void)
+{
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from zconflex_destroy(), so don't allocate here.
+ */
+
+ (yy_buffer_stack) = 0;
+ (yy_buffer_stack_top) = 0;
+ (yy_buffer_stack_max) = 0;
+ (yy_c_buf_p) = (char *) 0;
+ (yy_init) = 0;
+ (yy_start) = 0;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ zconfin = stdin;
+ zconfout = stdout;
+#else
+ zconfin = (FILE *) 0;
+ zconfout = (FILE *) 0;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * zconflex_init()
+ */
+ return 0;
+}
+
/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
int zconflex_destroy (void)
{
@@ -2140,6 +2182,10 @@ int zconflex_destroy (void)
zconffree((yy_buffer_stack) );
(yy_buffer_stack) = NULL;
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * zconflex() is called, initialization will occur. */
+ yy_init_globals( );
+
return 0;
}
@@ -2151,7 +2197,7 @@ int zconflex_destroy (void)
static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
{
register int i;
- for ( i = 0; i < n; ++i )
+ for ( i = 0; i < n; ++i )
s1[i] = s2[i];
}
#endif
@@ -2160,7 +2206,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
static int yy_flex_strlen (yyconst char * s )
{
register int n;
- for ( n = 0; s[n]; ++n )
+ for ( n = 0; s[n]; ++n )
;
return n;
@@ -2191,19 +2237,6 @@ void zconffree (void * ptr )
#define YYTABLES_NAME "yytables"
-#undef YY_NEW_FILE
-#undef YY_FLUSH_BUFFER
-#undef yy_set_bol
-#undef yy_new_buffer
-#undef yy_set_interactive
-#undef yytext_ptr
-#undef YY_DO_BEFORE_ACTION
-
-#ifdef YY_DECL_IS_OURS
-#undef YY_DECL_IS_OURS
-#undef YY_DECL
-#endif
-
void zconf_starthelp(void)
{
new_string();
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 527f60c99c50..2628023a1fe1 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -40,6 +40,10 @@ extern "C" {
#define TF_COMMAND 0x0001
#define TF_PARAM 0x0002
+#define TF_OPTION 0x0004
+
+#define T_OPT_MODULES 1
+#define T_OPT_DEFCONFIG_LIST 2
struct kconf_id {
int name;
@@ -60,8 +64,6 @@ int zconf_lineno(void);
char *zconf_curname(void);
/* confdata.c */
-extern const char conf_def_filename[];
-
char *conf_get_default_confname(void);
/* kconfig_load.c */
@@ -78,6 +80,7 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
+void menu_add_option(int token, char *arg);
void menu_finalize(struct menu *parent);
void menu_set_type(int type);
@@ -99,6 +102,7 @@ const char *str_get(struct gstr *gs);
/* symbol.c */
void sym_init(void);
void sym_clear_all_valid(void);
+void sym_set_all_changed(void);
void sym_set_changed(struct symbol *sym);
struct symbol *sym_check_deps(struct symbol *sym);
struct property *prop_alloc(enum prop_type type, struct symbol *sym);
@@ -137,7 +141,7 @@ static inline bool sym_is_optional(struct symbol *sym)
static inline bool sym_has_value(struct symbol *sym)
{
- return sym->flags & SYMBOL_NEW ? false : true;
+ return sym->flags & SYMBOL_DEF_USER ? true : false;
}
#ifdef __cplusplus
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index b6a389c5fcbd..a263746cfa7d 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -2,8 +2,9 @@
/* confdata.c */
P(conf_parse,void,(const char *name));
P(conf_read,int,(const char *name));
-P(conf_read_simple,int,(const char *name));
+P(conf_read_simple,int,(const char *name, int));
P(conf_write,int,(const char *name));
+P(conf_write_autoconf,int,(void));
/* menu.c */
P(rootmenu,struct menu,);
@@ -38,4 +39,4 @@ P(prop_get_type_name,const char *,(enum prop_type type));
/* expr.c */
P(expr_compare_type,int,(enum expr_type t1, enum expr_type t2));
-P(expr_print,void,(struct expr *e, void (*fn)(void *, const char *), void *data, int prevtoken));
+P(expr_print,void,(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken));
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 0fce20cb7f3c..c86c27f2c761 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -114,7 +114,7 @@ void menu_set_type(int type)
sym->type = type;
return;
}
- menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'\n",
+ menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'",
sym->name ? sym->name : "<choice>",
sym_type_name(sym->type), sym_type_name(type));
}
@@ -124,15 +124,20 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
struct property *prop = prop_alloc(type, current_entry->sym);
prop->menu = current_entry;
- prop->text = prompt;
prop->expr = expr;
prop->visible.expr = menu_check_dep(dep);
if (prompt) {
+ if (isspace(*prompt)) {
+ prop_warn(prop, "leading whitespace ignored");
+ while (isspace(*prompt))
+ prompt++;
+ }
if (current_entry->prompt)
- menu_warn(current_entry, "prompt redefined\n");
+ prop_warn(prop, "prompt redefined");
current_entry->prompt = prop;
}
+ prop->text = prompt;
return prop;
}
@@ -152,6 +157,24 @@ void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep)
menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep);
}
+void menu_add_option(int token, char *arg)
+{
+ struct property *prop;
+
+ switch (token) {
+ case T_OPT_MODULES:
+ prop = prop_alloc(P_DEFAULT, modules_sym);
+ prop->expr = expr_alloc_symbol(current_entry->sym);
+ break;
+ case T_OPT_DEFCONFIG_LIST:
+ if (!sym_defconfig_list)
+ sym_defconfig_list = current_entry->sym;
+ else if (sym_defconfig_list != current_entry->sym)
+ zconf_error("trying to redefine defconfig symbol");
+ break;
+ }
+}
+
static int menu_range_valid_sym(struct symbol *sym, struct symbol *sym2)
{
return sym2->type == S_INT || sym2->type == S_HEX ||
@@ -325,11 +348,10 @@ void menu_finalize(struct menu *parent)
if (sym && !(sym->flags & SYMBOL_WARNED)) {
if (sym->type == S_UNKNOWN)
- menu_warn(parent, "config symbol defined "
- "without type\n");
+ menu_warn(parent, "config symbol defined without type");
if (sym_is_choice(sym) && !parent->prompt)
- menu_warn(parent, "choice must have a prompt\n");
+ menu_warn(parent, "choice must have a prompt");
/* Check properties connected to this symbol */
sym_check_prop(sym);
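
menu_add_option() above is the hook behind kconfig's new per-symbol options: T_OPT_MODULES makes the current symbol a default of the modules symbol, and T_OPT_DEFCONFIG_LIST nominates it as the defconfig list whose default entries replace the hard-coded conf_confnames[] table removed from confdata.c earlier in this diff (judging by the TF_OPTION token flag added to lkc.h, this is driven by a new option keyword in the Kconfig grammar).
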
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 4590cd31623f..393f3749f330 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -6,16 +6,20 @@
#include <qapplication.h>
#include <qmainwindow.h>
#include <qtoolbar.h>
+#include <qlayout.h>
#include <qvbox.h>
#include <qsplitter.h>
#include <qlistview.h>
-#include <qtextview.h>
+#include <qtextbrowser.h>
#include <qlineedit.h>
+#include <qlabel.h>
+#include <qpushbutton.h>
#include <qmenubar.h>
#include <qmessagebox.h>
#include <qaction.h>
#include <qheader.h>
#include <qfiledialog.h>
+#include <qdragobject.h>
#include <qregexp.h>
#include <stdlib.h>
@@ -32,32 +36,16 @@
#endif
static QApplication *configApp;
+static ConfigSettings *configSettings;
static inline QString qgettext(const char* str)
{
- return QString::fromLocal8Bit(gettext(str));
+ return QString::fromLocal8Bit(gettext(str));
}
static inline QString qgettext(const QString& str)
{
- return QString::fromLocal8Bit(gettext(str.latin1()));
-}
-
-ConfigSettings::ConfigSettings()
- : showAll(false), showName(false), showRange(false), showData(false)
-{
-}
-
-#if QT_VERSION >= 300
-/**
- * Reads the list column settings from the application settings.
- */
-void ConfigSettings::readListSettings()
-{
- showAll = readBoolEntry("/kconfig/qconf/showAll", false);
- showName = readBoolEntry("/kconfig/qconf/showName", false);
- showRange = readBoolEntry("/kconfig/qconf/showRange", false);
- showData = readBoolEntry("/kconfig/qconf/showData", false);
+ return QString::fromLocal8Bit(gettext(str.latin1()));
}
/**
@@ -88,76 +76,7 @@ bool ConfigSettings::writeSizes(const QString& key, const QValueList<int>& value
stringList.push_back(QString::number(*it));
return writeEntry(key, stringList);
}
-#endif
-
-
-/*
- * update all the children of a menu entry
- * removes/adds the entries from the parent widget as necessary
- *
- * parent: either the menu list widget or a menu entry widget
- * menu: entry to be updated
- */
-template <class P>
-void ConfigList::updateMenuList(P* parent, struct menu* menu)
-{
- struct menu* child;
- ConfigItem* item;
- ConfigItem* last;
- bool visible;
- enum prop_type type;
- if (!menu) {
- while ((item = parent->firstChild()))
- delete item;
- return;
- }
-
- last = parent->firstChild();
- if (last && !last->goParent)
- last = 0;
- for (child = menu->list; child; child = child->next) {
- item = last ? last->nextSibling() : parent->firstChild();
- type = child->prompt ? child->prompt->type : P_UNKNOWN;
-
- switch (mode) {
- case menuMode:
- if (!(child->flags & MENU_ROOT))
- goto hide;
- break;
- case symbolMode:
- if (child->flags & MENU_ROOT)
- goto hide;
- break;
- default:
- break;
- }
-
- visible = menu_is_visible(child);
- if (showAll || visible) {
- if (!item || item->menu != child)
- item = new ConfigItem(parent, last, child, visible);
- else
- item->testUpdateMenu(visible);
-
- if (mode == fullMode || mode == menuMode || type != P_MENU)
- updateMenuList(item, child);
- else
- updateMenuList(item, 0);
- last = item;
- continue;
- }
- hide:
- if (item && item->menu == child) {
- last = parent->firstChild();
- if (last == item)
- last = 0;
- else while (last->nextSibling() != item)
- last = last->nextSibling();
- delete item;
- }
- }
-}
#if QT_VERSION >= 300
/*
@@ -355,6 +274,12 @@ ConfigItem::~ConfigItem(void)
}
}
+ConfigLineEdit::ConfigLineEdit(ConfigView* parent)
+ : Parent(parent)
+{
+ connect(this, SIGNAL(lostFocus()), SLOT(hide()));
+}
+
void ConfigLineEdit::show(ConfigItem* i)
{
item = i;
@@ -385,14 +310,14 @@ void ConfigLineEdit::keyPressEvent(QKeyEvent* e)
hide();
}
-ConfigList::ConfigList(ConfigView* p, ConfigMainWindow* cv, ConfigSettings* configSettings)
- : Parent(p), cview(cv),
+ConfigList::ConfigList(ConfigView* p, const char *name)
+ : Parent(p, name),
updateAll(false),
symbolYesPix(xpm_symbol_yes), symbolModPix(xpm_symbol_mod), symbolNoPix(xpm_symbol_no),
choiceYesPix(xpm_choice_yes), choiceNoPix(xpm_choice_no),
menuPix(xpm_menu), menuInvPix(xpm_menu_inv), menuBackPix(xpm_menuback), voidPix(xpm_void),
showAll(false), showName(false), showRange(false), showData(false),
- rootEntry(0)
+ rootEntry(0), headerPopup(0)
{
int i;
@@ -406,11 +331,14 @@ ConfigList::ConfigList(ConfigView* p, ConfigMainWindow* cv, ConfigSettings* conf
connect(this, SIGNAL(selectionChanged(void)),
SLOT(updateSelection(void)));
- if (configSettings) {
- showAll = configSettings->showAll;
- showName = configSettings->showName;
- showRange = configSettings->showRange;
- showData = configSettings->showData;
+ if (name) {
+ configSettings->beginGroup(name);
+ showAll = configSettings->readBoolEntry("/showAll", false);
+ showName = configSettings->readBoolEntry("/showName", false);
+ showRange = configSettings->readBoolEntry("/showRange", false);
+ showData = configSettings->readBoolEntry("/showData", false);
+ configSettings->endGroup();
+ connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
}
for (i = 0; i < colNr; i++)
@@ -441,6 +369,30 @@ void ConfigList::reinit(void)
updateListAll();
}
+void ConfigList::saveSettings(void)
+{
+ if (name()) {
+ configSettings->beginGroup(name());
+ configSettings->writeEntry("/showName", showName);
+ configSettings->writeEntry("/showRange", showRange);
+ configSettings->writeEntry("/showData", showData);
+ configSettings->writeEntry("/showAll", showAll);
+ configSettings->endGroup();
+ }
+}
+
+ConfigItem* ConfigList::findConfigItem(struct menu *menu)
+{
+ ConfigItem* item = (ConfigItem*)menu->data;
+
+ for (; item; item = item->nextItem) {
+ if (this == item->listView())
+ break;
+ }
+
+ return item;
+}
+
void ConfigList::updateSelection(void)
{
struct menu *menu;
@@ -450,9 +402,8 @@ void ConfigList::updateSelection(void)
if (!item)
return;
- cview->setHelp(item);
-
menu = item->menu;
+ emit menuChanged(menu);
if (!menu)
return;
type = menu->prompt ? menu->prompt->type : P_UNKNOWN;
@@ -464,8 +415,20 @@ void ConfigList::updateList(ConfigItem* item)
{
ConfigItem* last = 0;
- if (!rootEntry)
- goto update;
+ if (!rootEntry) {
+ if (mode != listMode)
+ goto update;
+ QListViewItemIterator it(this);
+ ConfigItem* item;
+
+ for (; it.current(); ++it) {
+ item = (ConfigItem*)it.current();
+ if (!item->menu)
+ continue;
+ item->testUpdateMenu(menu_is_visible(item->menu));
+ }
+ return;
+ }
if (rootEntry != &rootmenu && (mode == singleMode ||
(mode == symbolMode && rootEntry->parent != &rootmenu))) {
@@ -491,14 +454,6 @@ update:
triggerUpdate();
}
-void ConfigList::setAllOpen(bool open)
-{
- QListViewItemIterator it(this);
-
- for (; it.current(); it++)
- it.current()->setOpen(open);
-}
-
void ConfigList::setValue(ConfigItem* item, tristate val)
{
struct symbol* sym;
@@ -581,6 +536,7 @@ void ConfigList::setRootMenu(struct menu *menu)
rootEntry = menu;
updateListAll();
setSelected(currentItem(), hasFocus());
+ ensureItemVisible(currentItem());
}
void ConfigList::setParentMenu(void)
@@ -603,6 +559,74 @@ void ConfigList::setParentMenu(void)
}
}
+/*
+ * update all the children of a menu entry
+ * removes/adds the entries from the parent widget as necessary
+ *
+ * parent: either the menu list widget or a menu entry widget
+ * menu: entry to be updated
+ */
+template <class P>
+void ConfigList::updateMenuList(P* parent, struct menu* menu)
+{
+ struct menu* child;
+ ConfigItem* item;
+ ConfigItem* last;
+ bool visible;
+ enum prop_type type;
+
+ if (!menu) {
+ while ((item = parent->firstChild()))
+ delete item;
+ return;
+ }
+
+ last = parent->firstChild();
+ if (last && !last->goParent)
+ last = 0;
+ for (child = menu->list; child; child = child->next) {
+ item = last ? last->nextSibling() : parent->firstChild();
+ type = child->prompt ? child->prompt->type : P_UNKNOWN;
+
+ switch (mode) {
+ case menuMode:
+ if (!(child->flags & MENU_ROOT))
+ goto hide;
+ break;
+ case symbolMode:
+ if (child->flags & MENU_ROOT)
+ goto hide;
+ break;
+ default:
+ break;
+ }
+
+ visible = menu_is_visible(child);
+ if (showAll || visible) {
+ if (!item || item->menu != child)
+ item = new ConfigItem(parent, last, child, visible);
+ else
+ item->testUpdateMenu(visible);
+
+ if (mode == fullMode || mode == menuMode || type != P_MENU)
+ updateMenuList(item, child);
+ else
+ updateMenuList(item, 0);
+ last = item;
+ continue;
+ }
+ hide:
+ if (item && item->menu == child) {
+ last = parent->firstChild();
+ if (last == item)
+ last = 0;
+ else while (last->nextSibling() != item)
+ last = last->nextSibling();
+ delete item;
+ }
+ }
+}
+
void ConfigList::keyPressEvent(QKeyEvent* ev)
{
QListViewItem* i = currentItem();
@@ -610,7 +634,7 @@ void ConfigList::keyPressEvent(QKeyEvent* ev)
struct menu *menu;
enum prop_type type;
- if (ev->key() == Key_Escape && mode != fullMode) {
+ if (ev->key() == Key_Escape && mode != fullMode && mode != listMode) {
emit parentSelected();
ev->accept();
return;
@@ -755,23 +779,62 @@ skip:
void ConfigList::focusInEvent(QFocusEvent *e)
{
+ struct menu *menu = NULL;
+
Parent::focusInEvent(e);
- QListViewItem* item = currentItem();
- if (!item)
- return;
+ ConfigItem* item = (ConfigItem *)currentItem();
+ if (item) {
+ setSelected(item, TRUE);
+ menu = item->menu;
+ }
+ emit gotFocus(menu);
+}
- setSelected(item, TRUE);
- emit gotFocus();
+void ConfigList::contextMenuEvent(QContextMenuEvent *e)
+{
+ if (e->y() <= header()->geometry().bottom()) {
+ if (!headerPopup) {
+ QAction *action;
+
+ headerPopup = new QPopupMenu(this);
+ action = new QAction("Show Name", 0, this);
+ action->setToggleAction(TRUE);
+ connect(action, SIGNAL(toggled(bool)),
+ parent(), SLOT(setShowName(bool)));
+ connect(parent(), SIGNAL(showNameChanged(bool)),
+ action, SLOT(setOn(bool)));
+ action->setOn(showName);
+ action->addTo(headerPopup);
+ action = new QAction("Show Range", 0, this);
+ action->setToggleAction(TRUE);
+ connect(action, SIGNAL(toggled(bool)),
+ parent(), SLOT(setShowRange(bool)));
+ connect(parent(), SIGNAL(showRangeChanged(bool)),
+ action, SLOT(setOn(bool)));
+ action->setOn(showRange);
+ action->addTo(headerPopup);
+ action = new QAction("Show Data", 0, this);
+ action->setToggleAction(TRUE);
+ connect(action, SIGNAL(toggled(bool)),
+ parent(), SLOT(setShowData(bool)));
+ connect(parent(), SIGNAL(showDataChanged(bool)),
+ action, SLOT(setOn(bool)));
+ action->setOn(showData);
+ action->addTo(headerPopup);
+ }
+ headerPopup->exec(e->globalPos());
+ e->accept();
+ } else
+ e->ignore();
}
ConfigView* ConfigView::viewList;
-ConfigView::ConfigView(QWidget* parent, ConfigMainWindow* cview,
- ConfigSettings *configSettings)
- : Parent(parent)
+ConfigView::ConfigView(QWidget* parent, const char *name)
+ : Parent(parent, name)
{
- list = new ConfigList(this, cview, configSettings);
+ list = new ConfigList(this, name);
lineEdit = new ConfigLineEdit(this);
lineEdit->hide();
@@ -791,6 +854,50 @@ ConfigView::~ConfigView(void)
}
}
+void ConfigView::setShowAll(bool b)
+{
+ if (list->showAll != b) {
+ list->showAll = b;
+ list->updateListAll();
+ emit showAllChanged(b);
+ }
+}
+
+void ConfigView::setShowName(bool b)
+{
+ if (list->showName != b) {
+ list->showName = b;
+ list->reinit();
+ emit showNameChanged(b);
+ }
+}
+
+void ConfigView::setShowRange(bool b)
+{
+ if (list->showRange != b) {
+ list->showRange = b;
+ list->reinit();
+ emit showRangeChanged(b);
+ }
+}
+
+void ConfigView::setShowData(bool b)
+{
+ if (list->showData != b) {
+ list->showData = b;
+ list->reinit();
+ emit showDataChanged(b);
+ }
+}
+
+void ConfigList::setAllOpen(bool open)
+{
+ QListViewItemIterator it(this);
+
+ for (; it.current(); it++)
+ it.current()->setOpen(open);
+}
+
void ConfigView::updateList(ConfigItem* item)
{
ConfigView* v;
@@ -807,6 +914,347 @@ void ConfigView::updateListAll(void)
v->list->updateListAll();
}
+ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name)
+ : Parent(parent, name), menu(0)
+{
+ if (name) {
+ configSettings->beginGroup(name);
+ _showDebug = configSettings->readBoolEntry("/showDebug", false);
+ configSettings->endGroup();
+ connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
+ }
+}
+
+void ConfigInfoView::saveSettings(void)
+{
+ if (name()) {
+ configSettings->beginGroup(name());
+ configSettings->writeEntry("/showDebug", showDebug());
+ configSettings->endGroup();
+ }
+}
+
+void ConfigInfoView::setShowDebug(bool b)
+{
+ if (_showDebug != b) {
+ _showDebug = b;
+ if (menu)
+ menuInfo();
+ else if (sym)
+ symbolInfo();
+ emit showDebugChanged(b);
+ }
+}
+
+void ConfigInfoView::setInfo(struct menu *m)
+{
+ if (menu == m)
+ return;
+ menu = m;
+ if (!menu)
+ clear();
+ else
+ menuInfo();
+}
+
+void ConfigInfoView::setSource(const QString& name)
+{
+ const char *p = name.latin1();
+
+ menu = NULL;
+ sym = NULL;
+
+ switch (p[0]) {
+ case 'm':
+ struct menu *m;
+
+ if (sscanf(p, "m%p", &m) == 1 && menu != m) {
+ menu = m;
+ menuInfo();
+ emit menuSelected(menu);
+ }
+ break;
+ case 's':
+ struct symbol *s;
+
+ if (sscanf(p, "s%p", &s) == 1 && sym != s) {
+ sym = s;
+ symbolInfo();
+ }
+ break;
+ }
+}
+
+void ConfigInfoView::symbolInfo(void)
+{
+ QString str;
+
+ str += "<big>Symbol: <b>";
+ str += print_filter(sym->name);
+ str += "</b></big><br><br>value: ";
+ str += print_filter(sym_get_string_value(sym));
+ str += "<br>visibility: ";
+ str += sym->visible == yes ? "y" : sym->visible == mod ? "m" : "n";
+ str += "<br>";
+ str += debug_info(sym);
+
+ setText(str);
+}
+
+void ConfigInfoView::menuInfo(void)
+{
+ struct symbol* sym;
+ QString head, debug, help;
+
+ sym = menu->sym;
+ if (sym) {
+ if (menu->prompt) {
+ head += "<big><b>";
+ head += print_filter(_(menu->prompt->text));
+ head += "</b></big>";
+ if (sym->name) {
+ head += " (";
+ if (showDebug())
+ head += QString().sprintf("<a href=\"s%p\">", sym);
+ head += print_filter(sym->name);
+ if (showDebug())
+ head += "</a>";
+ head += ")";
+ }
+ } else if (sym->name) {
+ head += "<big><b>";
+ if (showDebug())
+ head += QString().sprintf("<a href=\"s%p\">", sym);
+ head += print_filter(sym->name);
+ if (showDebug())
+ head += "</a>";
+ head += "</b></big>";
+ }
+ head += "<br><br>";
+
+ if (showDebug())
+ debug = debug_info(sym);
+
+ help = print_filter(_(sym->help));
+ } else if (menu->prompt) {
+ head += "<big><b>";
+ head += print_filter(_(menu->prompt->text));
+ head += "</b></big><br><br>";
+ if (showDebug()) {
+ if (menu->prompt->visible.expr) {
+ debug += "&nbsp;&nbsp;dep: ";
+ expr_print(menu->prompt->visible.expr, expr_print_help, &debug, E_NONE);
+ debug += "<br><br>";
+ }
+ }
+ }
+ if (showDebug())
+ debug += QString().sprintf("defined at %s:%d<br><br>", menu->file->name, menu->lineno);
+
+ setText(head + debug + help);
+}
+
+QString ConfigInfoView::debug_info(struct symbol *sym)
+{
+ QString debug;
+
+ debug += "type: ";
+ debug += print_filter(sym_type_name(sym->type));
+ if (sym_is_choice(sym))
+ debug += " (choice)";
+ debug += "<br>";
+ if (sym->rev_dep.expr) {
+ debug += "reverse dep: ";
+ expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ }
+ for (struct property *prop = sym->prop; prop; prop = prop->next) {
+ switch (prop->type) {
+ case P_PROMPT:
+ case P_MENU:
+ debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
+ debug += print_filter(_(prop->text));
+ debug += "</a><br>";
+ break;
+ case P_DEFAULT:
+ debug += "default: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ break;
+ case P_CHOICE:
+ if (sym_is_choice(sym)) {
+ debug += "choice: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ }
+ break;
+ case P_SELECT:
+ debug += "select: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ break;
+ case P_RANGE:
+ debug += "range: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ break;
+ default:
+ debug += "unknown property: ";
+ debug += prop_get_type_name(prop->type);
+ debug += "<br>";
+ }
+ if (prop->visible.expr) {
+ debug += "&nbsp;&nbsp;&nbsp;&nbsp;dep: ";
+ expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ }
+ }
+ debug += "<br>";
+
+ return debug;
+}
+
+QString ConfigInfoView::print_filter(const QString &str)
+{
+ QRegExp re("[<>&\"\\n]");
+ QString res = str;
+ for (int i = 0; (i = res.find(re, i)) >= 0;) {
+ switch (res[i].latin1()) {
+ case '<':
+ res.replace(i, 1, "&lt;");
+ i += 4;
+ break;
+ case '>':
+ res.replace(i, 1, "&gt;");
+ i += 4;
+ break;
+ case '&':
+ res.replace(i, 1, "&amp;");
+ i += 5;
+ break;
+ case '"':
+ res.replace(i, 1, "&quot;");
+ i += 6;
+ break;
+ case '\n':
+ res.replace(i, 1, "<br>");
+ i += 4;
+ break;
+ }
+ }
+ return res;
+}
+
+void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char *str)
+{
+ QString* text = reinterpret_cast<QString*>(data);
+ QString str2 = print_filter(str);
+
+ if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
+ *text += QString().sprintf("<a href=\"s%p\">", sym);
+ *text += str2;
+ *text += "</a>";
+ } else
+ *text += str2;
+}
+
+QPopupMenu* ConfigInfoView::createPopupMenu(const QPoint& pos)
+{
+ QPopupMenu* popup = Parent::createPopupMenu(pos);
+ QAction* action = new QAction("Show Debug Info", 0, popup);
+ action->setToggleAction(TRUE);
+ connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
+ action->setOn(showDebug());
+ popup->insertSeparator();
+ action->addTo(popup);
+ return popup;
+}
+
+void ConfigInfoView::contentsContextMenuEvent(QContextMenuEvent *e)
+{
+ Parent::contentsContextMenuEvent(e);
+}
+
+ConfigSearchWindow::ConfigSearchWindow(QWidget* parent, const char *name)
+ : Parent(parent, name), result(NULL)
+{
+ setCaption("Search Config");
+
+ QVBoxLayout* layout1 = new QVBoxLayout(this, 11, 6);
+ QHBoxLayout* layout2 = new QHBoxLayout(0, 0, 6);
+ layout2->addWidget(new QLabel("Find:", this));
+ editField = new QLineEdit(this);
+ connect(editField, SIGNAL(returnPressed()), SLOT(search()));
+ layout2->addWidget(editField);
+ searchButton = new QPushButton("Search", this);
+ searchButton->setAutoDefault(FALSE);
+ connect(searchButton, SIGNAL(clicked()), SLOT(search()));
+ layout2->addWidget(searchButton);
+ layout1->addLayout(layout2);
+
+ split = new QSplitter(this);
+ split->setOrientation(QSplitter::Vertical);
+ list = new ConfigView(split, name);
+ list->list->mode = listMode;
+ info = new ConfigInfoView(split, name);
+ connect(list->list, SIGNAL(menuChanged(struct menu *)),
+ info, SLOT(setInfo(struct menu *)));
+ layout1->addWidget(split);
+
+ if (name) {
+ int x, y, width, height;
+ bool ok;
+
+ configSettings->beginGroup(name);
+ width = configSettings->readNumEntry("/window width", parent->width() / 2);
+ height = configSettings->readNumEntry("/window height", parent->height() / 2);
+ resize(width, height);
+ x = configSettings->readNumEntry("/window x", 0, &ok);
+ if (ok)
+ y = configSettings->readNumEntry("/window y", 0, &ok);
+ if (ok)
+ move(x, y);
+ QValueList<int> sizes = configSettings->readSizes("/split", &ok);
+ if (ok)
+ split->setSizes(sizes);
+ configSettings->endGroup();
+ connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
+ }
+}
+
+void ConfigSearchWindow::saveSettings(void)
+{
+ if (name()) {
+ configSettings->beginGroup(name());
+ configSettings->writeEntry("/window x", pos().x());
+ configSettings->writeEntry("/window y", pos().y());
+ configSettings->writeEntry("/window width", size().width());
+ configSettings->writeEntry("/window height", size().height());
+ configSettings->writeSizes("/split", split->sizes());
+ configSettings->endGroup();
+ }
+}
+
+void ConfigSearchWindow::search(void)
+{
+ struct symbol **p;
+ struct property *prop;
+ ConfigItem *lastItem = NULL;
+
+ free(result);
+ list->list->clear();
+
+ result = sym_re_search(editField->text().latin1());
+ if (!result)
+ return;
+ for (p = result; *p; p++) {
+ for_all_prompts((*p), prop)
+ lastItem = new ConfigItem(list->list, lastItem, prop->menu,
+ menu_is_visible(prop->menu));
+ }
+}
+
/*
* Construct the complete config widget
*/
@@ -818,42 +1266,30 @@ ConfigMainWindow::ConfigMainWindow(void)
QWidget *d = configApp->desktop();
- ConfigSettings* configSettings = new ConfigSettings();
-#if QT_VERSION >= 300
- width = configSettings->readNumEntry("/kconfig/qconf/window width", d->width() - 64);
- height = configSettings->readNumEntry("/kconfig/qconf/window height", d->height() - 64);
+ width = configSettings->readNumEntry("/window width", d->width() - 64);
+ height = configSettings->readNumEntry("/window height", d->height() - 64);
resize(width, height);
- x = configSettings->readNumEntry("/kconfig/qconf/window x", 0, &ok);
+ x = configSettings->readNumEntry("/window x", 0, &ok);
if (ok)
- y = configSettings->readNumEntry("/kconfig/qconf/window y", 0, &ok);
+ y = configSettings->readNumEntry("/window y", 0, &ok);
if (ok)
move(x, y);
- showDebug = configSettings->readBoolEntry("/kconfig/qconf/showDebug", false);
-
- // read list settings into configSettings, will be used later for ConfigList setup
- configSettings->readListSettings();
-#else
- width = d->width() - 64;
- height = d->height() - 64;
- resize(width, height);
- showDebug = false;
-#endif
split1 = new QSplitter(this);
split1->setOrientation(QSplitter::Horizontal);
setCentralWidget(split1);
- menuView = new ConfigView(split1, this, configSettings);
+ menuView = new ConfigView(split1, "menu");
menuList = menuView->list;
split2 = new QSplitter(split1);
split2->setOrientation(QSplitter::Vertical);
// create config tree
- configView = new ConfigView(split2, this, configSettings);
+ configView = new ConfigView(split2, "config");
configList = configView->list;
- helpText = new QTextView(split2);
+ helpText = new ConfigInfoView(split2, "help");
helpText->setTextFormat(Qt::RichText);
setTabOrder(configList, helpText);
@@ -873,6 +1309,8 @@ ConfigMainWindow::ConfigMainWindow(void)
connect(saveAction, SIGNAL(activated()), SLOT(saveConfig()));
QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this);
connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs()));
+ QAction *searchAction = new QAction("Search", "&Search", CTRL+Key_F, this);
+ connect(searchAction, SIGNAL(activated()), SLOT(searchConfig()));
QAction *singleViewAction = new QAction("Single View", QPixmap(xpm_single_view), "Split View", 0, this);
connect(singleViewAction, SIGNAL(activated()), SLOT(showSingleView()));
QAction *splitViewAction = new QAction("Split View", QPixmap(xpm_split_view), "Split View", 0, this);
@@ -882,24 +1320,29 @@ ConfigMainWindow::ConfigMainWindow(void)
QAction *showNameAction = new QAction(NULL, "Show Name", 0, this);
showNameAction->setToggleAction(TRUE);
- showNameAction->setOn(configList->showName);
- connect(showNameAction, SIGNAL(toggled(bool)), SLOT(setShowName(bool)));
+ connect(showNameAction, SIGNAL(toggled(bool)), configView, SLOT(setShowName(bool)));
+ connect(configView, SIGNAL(showNameChanged(bool)), showNameAction, SLOT(setOn(bool)));
+ showNameAction->setOn(configView->showName());
QAction *showRangeAction = new QAction(NULL, "Show Range", 0, this);
showRangeAction->setToggleAction(TRUE);
+ connect(showRangeAction, SIGNAL(toggled(bool)), configView, SLOT(setShowRange(bool)));
+ connect(configView, SIGNAL(showRangeChanged(bool)), showRangeAction, SLOT(setOn(bool)));
showRangeAction->setOn(configList->showRange);
- connect(showRangeAction, SIGNAL(toggled(bool)), SLOT(setShowRange(bool)));
QAction *showDataAction = new QAction(NULL, "Show Data", 0, this);
showDataAction->setToggleAction(TRUE);
+ connect(showDataAction, SIGNAL(toggled(bool)), configView, SLOT(setShowData(bool)));
+ connect(configView, SIGNAL(showDataChanged(bool)), showDataAction, SLOT(setOn(bool)));
showDataAction->setOn(configList->showData);
- connect(showDataAction, SIGNAL(toggled(bool)), SLOT(setShowData(bool)));
QAction *showAllAction = new QAction(NULL, "Show All Options", 0, this);
showAllAction->setToggleAction(TRUE);
+ connect(showAllAction, SIGNAL(toggled(bool)), configView, SLOT(setShowAll(bool)));
+ connect(showAllAction, SIGNAL(toggled(bool)), menuView, SLOT(setShowAll(bool)));
showAllAction->setOn(configList->showAll);
- connect(showAllAction, SIGNAL(toggled(bool)), SLOT(setShowAll(bool)));
QAction *showDebugAction = new QAction(NULL, "Show Debug Info", 0, this);
showDebugAction->setToggleAction(TRUE);
- showDebugAction->setOn(showDebug);
- connect(showDebugAction, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+ connect(showDebugAction, SIGNAL(toggled(bool)), helpText, SLOT(setShowDebug(bool)));
+ connect(helpText, SIGNAL(showDebugChanged(bool)), showDebugAction, SLOT(setOn(bool)));
+ showDebugAction->setOn(helpText->showDebug());
QAction *showIntroAction = new QAction(NULL, "Introduction", 0, this);
connect(showIntroAction, SIGNAL(activated()), SLOT(showIntro()));
@@ -923,6 +1366,8 @@ ConfigMainWindow::ConfigMainWindow(void)
saveAction->addTo(config);
saveAsAction->addTo(config);
config->insertSeparator();
+ searchAction->addTo(config);
+ config->insertSeparator();
quitAction->addTo(config);
// create options menu
@@ -942,20 +1387,27 @@ ConfigMainWindow::ConfigMainWindow(void)
showIntroAction->addTo(helpMenu);
showAboutAction->addTo(helpMenu);
+ connect(configList, SIGNAL(menuChanged(struct menu *)),
+ helpText, SLOT(setInfo(struct menu *)));
connect(configList, SIGNAL(menuSelected(struct menu *)),
SLOT(changeMenu(struct menu *)));
connect(configList, SIGNAL(parentSelected()),
SLOT(goBack()));
+ connect(menuList, SIGNAL(menuChanged(struct menu *)),
+ helpText, SLOT(setInfo(struct menu *)));
connect(menuList, SIGNAL(menuSelected(struct menu *)),
SLOT(changeMenu(struct menu *)));
- connect(configList, SIGNAL(gotFocus(void)),
- SLOT(listFocusChanged(void)));
- connect(menuList, SIGNAL(gotFocus(void)),
+ connect(configList, SIGNAL(gotFocus(struct menu *)),
+ helpText, SLOT(setInfo(struct menu *)));
+ connect(menuList, SIGNAL(gotFocus(struct menu *)),
+ helpText, SLOT(setInfo(struct menu *)));
+ connect(menuList, SIGNAL(gotFocus(struct menu *)),
SLOT(listFocusChanged(void)));
+ connect(helpText, SIGNAL(menuSelected(struct menu *)),
+ SLOT(setMenuLink(struct menu *)));
-#if QT_VERSION >= 300
- QString listMode = configSettings->readEntry("/kconfig/qconf/listMode", "symbol");
+ QString listMode = configSettings->readEntry("/listMode", "symbol");
if (listMode == "single")
showSingleView();
else if (listMode == "full")
@@ -964,162 +1416,13 @@ ConfigMainWindow::ConfigMainWindow(void)
showSplitView();
// UI setup done, restore splitter positions
- QValueList<int> sizes = configSettings->readSizes("/kconfig/qconf/split1", &ok);
+ QValueList<int> sizes = configSettings->readSizes("/split1", &ok);
if (ok)
split1->setSizes(sizes);
- sizes = configSettings->readSizes("/kconfig/qconf/split2", &ok);
+ sizes = configSettings->readSizes("/split2", &ok);
if (ok)
split2->setSizes(sizes);
-#else
- showSplitView();
-#endif
- delete configSettings;
-}
-
-static QString print_filter(const QString &str)
-{
- QRegExp re("[<>&\"\\n]");
- QString res = str;
- for (int i = 0; (i = res.find(re, i)) >= 0;) {
- switch (res[i].latin1()) {
- case '<':
- res.replace(i, 1, "&lt;");
- i += 4;
- break;
- case '>':
- res.replace(i, 1, "&gt;");
- i += 4;
- break;
- case '&':
- res.replace(i, 1, "&amp;");
- i += 5;
- break;
- case '"':
- res.replace(i, 1, "&quot;");
- i += 6;
- break;
- case '\n':
- res.replace(i, 1, "<br>");
- i += 4;
- break;
- }
- }
- return res;
-}
-
-static void expr_print_help(void *data, const char *str)
-{
- reinterpret_cast<QString*>(data)->append(print_filter(str));
-}
-
-/*
- * display a new help entry as soon as a new menu entry is selected
- */
-void ConfigMainWindow::setHelp(QListViewItem* item)
-{
- struct symbol* sym;
- struct menu* menu = 0;
-
- configList->parent()->lineEdit->hide();
- if (item)
- menu = ((ConfigItem*)item)->menu;
- if (!menu) {
- helpText->setText(QString::null);
- return;
- }
-
- QString head, debug, help;
- menu = ((ConfigItem*)item)->menu;
- sym = menu->sym;
- if (sym) {
- if (menu->prompt) {
- head += "<big><b>";
- head += print_filter(_(menu->prompt->text));
- head += "</b></big>";
- if (sym->name) {
- head += " (";
- head += print_filter(_(sym->name));
- head += ")";
- }
- } else if (sym->name) {
- head += "<big><b>";
- head += print_filter(_(sym->name));
- head += "</b></big>";
- }
- head += "<br><br>";
-
- if (showDebug) {
- debug += "type: ";
- debug += print_filter(sym_type_name(sym->type));
- if (sym_is_choice(sym))
- debug += " (choice)";
- debug += "<br>";
- if (sym->rev_dep.expr) {
- debug += "reverse dep: ";
- expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
- }
- for (struct property *prop = sym->prop; prop; prop = prop->next) {
- switch (prop->type) {
- case P_PROMPT:
- case P_MENU:
- debug += "prompt: ";
- debug += print_filter(_(prop->text));
- debug += "<br>";
- break;
- case P_DEFAULT:
- debug += "default: ";
- expr_print(prop->expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
- break;
- case P_CHOICE:
- if (sym_is_choice(sym)) {
- debug += "choice: ";
- expr_print(prop->expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
- }
- break;
- case P_SELECT:
- debug += "select: ";
- expr_print(prop->expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
- break;
- case P_RANGE:
- debug += "range: ";
- expr_print(prop->expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
- break;
- default:
- debug += "unknown property: ";
- debug += prop_get_type_name(prop->type);
- debug += "<br>";
- }
- if (prop->visible.expr) {
- debug += "&nbsp;&nbsp;&nbsp;&nbsp;dep: ";
- expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
- }
- }
- debug += "<br>";
- }
-
- help = print_filter(_(sym->help));
- } else if (menu->prompt) {
- head += "<big><b>";
- head += print_filter(_(menu->prompt->text));
- head += "</b></big><br><br>";
- if (showDebug) {
- if (menu->prompt->visible.expr) {
- debug += "&nbsp;&nbsp;dep: ";
- expr_print(menu->prompt->visible.expr, expr_print_help, &debug, E_NONE);
- debug += "<br><br>";
- }
- }
- }
- if (showDebug)
- debug += QString().sprintf("defined at %s:%d<br><br>", menu->file->name, menu->lineno);
- helpText->setText(head + debug + help);
}
void ConfigMainWindow::loadConfig(void)
@@ -1147,21 +1450,73 @@ void ConfigMainWindow::saveConfigAs(void)
QMessageBox::information(this, "qconf", "Unable to save configuration!");
}
+void ConfigMainWindow::searchConfig(void)
+{
+ if (!searchWindow)
+ searchWindow = new ConfigSearchWindow(this, "search");
+ searchWindow->show();
+}
+
void ConfigMainWindow::changeMenu(struct menu *menu)
{
configList->setRootMenu(menu);
backAction->setEnabled(TRUE);
}
-void ConfigMainWindow::listFocusChanged(void)
+void ConfigMainWindow::setMenuLink(struct menu *menu)
{
- if (menuList->hasFocus()) {
- if (menuList->mode == menuMode)
+ struct menu *parent;
+ ConfigList* list = NULL;
+ ConfigItem* item;
+
+ if (!menu_is_visible(menu) && !configView->showAll())
+ return;
+
+ switch (configList->mode) {
+ case singleMode:
+ list = configList;
+ parent = menu_get_parent_menu(menu);
+ if (!parent)
+ return;
+ list->setRootMenu(parent);
+ break;
+ case symbolMode:
+ if (menu->flags & MENU_ROOT) {
+ configList->setRootMenu(menu);
configList->clearSelection();
- setHelp(menuList->selectedItem());
- } else if (configList->hasFocus()) {
- setHelp(configList->selectedItem());
+ list = menuList;
+ } else {
+ list = configList;
+ parent = menu_get_parent_menu(menu->parent);
+ if (!parent)
+ return;
+ item = menuList->findConfigItem(parent);
+ if (item) {
+ menuList->setSelected(item, TRUE);
+ menuList->ensureItemVisible(item);
+ }
+ list->setRootMenu(parent);
+ }
+ break;
+ case fullMode:
+ list = configList;
+ break;
}
+
+ if (list) {
+ item = list->findConfigItem(menu);
+ if (item) {
+ list->setSelected(item, TRUE);
+ list->ensureItemVisible(item);
+ list->setFocus();
+ }
+ }
+}
+
+void ConfigMainWindow::listFocusChanged(void)
+{
+ if (menuList->mode == menuMode)
+ configList->clearSelection();
}
void ConfigMainWindow::goBack(void)
@@ -1223,53 +1578,6 @@ void ConfigMainWindow::showFullView(void)
configList->setFocus();
}
-void ConfigMainWindow::setShowAll(bool b)
-{
- if (configList->showAll == b)
- return;
- configList->showAll = b;
- configList->updateListAll();
- menuList->showAll = b;
- menuList->updateListAll();
-}
-
-void ConfigMainWindow::setShowDebug(bool b)
-{
- if (showDebug == b)
- return;
- showDebug = b;
-}
-
-void ConfigMainWindow::setShowName(bool b)
-{
- if (configList->showName == b)
- return;
- configList->showName = b;
- configList->reinit();
- menuList->showName = b;
- menuList->reinit();
-}
-
-void ConfigMainWindow::setShowRange(bool b)
-{
- if (configList->showRange == b)
- return;
- configList->showRange = b;
- configList->reinit();
- menuList->showRange = b;
- menuList->reinit();
-}
-
-void ConfigMainWindow::setShowData(bool b)
-{
- if (configList->showData == b)
- return;
- configList->showData = b;
- configList->reinit();
- menuList->showData = b;
- menuList->reinit();
-}
-
/*
* ask for saving configuration before quitting
* TODO ask only when something changed
@@ -1324,17 +1632,10 @@ void ConfigMainWindow::showAbout(void)
void ConfigMainWindow::saveSettings(void)
{
-#if QT_VERSION >= 300
- ConfigSettings *configSettings = new ConfigSettings;
- configSettings->writeEntry("/kconfig/qconf/window x", pos().x());
- configSettings->writeEntry("/kconfig/qconf/window y", pos().y());
- configSettings->writeEntry("/kconfig/qconf/window width", size().width());
- configSettings->writeEntry("/kconfig/qconf/window height", size().height());
- configSettings->writeEntry("/kconfig/qconf/showName", configList->showName);
- configSettings->writeEntry("/kconfig/qconf/showRange", configList->showRange);
- configSettings->writeEntry("/kconfig/qconf/showData", configList->showData);
- configSettings->writeEntry("/kconfig/qconf/showAll", configList->showAll);
- configSettings->writeEntry("/kconfig/qconf/showDebug", showDebug);
+ configSettings->writeEntry("/window x", pos().x());
+ configSettings->writeEntry("/window y", pos().y());
+ configSettings->writeEntry("/window width", size().width());
+ configSettings->writeEntry("/window height", size().height());
QString entry;
switch(configList->mode) {
@@ -1350,13 +1651,10 @@ void ConfigMainWindow::saveSettings(void)
entry = "full";
break;
}
- configSettings->writeEntry("/kconfig/qconf/listMode", entry);
+ configSettings->writeEntry("/listMode", entry);
- configSettings->writeSizes("/kconfig/qconf/split1", split1->sizes());
- configSettings->writeSizes("/kconfig/qconf/split2", split2->sizes());
-
- delete configSettings;
-#endif
+ configSettings->writeSizes("/split1", split1->sizes());
+ configSettings->writeSizes("/split2", split2->sizes());
}
void fixup_rootmenu(struct menu *menu)
@@ -1414,13 +1712,19 @@ int main(int ac, char** av)
conf_read(NULL);
//zconfdump(stdout);
+ configSettings = new ConfigSettings();
+ configSettings->beginGroup("/kconfig/qconf");
v = new ConfigMainWindow();
//zconfdump(stdout);
- v->show();
+ configApp->setMainWidget(v);
configApp->connect(configApp, SIGNAL(lastWindowClosed()), SLOT(quit()));
configApp->connect(configApp, SIGNAL(aboutToQuit()), v, SLOT(saveSettings()));
+ v->show();
configApp->exec();
+ configSettings->endGroup();
+ delete configSettings;
+
return 0;
}
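
The new ConfigInfoView above turns symbol and menu cross-references into hyperlinks whose targets encode a raw pointer ("s%p" for symbols, "m%p" for menus), which setSource() later parses back with sscanf. Below is a minimal standalone sketch of that round-trip in plain C++ without Qt; the symbol struct is a trimmed stand-in, and the only assumption is that the pointer is printed and scanned within the same process:

    #include <cstdio>

    struct symbol { const char *name; };          /* stand-in for the kconfig struct */

    int main(void)
    {
        struct symbol sym = { "MODULES" };
        char href[64];

        /* encode: the info view emits links such as <a href="s0x..."> */
        snprintf(href, sizeof(href), "s%p", (void *)&sym);

        /* decode: setSource() recovers the pointer with sscanf("s%p") */
        struct symbol *decoded = NULL;
        if (sscanf(href, "s%p", (void **)&decoded) == 1 && decoded == &sym)
            printf("link target %s resolves to symbol %s\n", href, decoded->name);
        return 0;
    }
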
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index e52f3e90bf0c..6a9e3b14c227 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -7,9 +7,25 @@
#if QT_VERSION >= 300
#include <qsettings.h>
#else
-class QSettings { };
+class QSettings {
+public:
+ void beginGroup(const QString& group) { }
+ void endGroup(void) { }
+ bool readBoolEntry(const QString& key, bool def = FALSE, bool* ok = 0) const
+ { if (ok) *ok = FALSE; return def; }
+ int readNumEntry(const QString& key, int def = 0, bool* ok = 0) const
+ { if (ok) *ok = FALSE; return def; }
+ QString readEntry(const QString& key, const QString& def = QString::null, bool* ok = 0) const
+ { if (ok) *ok = FALSE; return def; }
+ QStringList readListEntry(const QString& key, bool* ok = 0) const
+ { if (ok) *ok = FALSE; return QStringList(); }
+ template <class t>
+ bool writeEntry(const QString& key, t value)
+ { return TRUE; }
+};
#endif
+class ConfigView;
class ConfigList;
class ConfigItem;
class ConfigLineEdit;
@@ -18,64 +34,38 @@ class ConfigMainWindow;
class ConfigSettings : public QSettings {
public:
- ConfigSettings();
-
-#if QT_VERSION >= 300
- void readListSettings();
QValueList<int> readSizes(const QString& key, bool *ok);
bool writeSizes(const QString& key, const QValueList<int>& value);
-#endif
-
- bool showAll;
- bool showName;
- bool showRange;
- bool showData;
-};
-
-class ConfigView : public QVBox {
- Q_OBJECT
- typedef class QVBox Parent;
-public:
- ConfigView(QWidget* parent, ConfigMainWindow* cview, ConfigSettings* configSettings);
- ~ConfigView(void);
- static void updateList(ConfigItem* item);
- static void updateListAll(void);
-
-public:
- ConfigList* list;
- ConfigLineEdit* lineEdit;
-
- static ConfigView* viewList;
- ConfigView* nextView;
};
enum colIdx {
promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx, colNr
};
enum listMode {
- singleMode, menuMode, symbolMode, fullMode
+ singleMode, menuMode, symbolMode, fullMode, listMode
};
class ConfigList : public QListView {
Q_OBJECT
typedef class QListView Parent;
public:
- ConfigList(ConfigView* p, ConfigMainWindow* cview, ConfigSettings *configSettings);
+ ConfigList(ConfigView* p, const char *name = 0);
void reinit(void);
ConfigView* parent(void) const
{
return (ConfigView*)Parent::parent();
}
+ ConfigItem* findConfigItem(struct menu *);
protected:
- ConfigMainWindow* cview;
-
void keyPressEvent(QKeyEvent *e);
void contentsMousePressEvent(QMouseEvent *e);
void contentsMouseReleaseEvent(QMouseEvent *e);
void contentsMouseMoveEvent(QMouseEvent *e);
void contentsMouseDoubleClickEvent(QMouseEvent *e);
void focusInEvent(QFocusEvent *e);
+ void contextMenuEvent(QContextMenuEvent *e);
+
public slots:
void setRootMenu(struct menu *menu);
@@ -83,10 +73,12 @@ public slots:
void setValue(ConfigItem* item, tristate val);
void changeValue(ConfigItem* item);
void updateSelection(void);
+ void saveSettings(void);
signals:
+ void menuChanged(struct menu *menu);
void menuSelected(struct menu *menu);
void parentSelected(void);
- void gotFocus(void);
+ void gotFocus(struct menu *);
public:
void updateListAll(void)
@@ -137,6 +129,7 @@ public:
struct menu *rootEntry;
QColorGroup disabledColorGroup;
QColorGroup inactivedColorGroup;
+ QPopupMenu* headerPopup;
private:
int colMap[colNr];
@@ -208,9 +201,7 @@ class ConfigLineEdit : public QLineEdit {
Q_OBJECT
typedef class QLineEdit Parent;
public:
- ConfigLineEdit(ConfigView* parent)
- : Parent(parent)
- { }
+ ConfigLineEdit(ConfigView* parent);
ConfigView* parent(void) const
{
return (ConfigView*)Parent::parent();
@@ -222,26 +213,104 @@ public:
ConfigItem *item;
};
+class ConfigView : public QVBox {
+ Q_OBJECT
+ typedef class QVBox Parent;
+public:
+ ConfigView(QWidget* parent, const char *name = 0);
+ ~ConfigView(void);
+ static void updateList(ConfigItem* item);
+ static void updateListAll(void);
+
+ bool showAll(void) const { return list->showAll; }
+ bool showName(void) const { return list->showName; }
+ bool showRange(void) const { return list->showRange; }
+ bool showData(void) const { return list->showData; }
+public slots:
+ void setShowAll(bool);
+ void setShowName(bool);
+ void setShowRange(bool);
+ void setShowData(bool);
+signals:
+ void showAllChanged(bool);
+ void showNameChanged(bool);
+ void showRangeChanged(bool);
+ void showDataChanged(bool);
+public:
+ ConfigList* list;
+ ConfigLineEdit* lineEdit;
+
+ static ConfigView* viewList;
+ ConfigView* nextView;
+};
+
+class ConfigInfoView : public QTextBrowser {
+ Q_OBJECT
+ typedef class QTextBrowser Parent;
+public:
+ ConfigInfoView(QWidget* parent, const char *name = 0);
+ bool showDebug(void) const { return _showDebug; }
+
+public slots:
+ void setInfo(struct menu *menu);
+ void saveSettings(void);
+ void setSource(const QString& name);
+ void setShowDebug(bool);
+
+signals:
+ void showDebugChanged(bool);
+ void menuSelected(struct menu *);
+
+protected:
+ void symbolInfo(void);
+ void menuInfo(void);
+ QString debug_info(struct symbol *sym);
+ static QString print_filter(const QString &str);
+ static void expr_print_help(void *data, struct symbol *sym, const char *str);
+ QPopupMenu* createPopupMenu(const QPoint& pos);
+ void contentsContextMenuEvent(QContextMenuEvent *e);
+
+ struct symbol *sym;
+ struct menu *menu;
+ bool _showDebug;
+};
+
+class ConfigSearchWindow : public QDialog {
+ Q_OBJECT
+ typedef class QDialog Parent;
+public:
+ ConfigSearchWindow(QWidget* parent, const char *name = 0);
+
+public slots:
+ void saveSettings(void);
+ void search(void);
+
+protected:
+ QLineEdit* editField;
+ QPushButton* searchButton;
+ QSplitter* split;
+ ConfigView* list;
+ ConfigInfoView* info;
+
+ struct symbol **result;
+};
+
class ConfigMainWindow : public QMainWindow {
Q_OBJECT
public:
ConfigMainWindow(void);
public slots:
- void setHelp(QListViewItem* item);
void changeMenu(struct menu *);
+ void setMenuLink(struct menu *);
void listFocusChanged(void);
void goBack(void);
void loadConfig(void);
void saveConfig(void);
void saveConfigAs(void);
+ void searchConfig(void);
void showSingleView(void);
void showSplitView(void);
void showFullView(void);
- void setShowAll(bool);
- void setShowDebug(bool);
- void setShowRange(bool);
- void setShowName(bool);
- void setShowData(bool);
void showIntro(void);
void showAbout(void);
void saveSettings(void);
@@ -249,15 +318,14 @@ public slots:
protected:
void closeEvent(QCloseEvent *e);
+ ConfigSearchWindow *searchWindow;
ConfigView *menuView;
ConfigList *menuList;
ConfigView *configView;
ConfigList *configList;
- QTextView *helpText;
+ ConfigInfoView *helpText;
QToolBar *toolBar;
QAction *backAction;
QSplitter* split1;
QSplitter* split2;
-
- bool showDebug;
};
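
For Qt versions before 3.0.0 the header above substitutes a stub QSettings whose readers simply hand back the supplied default and report failure through *ok. The hypothetical reader below mirrors that pattern and shows why the window-geometry code can stay identical whether or not a real settings backend exists (the class and key names here are illustrative, not part of the patch):

    #include <cstdio>

    struct FallbackSettings {                     /* stands in for the stubbed QSettings */
        int readNumEntry(const char * /*key*/, int def = 0, bool *ok = 0) const
        {
            if (ok)
                *ok = false;                      /* nothing stored: the lookup "fails"... */
            return def;                           /* ...but a usable default still comes back */
        }
    };

    int main(void)
    {
        FallbackSettings settings;
        bool ok;
        int x = settings.readNumEntry("/window x", 0, &ok);
        int y = ok ? settings.readNumEntry("/window y", 0, &ok) : 0;

        if (ok)
            printf("restoring saved window position %d,%d\n", x, y);
        else
            printf("no saved position, keeping defaults %d,%d\n", x, y);
        return 0;
    }
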
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 3d7877afccd5..ee225ced2ce4 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -15,15 +15,15 @@
struct symbol symbol_yes = {
.name = "y",
.curr = { "y", yes },
- .flags = SYMBOL_YES|SYMBOL_VALID,
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
}, symbol_mod = {
.name = "m",
.curr = { "m", mod },
- .flags = SYMBOL_MOD|SYMBOL_VALID,
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
}, symbol_no = {
.name = "n",
.curr = { "n", no },
- .flags = SYMBOL_NO|SYMBOL_VALID,
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
}, symbol_empty = {
.name = "",
.curr = { "", no },
@@ -31,6 +31,7 @@ struct symbol symbol_yes = {
};
int sym_change_count;
+struct symbol *sym_defconfig_list;
struct symbol *modules_sym;
tristate modules_val;
@@ -227,7 +228,7 @@ static struct symbol *sym_calc_choice(struct symbol *sym)
struct expr *e;
/* is the user choice visible? */
- def_sym = sym->user.val;
+ def_sym = sym->def[S_DEF_USER].val;
if (def_sym) {
sym_calc_visibility(def_sym);
if (def_sym->visible != no)
@@ -306,7 +307,7 @@ void sym_calc_value(struct symbol *sym)
} else if (E_OR(sym->visible, sym->rev_dep.tri) != no) {
sym->flags |= SYMBOL_WRITE;
if (sym_has_value(sym))
- newval.tri = sym->user.tri;
+ newval.tri = sym->def[S_DEF_USER].tri;
else if (!sym_is_choice(sym)) {
prop = sym_get_default_prop(sym);
if (prop)
@@ -329,7 +330,7 @@ void sym_calc_value(struct symbol *sym)
if (sym->visible != no) {
sym->flags |= SYMBOL_WRITE;
if (sym_has_value(sym)) {
- newval.val = sym->user.val;
+ newval.val = sym->def[S_DEF_USER].val;
break;
}
}
@@ -352,10 +353,13 @@ void sym_calc_value(struct symbol *sym)
sym->curr.val = sym_calc_choice(sym);
sym_validate_range(sym);
- if (memcmp(&oldval, &sym->curr, sizeof(oldval)))
+ if (memcmp(&oldval, &sym->curr, sizeof(oldval))) {
sym_set_changed(sym);
- if (modules_sym == sym)
- modules_val = modules_sym->curr.tri;
+ if (modules_sym == sym) {
+ sym_set_all_changed();
+ modules_val = modules_sym->curr.tri;
+ }
+ }
if (sym_is_choice(sym)) {
int flags = sym->flags & (SYMBOL_CHANGED | SYMBOL_WRITE);
@@ -426,8 +430,8 @@ bool sym_set_tristate_value(struct symbol *sym, tristate val)
if (oldval != val && !sym_tristate_within_range(sym, val))
return false;
- if (sym->flags & SYMBOL_NEW) {
- sym->flags &= ~SYMBOL_NEW;
+ if (!(sym->flags & SYMBOL_DEF_USER)) {
+ sym->flags |= SYMBOL_DEF_USER;
sym_set_changed(sym);
}
/*
@@ -439,21 +443,18 @@ bool sym_set_tristate_value(struct symbol *sym, tristate val)
struct property *prop;
struct expr *e;
- cs->user.val = sym;
- cs->flags &= ~SYMBOL_NEW;
+ cs->def[S_DEF_USER].val = sym;
+ cs->flags |= SYMBOL_DEF_USER;
prop = sym_get_choice_prop(cs);
for (e = prop->expr; e; e = e->left.expr) {
if (e->right.sym->visible != no)
- e->right.sym->flags &= ~SYMBOL_NEW;
+ e->right.sym->flags |= SYMBOL_DEF_USER;
}
}
- sym->user.tri = val;
- if (oldval != val) {
+ sym->def[S_DEF_USER].tri = val;
+ if (oldval != val)
sym_clear_all_valid();
- if (sym == modules_sym)
- sym_set_all_changed();
- }
return true;
}
@@ -591,20 +592,20 @@ bool sym_set_string_value(struct symbol *sym, const char *newval)
if (!sym_string_within_range(sym, newval))
return false;
- if (sym->flags & SYMBOL_NEW) {
- sym->flags &= ~SYMBOL_NEW;
+ if (!(sym->flags & SYMBOL_DEF_USER)) {
+ sym->flags |= SYMBOL_DEF_USER;
sym_set_changed(sym);
}
- oldval = sym->user.val;
+ oldval = sym->def[S_DEF_USER].val;
size = strlen(newval) + 1;
if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) {
size += 2;
- sym->user.val = val = malloc(size);
+ sym->def[S_DEF_USER].val = val = malloc(size);
*val++ = '0';
*val++ = 'x';
} else if (!oldval || strcmp(oldval, newval))
- sym->user.val = val = malloc(size);
+ sym->def[S_DEF_USER].val = val = malloc(size);
else
return true;
@@ -679,7 +680,6 @@ struct symbol *sym_lookup(const char *name, int isconst)
memset(symbol, 0, sizeof(*symbol));
symbol->name = new_name;
symbol->type = S_UNKNOWN;
- symbol->flags = SYMBOL_NEW;
if (isconst)
symbol->flags |= SYMBOL_CONST;
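
The symbol.c hunks above replace the old SYMBOL_NEW marker (cleared on the first user assignment) with a positive SYMBOL_DEF_USER flag and move the stored value from sym->user to sym->def[S_DEF_USER]. A compressed sketch of the new flag handling follows; the bit value and the trimmed struct are illustrative, not the real kconfig definitions:

    #include <cstdio>

    #define SYMBOL_DEF_USER 0x10000               /* illustrative bit, not the real value */

    struct symbol { const char *name; int flags; };

    static void note_user_value(struct symbol *sym)
    {
        /* mirrors sym_set_tristate_value()/sym_set_string_value() above:
           mark the symbol changed only on its first user-supplied value */
        if (!(sym->flags & SYMBOL_DEF_USER)) {
            sym->flags |= SYMBOL_DEF_USER;
            printf("%s: first user value, marked changed\n", sym->name);
        }
    }

    int main(void)
    {
        struct symbol s = { "MODULES", 0 };
        note_user_value(&s);                      /* prints once */
        note_user_value(&s);                      /* already marked, stays quiet */
        return 0;
    }
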
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index 656d2c87d661..e3f28b9d59f4 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -44,7 +44,9 @@ int file_write_dep(const char *name)
else
fprintf(out, "\t%s\n", file->name);
}
- fprintf(out, "\n.config include/linux/autoconf.h: $(deps_config)\n\n$(deps_config):\n");
+ fprintf(out, "\ninclude/config/auto.conf: \\\n"
+ "\t$(deps_config)\n\n"
+ "$(deps_config): ;\n");
fclose(out);
rename("..config.tmp", name);
return 0;
diff --git a/scripts/kconfig/zconf.gperf b/scripts/kconfig/zconf.gperf
index b03220600b3a..9b44c80dd899 100644
--- a/scripts/kconfig/zconf.gperf
+++ b/scripts/kconfig/zconf.gperf
@@ -39,5 +39,8 @@ string, T_TYPE, TF_COMMAND, S_STRING
select, T_SELECT, TF_COMMAND
enable, T_SELECT, TF_COMMAND
range, T_RANGE, TF_COMMAND
+option, T_OPTION, TF_COMMAND
on, T_ON, TF_PARAM
+modules, T_OPT_MODULES, TF_OPTION
+defconfig_list, T_OPT_DEFCONFIG_LIST,TF_OPTION
%%
diff --git a/scripts/kconfig/zconf.hash.c_shipped b/scripts/kconfig/zconf.hash.c_shipped
index 345f0fc07ca3..47c8b5babf34 100644
--- a/scripts/kconfig/zconf.hash.c_shipped
+++ b/scripts/kconfig/zconf.hash.c_shipped
@@ -53,10 +53,10 @@ kconf_id_hash (register const char *str, register unsigned int len)
47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
- 47, 47, 47, 47, 47, 47, 47, 25, 10, 15,
- 0, 0, 5, 47, 0, 0, 47, 47, 0, 10,
- 0, 20, 20, 20, 5, 0, 0, 20, 47, 47,
- 20, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 25, 30, 15,
+ 0, 15, 0, 47, 5, 15, 47, 47, 30, 20,
+ 5, 0, 25, 15, 0, 0, 10, 35, 47, 47,
+ 5, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
@@ -88,69 +88,75 @@ kconf_id_hash (register const char *str, register unsigned int len)
struct kconf_id_strings_t
{
- char kconf_id_strings_str2[sizeof("if")];
- char kconf_id_strings_str3[sizeof("int")];
- char kconf_id_strings_str4[sizeof("help")];
- char kconf_id_strings_str5[sizeof("endif")];
- char kconf_id_strings_str6[sizeof("select")];
- char kconf_id_strings_str7[sizeof("endmenu")];
- char kconf_id_strings_str8[sizeof("tristate")];
- char kconf_id_strings_str9[sizeof("endchoice")];
+ char kconf_id_strings_str2[sizeof("on")];
+ char kconf_id_strings_str6[sizeof("string")];
+ char kconf_id_strings_str7[sizeof("default")];
+ char kconf_id_strings_str8[sizeof("def_bool")];
char kconf_id_strings_str10[sizeof("range")];
- char kconf_id_strings_str11[sizeof("string")];
- char kconf_id_strings_str12[sizeof("default")];
- char kconf_id_strings_str13[sizeof("def_bool")];
- char kconf_id_strings_str14[sizeof("menu")];
- char kconf_id_strings_str16[sizeof("def_boolean")];
- char kconf_id_strings_str17[sizeof("def_tristate")];
- char kconf_id_strings_str18[sizeof("mainmenu")];
- char kconf_id_strings_str20[sizeof("menuconfig")];
- char kconf_id_strings_str21[sizeof("config")];
- char kconf_id_strings_str22[sizeof("on")];
- char kconf_id_strings_str23[sizeof("hex")];
- char kconf_id_strings_str26[sizeof("source")];
- char kconf_id_strings_str27[sizeof("depends")];
- char kconf_id_strings_str28[sizeof("optional")];
- char kconf_id_strings_str31[sizeof("enable")];
- char kconf_id_strings_str32[sizeof("comment")];
- char kconf_id_strings_str33[sizeof("requires")];
+ char kconf_id_strings_str11[sizeof("def_boolean")];
+ char kconf_id_strings_str12[sizeof("def_tristate")];
+ char kconf_id_strings_str13[sizeof("hex")];
+ char kconf_id_strings_str14[sizeof("defconfig_list")];
+ char kconf_id_strings_str16[sizeof("option")];
+ char kconf_id_strings_str17[sizeof("if")];
+ char kconf_id_strings_str18[sizeof("optional")];
+ char kconf_id_strings_str20[sizeof("endif")];
+ char kconf_id_strings_str21[sizeof("choice")];
+ char kconf_id_strings_str22[sizeof("endmenu")];
+ char kconf_id_strings_str23[sizeof("requires")];
+ char kconf_id_strings_str24[sizeof("endchoice")];
+ char kconf_id_strings_str26[sizeof("config")];
+ char kconf_id_strings_str27[sizeof("modules")];
+ char kconf_id_strings_str28[sizeof("int")];
+ char kconf_id_strings_str29[sizeof("menu")];
+ char kconf_id_strings_str31[sizeof("prompt")];
+ char kconf_id_strings_str32[sizeof("depends")];
+ char kconf_id_strings_str33[sizeof("tristate")];
char kconf_id_strings_str34[sizeof("bool")];
+ char kconf_id_strings_str35[sizeof("menuconfig")];
+ char kconf_id_strings_str36[sizeof("select")];
char kconf_id_strings_str37[sizeof("boolean")];
- char kconf_id_strings_str41[sizeof("choice")];
- char kconf_id_strings_str46[sizeof("prompt")];
+ char kconf_id_strings_str39[sizeof("help")];
+ char kconf_id_strings_str41[sizeof("source")];
+ char kconf_id_strings_str42[sizeof("comment")];
+ char kconf_id_strings_str43[sizeof("mainmenu")];
+ char kconf_id_strings_str46[sizeof("enable")];
};
static struct kconf_id_strings_t kconf_id_strings_contents =
{
- "if",
- "int",
- "help",
- "endif",
- "select",
- "endmenu",
- "tristate",
- "endchoice",
- "range",
+ "on",
"string",
"default",
"def_bool",
- "menu",
+ "range",
"def_boolean",
"def_tristate",
- "mainmenu",
- "menuconfig",
- "config",
- "on",
"hex",
- "source",
- "depends",
+ "defconfig_list",
+ "option",
+ "if",
"optional",
- "enable",
- "comment",
+ "endif",
+ "choice",
+ "endmenu",
"requires",
+ "endchoice",
+ "config",
+ "modules",
+ "int",
+ "menu",
+ "prompt",
+ "depends",
+ "tristate",
"bool",
+ "menuconfig",
+ "select",
"boolean",
- "choice",
- "prompt"
+ "help",
+ "source",
+ "comment",
+ "mainmenu",
+ "enable"
};
#define kconf_id_strings ((const char *) &kconf_id_strings_contents)
#ifdef __GNUC__
@@ -161,9 +167,9 @@ kconf_id_lookup (register const char *str, register unsigned int len)
{
enum
{
- TOTAL_KEYWORDS = 30,
+ TOTAL_KEYWORDS = 33,
MIN_WORD_LENGTH = 2,
- MAX_WORD_LENGTH = 12,
+ MAX_WORD_LENGTH = 14,
MIN_HASH_VALUE = 2,
MAX_HASH_VALUE = 46
};
@@ -171,43 +177,48 @@ kconf_id_lookup (register const char *str, register unsigned int len)
static struct kconf_id wordlist[] =
{
{-1}, {-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_IF, TF_COMMAND|TF_PARAM},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str3, T_TYPE, TF_COMMAND, S_INT},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str4, T_HELP, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str5, T_ENDIF, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str6, T_SELECT, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_ENDMENU, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_TYPE, TF_COMMAND, S_TRISTATE},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str9, T_ENDCHOICE, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_ON, TF_PARAM},
+ {-1}, {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str6, T_TYPE, TF_COMMAND, S_STRING},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_DEFAULT, TF_COMMAND, S_UNKNOWN},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
+ {-1},
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str10, T_RANGE, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str11, T_TYPE, TF_COMMAND, S_STRING},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_UNKNOWN},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_MENU, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str11, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_TRISTATE},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_TYPE, TF_COMMAND, S_HEX},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_OPT_DEFCONFIG_LIST,TF_OPTION},
{-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str16, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_DEFAULT, TF_COMMAND, S_TRISTATE},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_MAINMENU, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str16, T_OPTION, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_IF, TF_COMMAND|TF_PARAM},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_OPTIONAL, TF_COMMAND},
{-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str20, T_MENUCONFIG, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_CONFIG, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ON, TF_PARAM},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_TYPE, TF_COMMAND, S_HEX},
- {-1}, {-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str26, T_SOURCE, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_DEPENDS, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_OPTIONAL, TF_COMMAND},
- {-1}, {-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SELECT, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_REQUIRES, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str20, T_ENDIF, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_CHOICE, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ENDMENU, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_REQUIRES, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str24, T_ENDCHOICE, TF_COMMAND},
+ {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str26, T_CONFIG, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_OPT_MODULES, TF_OPTION},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_TYPE, TF_COMMAND, S_INT},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29, T_MENU, TF_COMMAND},
+ {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_PROMPT, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_DEPENDS, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_TYPE, TF_COMMAND, S_TRISTATE},
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str34, T_TYPE, TF_COMMAND, S_BOOLEAN},
- {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35, T_MENUCONFIG, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_SELECT, TF_COMMAND},
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str37, T_TYPE, TF_COMMAND, S_BOOLEAN},
- {-1}, {-1}, {-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_CHOICE, TF_COMMAND},
- {-1}, {-1}, {-1}, {-1},
- {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_PROMPT, TF_COMMAND}
+ {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str39, T_HELP, TF_COMMAND},
+ {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_SOURCE, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42, T_COMMENT, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str43, T_MAINMENU, TF_COMMAND},
+ {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_SELECT, TF_COMMAND}
};
if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index ea7755da82f5..2fb0a4fc61d0 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1,7 +1,7 @@
-/* A Bison parser, made by GNU Bison 2.0. */
+/* A Bison parser, made by GNU Bison 2.1. */
/* Skeleton parser for Yacc-like parsing with Bison,
- Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -15,8 +15,8 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
/* As a special exception, when this file is copied by Bison into a
Bison output file, you may use that output file without restriction.
@@ -36,6 +36,9 @@
/* Identify Bison output. */
#define YYBISON 1
+/* Bison version. */
+#define YYBISON_VERSION "2.1"
+
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
@@ -82,19 +85,21 @@
T_DEFAULT = 276,
T_SELECT = 277,
T_RANGE = 278,
- T_ON = 279,
- T_WORD = 280,
- T_WORD_QUOTE = 281,
- T_UNEQUAL = 282,
- T_CLOSE_PAREN = 283,
- T_OPEN_PAREN = 284,
- T_EOL = 285,
- T_OR = 286,
- T_AND = 287,
- T_EQUAL = 288,
- T_NOT = 289
+ T_OPTION = 279,
+ T_ON = 280,
+ T_WORD = 281,
+ T_WORD_QUOTE = 282,
+ T_UNEQUAL = 283,
+ T_CLOSE_PAREN = 284,
+ T_OPEN_PAREN = 285,
+ T_EOL = 286,
+ T_OR = 287,
+ T_AND = 288,
+ T_EQUAL = 289,
+ T_NOT = 290
};
#endif
+/* Tokens. */
#define T_MAINMENU 258
#define T_MENU 259
#define T_ENDMENU 260
@@ -116,17 +121,18 @@
#define T_DEFAULT 276
#define T_SELECT 277
#define T_RANGE 278
-#define T_ON 279
-#define T_WORD 280
-#define T_WORD_QUOTE 281
-#define T_UNEQUAL 282
-#define T_CLOSE_PAREN 283
-#define T_OPEN_PAREN 284
-#define T_EOL 285
-#define T_OR 286
-#define T_AND 287
-#define T_EQUAL 288
-#define T_NOT 289
+#define T_OPTION 279
+#define T_ON 280
+#define T_WORD 281
+#define T_WORD_QUOTE 282
+#define T_UNEQUAL 283
+#define T_CLOSE_PAREN 284
+#define T_OPEN_PAREN 285
+#define T_EOL 286
+#define T_OR 287
+#define T_AND 288
+#define T_EQUAL 289
+#define T_NOT 290
@@ -187,6 +193,11 @@ static struct menu *current_menu, *current_entry;
# define YYERROR_VERBOSE 0
#endif
+/* Enabling the token table. */
+#ifndef YYTOKEN_TABLE
+# define YYTOKEN_TABLE 0
+#endif
+
#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
typedef union YYSTYPE {
@@ -197,7 +208,7 @@ typedef union YYSTYPE {
struct menu *menu;
struct kconf_id *id;
} YYSTYPE;
-/* Line 190 of yacc.c. */
+/* Line 196 of yacc.c. */
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
@@ -209,17 +220,36 @@ typedef union YYSTYPE {
/* Copy the second part of user declarations. */
-/* Line 213 of yacc.c. */
+/* Line 219 of yacc.c. */
-#if ! defined (yyoverflow) || YYERROR_VERBOSE
+#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
+# define YYSIZE_T __SIZE_TYPE__
+#endif
+#if ! defined (YYSIZE_T) && defined (size_t)
+# define YYSIZE_T size_t
+#endif
+#if ! defined (YYSIZE_T) && (defined (__STDC__) || defined (__cplusplus))
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+#endif
+#if ! defined (YYSIZE_T)
+# define YYSIZE_T unsigned int
+#endif
-# ifndef YYFREE
-# define YYFREE free
+#ifndef YY_
+# if YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(msgid) dgettext ("bison-runtime", msgid)
+# endif
# endif
-# ifndef YYMALLOC
-# define YYMALLOC malloc
+# ifndef YY_
+# define YY_(msgid) msgid
# endif
+#endif
+
+#if ! defined (yyoverflow) || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
@@ -229,6 +259,10 @@ typedef union YYSTYPE {
# define YYSTACK_ALLOC __builtin_alloca
# else
# define YYSTACK_ALLOC alloca
+# if defined (__STDC__) || defined (__cplusplus)
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# define YYINCLUDED_STDLIB_H
+# endif
# endif
# endif
# endif
@@ -236,13 +270,39 @@ typedef union YYSTYPE {
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
-# else
-# if defined (__STDC__) || defined (__cplusplus)
-# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-# define YYSIZE_T size_t
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2005 */
# endif
+# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM ((YYSIZE_T) -1)
+# endif
+# ifdef __cplusplus
+extern "C" {
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if (! defined (malloc) && ! defined (YYINCLUDED_STDLIB_H) \
+ && (defined (__STDC__) || defined (__cplusplus)))
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if (! defined (free) && ! defined (YYINCLUDED_STDLIB_H) \
+ && (defined (__STDC__) || defined (__cplusplus)))
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifdef __cplusplus
+}
+# endif
# endif
#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */
@@ -277,7 +337,7 @@ union yyalloc
# define YYCOPY(To, From, Count) \
do \
{ \
- register YYSIZE_T yyi; \
+ YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
@@ -312,22 +372,22 @@ union yyalloc
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 3
/* YYLAST -- Last index in YYTABLE. */
-#define YYLAST 264
+#define YYLAST 275
/* YYNTOKENS -- Number of terminals. */
-#define YYNTOKENS 35
+#define YYNTOKENS 36
/* YYNNTS -- Number of nonterminals. */
-#define YYNNTS 42
+#define YYNNTS 45
/* YYNRULES -- Number of rules. */
-#define YYNRULES 104
+#define YYNRULES 110
/* YYNRULES -- Number of states. */
-#define YYNSTATES 175
+#define YYNSTATES 183
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
-#define YYMAXUTOK 289
+#define YYMAXUTOK 290
-#define YYTRANSLATE(YYX) \
+#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
@@ -361,7 +421,8 @@ static const unsigned char yytranslate[] =
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35
};
#if YYDEBUG
@@ -372,72 +433,75 @@ static const unsigned short int yyprhs[] =
0, 0, 3, 5, 6, 9, 12, 15, 20, 23,
28, 33, 37, 39, 41, 43, 45, 47, 49, 51,
53, 55, 57, 59, 61, 63, 67, 70, 74, 77,
- 81, 84, 85, 88, 91, 94, 97, 100, 104, 109,
- 114, 119, 125, 128, 131, 133, 137, 138, 141, 144,
- 147, 150, 153, 158, 162, 165, 170, 171, 174, 178,
- 180, 184, 185, 188, 191, 194, 198, 201, 203, 207,
- 208, 211, 214, 217, 221, 225, 228, 231, 234, 235,
- 238, 241, 244, 249, 253, 257, 258, 261, 263, 265,
- 268, 271, 274, 276, 279, 280, 283, 285, 289, 293,
- 297, 300, 304, 308, 310
+ 81, 84, 85, 88, 91, 94, 97, 100, 103, 107,
+ 112, 117, 122, 128, 132, 133, 137, 138, 141, 144,
+ 147, 149, 153, 154, 157, 160, 163, 166, 169, 174,
+ 178, 181, 186, 187, 190, 194, 196, 200, 201, 204,
+ 207, 210, 214, 217, 219, 223, 224, 227, 230, 233,
+ 237, 241, 244, 247, 250, 251, 254, 257, 260, 265,
+ 269, 273, 274, 277, 279, 281, 284, 287, 290, 292,
+ 295, 296, 299, 301, 305, 309, 313, 316, 320, 324,
+ 326
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yysigned_char yyrhs[] =
{
- 36, 0, -1, 37, -1, -1, 37, 39, -1, 37,
- 50, -1, 37, 61, -1, 37, 3, 71, 73, -1,
- 37, 72, -1, 37, 25, 1, 30, -1, 37, 38,
- 1, 30, -1, 37, 1, 30, -1, 16, -1, 19,
+ 37, 0, -1, 38, -1, -1, 38, 40, -1, 38,
+ 54, -1, 38, 65, -1, 38, 3, 75, 77, -1,
+ 38, 76, -1, 38, 26, 1, 31, -1, 38, 39,
+ 1, 31, -1, 38, 1, 31, -1, 16, -1, 19,
-1, 20, -1, 22, -1, 18, -1, 23, -1, 21,
- -1, 30, -1, 56, -1, 65, -1, 42, -1, 44,
- -1, 63, -1, 25, 1, 30, -1, 1, 30, -1,
- 10, 25, 30, -1, 41, 45, -1, 11, 25, 30,
- -1, 43, 45, -1, -1, 45, 46, -1, 45, 69,
- -1, 45, 67, -1, 45, 40, -1, 45, 30, -1,
- 20, 70, 30, -1, 19, 71, 74, 30, -1, 21,
- 75, 74, 30, -1, 22, 25, 74, 30, -1, 23,
- 76, 76, 74, 30, -1, 7, 30, -1, 47, 51,
- -1, 72, -1, 48, 53, 49, -1, -1, 51, 52,
- -1, 51, 69, -1, 51, 67, -1, 51, 30, -1,
- 51, 40, -1, 19, 71, 74, 30, -1, 20, 70,
- 30, -1, 18, 30, -1, 21, 25, 74, 30, -1,
- -1, 53, 39, -1, 14, 75, 73, -1, 72, -1,
- 54, 57, 55, -1, -1, 57, 39, -1, 57, 61,
- -1, 57, 50, -1, 4, 71, 30, -1, 58, 68,
- -1, 72, -1, 59, 62, 60, -1, -1, 62, 39,
- -1, 62, 61, -1, 62, 50, -1, 6, 71, 30,
- -1, 9, 71, 30, -1, 64, 68, -1, 12, 30,
- -1, 66, 13, -1, -1, 68, 69, -1, 68, 30,
- -1, 68, 40, -1, 16, 24, 75, 30, -1, 16,
- 75, 30, -1, 17, 75, 30, -1, -1, 71, 74,
- -1, 25, -1, 26, -1, 5, 30, -1, 8, 30,
- -1, 15, 30, -1, 30, -1, 73, 30, -1, -1,
- 14, 75, -1, 76, -1, 76, 33, 76, -1, 76,
- 27, 76, -1, 29, 75, 28, -1, 34, 75, -1,
- 75, 31, 75, -1, 75, 32, 75, -1, 25, -1,
- 26, -1
+ -1, 31, -1, 60, -1, 69, -1, 43, -1, 45,
+ -1, 67, -1, 26, 1, 31, -1, 1, 31, -1,
+ 10, 26, 31, -1, 42, 46, -1, 11, 26, 31,
+ -1, 44, 46, -1, -1, 46, 47, -1, 46, 48,
+ -1, 46, 73, -1, 46, 71, -1, 46, 41, -1,
+ 46, 31, -1, 20, 74, 31, -1, 19, 75, 78,
+ 31, -1, 21, 79, 78, 31, -1, 22, 26, 78,
+ 31, -1, 23, 80, 80, 78, 31, -1, 24, 49,
+ 31, -1, -1, 49, 26, 50, -1, -1, 34, 75,
+ -1, 7, 31, -1, 51, 55, -1, 76, -1, 52,
+ 57, 53, -1, -1, 55, 56, -1, 55, 73, -1,
+ 55, 71, -1, 55, 31, -1, 55, 41, -1, 19,
+ 75, 78, 31, -1, 20, 74, 31, -1, 18, 31,
+ -1, 21, 26, 78, 31, -1, -1, 57, 40, -1,
+ 14, 79, 77, -1, 76, -1, 58, 61, 59, -1,
+ -1, 61, 40, -1, 61, 65, -1, 61, 54, -1,
+ 4, 75, 31, -1, 62, 72, -1, 76, -1, 63,
+ 66, 64, -1, -1, 66, 40, -1, 66, 65, -1,
+ 66, 54, -1, 6, 75, 31, -1, 9, 75, 31,
+ -1, 68, 72, -1, 12, 31, -1, 70, 13, -1,
+ -1, 72, 73, -1, 72, 31, -1, 72, 41, -1,
+ 16, 25, 79, 31, -1, 16, 79, 31, -1, 17,
+ 79, 31, -1, -1, 75, 78, -1, 26, -1, 27,
+ -1, 5, 31, -1, 8, 31, -1, 15, 31, -1,
+ 31, -1, 77, 31, -1, -1, 14, 79, -1, 80,
+ -1, 80, 34, 80, -1, 80, 28, 80, -1, 30,
+ 79, 29, -1, 35, 79, -1, 79, 32, 79, -1,
+ 79, 33, 79, -1, 26, -1, 27, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const unsigned short int yyrline[] =
{
- 0, 103, 103, 105, 107, 108, 109, 110, 111, 112,
- 113, 117, 121, 121, 121, 121, 121, 121, 121, 125,
- 126, 127, 128, 129, 130, 134, 135, 141, 149, 155,
- 163, 173, 175, 176, 177, 178, 179, 182, 190, 196,
- 206, 212, 220, 229, 234, 242, 245, 247, 248, 249,
- 250, 251, 254, 260, 271, 277, 287, 289, 294, 302,
- 310, 313, 315, 316, 317, 322, 329, 334, 342, 345,
- 347, 348, 349, 352, 360, 367, 374, 380, 387, 389,
- 390, 391, 394, 399, 404, 412, 414, 419, 420, 423,
- 424, 425, 429, 430, 433, 434, 437, 438, 439, 440,
- 441, 442, 443, 446, 447
+ 0, 105, 105, 107, 109, 110, 111, 112, 113, 114,
+ 115, 119, 123, 123, 123, 123, 123, 123, 123, 127,
+ 128, 129, 130, 131, 132, 136, 137, 143, 151, 157,
+ 165, 175, 177, 178, 179, 180, 181, 182, 185, 193,
+ 199, 209, 215, 221, 224, 226, 237, 238, 243, 252,
+ 257, 265, 268, 270, 271, 272, 273, 274, 277, 283,
+ 294, 300, 310, 312, 317, 325, 333, 336, 338, 339,
+ 340, 345, 352, 357, 365, 368, 370, 371, 372, 375,
+ 383, 390, 397, 403, 410, 412, 413, 414, 417, 422,
+ 427, 435, 437, 442, 443, 446, 447, 448, 452, 453,
+ 456, 457, 460, 461, 462, 463, 464, 465, 466, 469,
+ 470
};
#endif
-#if YYDEBUG || YYERROR_VERBOSE
-/* YYTNME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
@@ -445,17 +509,18 @@ static const char *const yytname[] =
"T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
"T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
"T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT",
- "T_SELECT", "T_RANGE", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
- "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL",
- "T_NOT", "$accept", "input", "stmt_list", "option_name", "common_stmt",
- "option_error", "config_entry_start", "config_stmt",
+ "T_SELECT", "T_RANGE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE",
+ "T_UNEQUAL", "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND",
+ "T_EQUAL", "T_NOT", "$accept", "input", "stmt_list", "option_name",
+ "common_stmt", "option_error", "config_entry_start", "config_stmt",
"menuconfig_entry_start", "menuconfig_stmt", "config_option_list",
- "config_option", "choice", "choice_entry", "choice_end", "choice_stmt",
- "choice_option_list", "choice_option", "choice_block", "if_entry",
- "if_end", "if_stmt", "if_block", "menu", "menu_entry", "menu_end",
- "menu_stmt", "menu_block", "source_stmt", "comment", "comment_stmt",
- "help_start", "help", "depends_list", "depends", "prompt_stmt_opt",
- "prompt", "end", "nl", "if_expr", "expr", "symbol", 0
+ "config_option", "symbol_option", "symbol_option_list",
+ "symbol_option_arg", "choice", "choice_entry", "choice_end",
+ "choice_stmt", "choice_option_list", "choice_option", "choice_block",
+ "if_entry", "if_end", "if_stmt", "if_block", "menu", "menu_entry",
+ "menu_end", "menu_stmt", "menu_block", "source_stmt", "comment",
+ "comment_stmt", "help_start", "help", "depends_list", "depends",
+ "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol", 0
};
#endif
@@ -467,24 +532,25 @@ static const unsigned short int yytoknum[] =
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
- 285, 286, 287, 288, 289
+ 285, 286, 287, 288, 289, 290
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const unsigned char yyr1[] =
{
- 0, 35, 36, 37, 37, 37, 37, 37, 37, 37,
- 37, 37, 38, 38, 38, 38, 38, 38, 38, 39,
- 39, 39, 39, 39, 39, 40, 40, 41, 42, 43,
- 44, 45, 45, 45, 45, 45, 45, 46, 46, 46,
- 46, 46, 47, 48, 49, 50, 51, 51, 51, 51,
- 51, 51, 52, 52, 52, 52, 53, 53, 54, 55,
- 56, 57, 57, 57, 57, 58, 59, 60, 61, 62,
- 62, 62, 62, 63, 64, 65, 66, 67, 68, 68,
- 68, 68, 69, 69, 69, 70, 70, 71, 71, 72,
- 72, 72, 73, 73, 74, 74, 75, 75, 75, 75,
- 75, 75, 75, 76, 76
+ 0, 36, 37, 38, 38, 38, 38, 38, 38, 38,
+ 38, 38, 39, 39, 39, 39, 39, 39, 39, 40,
+ 40, 40, 40, 40, 40, 41, 41, 42, 43, 44,
+ 45, 46, 46, 46, 46, 46, 46, 46, 47, 47,
+ 47, 47, 47, 48, 49, 49, 50, 50, 51, 52,
+ 53, 54, 55, 55, 55, 55, 55, 55, 56, 56,
+ 56, 56, 57, 57, 58, 59, 60, 61, 61, 61,
+ 61, 62, 63, 64, 65, 66, 66, 66, 66, 67,
+ 68, 69, 70, 71, 72, 72, 72, 72, 73, 73,
+ 73, 74, 74, 75, 75, 76, 76, 76, 77, 77,
+ 78, 78, 79, 79, 79, 79, 79, 79, 79, 80,
+ 80
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
@@ -493,14 +559,15 @@ static const unsigned char yyr2[] =
0, 2, 1, 0, 2, 2, 2, 4, 2, 4,
4, 3, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 3, 2, 3, 2, 3,
- 2, 0, 2, 2, 2, 2, 2, 3, 4, 4,
- 4, 5, 2, 2, 1, 3, 0, 2, 2, 2,
- 2, 2, 4, 3, 2, 4, 0, 2, 3, 1,
- 3, 0, 2, 2, 2, 3, 2, 1, 3, 0,
- 2, 2, 2, 3, 3, 2, 2, 2, 0, 2,
- 2, 2, 4, 3, 3, 0, 2, 1, 1, 2,
- 2, 2, 1, 2, 0, 2, 1, 3, 3, 3,
- 2, 3, 3, 1, 1
+ 2, 0, 2, 2, 2, 2, 2, 2, 3, 4,
+ 4, 4, 5, 3, 0, 3, 0, 2, 2, 2,
+ 1, 3, 0, 2, 2, 2, 2, 2, 4, 3,
+ 2, 4, 0, 2, 3, 1, 3, 0, 2, 2,
+ 2, 3, 2, 1, 3, 0, 2, 2, 2, 3,
+ 3, 2, 2, 2, 0, 2, 2, 2, 4, 3,
+ 3, 0, 2, 1, 1, 2, 2, 2, 1, 2,
+ 0, 2, 1, 3, 3, 3, 2, 3, 3, 1,
+ 1
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -511,175 +578,164 @@ static const unsigned char yydefact[] =
3, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 12, 16, 13, 14,
18, 15, 17, 0, 19, 0, 4, 31, 22, 31,
- 23, 46, 56, 5, 61, 20, 78, 69, 6, 24,
- 78, 21, 8, 11, 87, 88, 0, 0, 89, 0,
- 42, 90, 0, 0, 0, 103, 104, 0, 0, 0,
- 96, 91, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 92, 7, 65, 73, 74, 27, 29, 0,
- 100, 0, 0, 58, 0, 0, 9, 10, 0, 0,
- 0, 0, 0, 85, 0, 0, 0, 0, 36, 35,
- 32, 0, 34, 33, 0, 0, 85, 0, 50, 51,
- 47, 49, 48, 57, 45, 44, 62, 64, 60, 63,
- 59, 80, 81, 79, 70, 72, 68, 71, 67, 93,
- 99, 101, 102, 98, 97, 26, 76, 0, 0, 0,
- 94, 0, 94, 94, 94, 0, 0, 77, 54, 94,
- 0, 94, 0, 83, 84, 0, 0, 37, 86, 0,
- 0, 94, 25, 0, 53, 0, 82, 95, 38, 39,
- 40, 0, 52, 55, 41
+ 23, 52, 62, 5, 67, 20, 84, 75, 6, 24,
+ 84, 21, 8, 11, 93, 94, 0, 0, 95, 0,
+ 48, 96, 0, 0, 0, 109, 110, 0, 0, 0,
+ 102, 97, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 98, 7, 71, 79, 80, 27, 29, 0,
+ 106, 0, 0, 64, 0, 0, 9, 10, 0, 0,
+ 0, 0, 0, 91, 0, 0, 0, 44, 0, 37,
+ 36, 32, 33, 0, 35, 34, 0, 0, 91, 0,
+ 56, 57, 53, 55, 54, 63, 51, 50, 68, 70,
+ 66, 69, 65, 86, 87, 85, 76, 78, 74, 77,
+ 73, 99, 105, 107, 108, 104, 103, 26, 82, 0,
+ 0, 0, 100, 0, 100, 100, 100, 0, 0, 0,
+ 83, 60, 100, 0, 100, 0, 89, 90, 0, 0,
+ 38, 92, 0, 0, 100, 46, 43, 25, 0, 59,
+ 0, 88, 101, 39, 40, 41, 0, 0, 45, 58,
+ 61, 42, 47
};
/* YYDEFGOTO[NTERM-NUM]. */
static const short int yydefgoto[] =
{
- -1, 1, 2, 25, 26, 99, 27, 28, 29, 30,
- 64, 100, 31, 32, 114, 33, 66, 110, 67, 34,
- 118, 35, 68, 36, 37, 126, 38, 70, 39, 40,
- 41, 101, 102, 69, 103, 141, 142, 42, 73, 156,
- 59, 60
+ -1, 1, 2, 25, 26, 100, 27, 28, 29, 30,
+ 64, 101, 102, 148, 178, 31, 32, 116, 33, 66,
+ 112, 67, 34, 120, 35, 68, 36, 37, 128, 38,
+ 70, 39, 40, 41, 103, 104, 69, 105, 143, 144,
+ 42, 73, 159, 59, 60
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
-#define YYPACT_NINF -78
+#define YYPACT_NINF -135
static const short int yypact[] =
{
- -78, 2, 159, -78, -21, 0, 0, -12, 0, 1,
- 4, 0, 27, 38, 60, 58, -78, -78, -78, -78,
- -78, -78, -78, 100, -78, 104, -78, -78, -78, -78,
- -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
- -78, -78, -78, -78, -78, -78, 86, 113, -78, 114,
- -78, -78, 125, 127, 128, -78, -78, 60, 60, 210,
- 65, -78, 141, 142, 39, 103, 182, 200, 6, 66,
- 6, 131, -78, 146, -78, -78, -78, -78, -78, 196,
- -78, 60, 60, 146, 40, 40, -78, -78, 155, 156,
- -2, 60, 0, 0, 60, 105, 40, 194, -78, -78,
- -78, 206, -78, -78, 183, 0, 0, 195, -78, -78,
- -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
- -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
- -78, 197, -78, -78, -78, -78, -78, 60, 213, 216,
- 212, 203, 212, 190, 212, 40, 208, -78, -78, 212,
- 222, 212, 219, -78, -78, 60, 223, -78, -78, 224,
- 225, 212, -78, 226, -78, 227, -78, 47, -78, -78,
- -78, 228, -78, -78, -78
+ -135, 2, 170, -135, -14, 56, 56, -8, 56, 24,
+ 67, 56, 7, 14, 62, 97, -135, -135, -135, -135,
+ -135, -135, -135, 156, -135, 166, -135, -135, -135, -135,
+ -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
+ -135, -135, -135, -135, -135, -135, 138, 151, -135, 152,
+ -135, -135, 163, 167, 176, -135, -135, 62, 62, 185,
+ -19, -135, 188, 190, 42, 103, 194, 85, 70, 222,
+ 70, 132, -135, 191, -135, -135, -135, -135, -135, 127,
+ -135, 62, 62, 191, 104, 104, -135, -135, 193, 203,
+ 9, 62, 56, 56, 62, 161, 104, -135, 196, -135,
+ -135, -135, -135, 233, -135, -135, 204, 56, 56, 221,
+ -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
+ -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
+ -135, -135, -135, 219, -135, -135, -135, -135, -135, 62,
+ 209, 212, 240, 224, 240, -1, 240, 104, 41, 225,
+ -135, -135, 240, 226, 240, 218, -135, -135, 62, 227,
+ -135, -135, 228, 229, 240, 230, -135, -135, 231, -135,
+ 232, -135, 112, -135, -135, -135, 234, 56, -135, -135,
+ -135, -135, -135
};
/* YYPGOTO[NTERM-NUM]. */
static const short int yypgoto[] =
{
- -78, -78, -78, -78, 164, -36, -78, -78, -78, -78,
- 230, -78, -78, -78, -78, 29, -78, -78, -78, -78,
- -78, -78, -78, -78, -78, -78, 59, -78, -78, -78,
- -78, -78, 198, 220, 24, 157, -5, 169, 202, 74,
- -53, -77
+ -135, -135, -135, -135, 94, -45, -135, -135, -135, -135,
+ 237, -135, -135, -135, -135, -135, -135, -135, -54, -135,
+ -135, -135, -135, -135, -135, -135, -135, -135, -135, 1,
+ -135, -135, -135, -135, -135, 195, 235, -44, 159, -5,
+ 98, 210, -134, -53, -77
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
number is the opposite. If zero, do what YYDEFACT says.
If YYTABLE_NINF, syntax error. */
-#define YYTABLE_NINF -76
+#define YYTABLE_NINF -82
static const short int yytable[] =
{
- 46, 47, 3, 49, 79, 80, 52, 133, 134, 43,
- 6, 7, 8, 9, 10, 11, 12, 13, 48, 145,
- 14, 15, 137, 55, 56, 44, 45, 57, 131, 132,
- 109, 50, 58, 122, 51, 122, 24, 138, 139, -28,
- 88, 143, -28, -28, -28, -28, -28, -28, -28, -28,
- -28, 89, 53, -28, -28, 90, 91, -28, 92, 93,
- 94, 95, 96, 54, 97, 55, 56, 88, 161, 98,
- -66, -66, -66, -66, -66, -66, -66, -66, 81, 82,
- -66, -66, 90, 91, 152, 55, 56, 140, 61, 57,
- 112, 97, 84, 123, 58, 123, 121, 117, 85, 125,
- 149, 62, 167, -30, 88, 63, -30, -30, -30, -30,
- -30, -30, -30, -30, -30, 89, 72, -30, -30, 90,
- 91, -30, 92, 93, 94, 95, 96, 119, 97, 127,
- 144, -75, 88, 98, -75, -75, -75, -75, -75, -75,
- -75, -75, -75, 74, 75, -75, -75, 90, 91, -75,
- -75, -75, -75, -75, -75, 76, 97, 77, 78, -2,
- 4, 121, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 86, 87, 14, 15, 16, 129, 17, 18, 19,
- 20, 21, 22, 88, 23, 135, 136, -43, -43, 24,
- -43, -43, -43, -43, 89, 146, -43, -43, 90, 91,
- 104, 105, 106, 107, 155, 7, 8, 97, 10, 11,
- 12, 13, 108, 148, 14, 15, 158, 159, 160, 147,
- 151, 81, 82, 163, 130, 165, 155, 81, 82, 82,
- 24, 113, 116, 157, 124, 171, 115, 120, 162, 128,
- 72, 81, 82, 153, 81, 82, 154, 81, 82, 166,
- 81, 82, 164, 168, 169, 170, 172, 173, 174, 65,
- 71, 83, 0, 150, 111
+ 46, 47, 3, 49, 79, 80, 52, 135, 136, 84,
+ 161, 162, 163, 158, 119, 85, 127, 43, 168, 147,
+ 170, 111, 114, 48, 124, 125, 124, 125, 133, 134,
+ 176, 81, 82, 53, 139, 55, 56, 140, 141, 57,
+ 54, 145, -28, 88, 58, -28, -28, -28, -28, -28,
+ -28, -28, -28, -28, 89, 50, -28, -28, 90, 91,
+ -28, 92, 93, 94, 95, 96, 97, 165, 98, 121,
+ 164, 129, 166, 99, 6, 7, 8, 9, 10, 11,
+ 12, 13, 44, 45, 14, 15, 155, 142, 55, 56,
+ 7, 8, 57, 10, 11, 12, 13, 58, 51, 14,
+ 15, 24, 152, -30, 88, 172, -30, -30, -30, -30,
+ -30, -30, -30, -30, -30, 89, 24, -30, -30, 90,
+ 91, -30, 92, 93, 94, 95, 96, 97, 61, 98,
+ 55, 56, -81, 88, 99, -81, -81, -81, -81, -81,
+ -81, -81, -81, -81, 81, 82, -81, -81, 90, 91,
+ -81, -81, -81, -81, -81, -81, 132, 62, 98, 81,
+ 82, 115, 118, 123, 126, 117, 122, 63, 130, 72,
+ -2, 4, 182, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 74, 75, 14, 15, 16, 146, 17, 18,
+ 19, 20, 21, 22, 76, 88, 23, 149, 77, -49,
+ -49, 24, -49, -49, -49, -49, 89, 78, -49, -49,
+ 90, 91, 106, 107, 108, 109, 72, 81, 82, 86,
+ 98, 87, 131, 88, 137, 110, -72, -72, -72, -72,
+ -72, -72, -72, -72, 138, 151, -72, -72, 90, 91,
+ 156, 81, 82, 157, 81, 82, 150, 154, 98, 171,
+ 81, 82, 82, 123, 158, 160, 167, 169, 173, 174,
+ 175, 113, 179, 180, 177, 181, 65, 153, 0, 83,
+ 0, 0, 0, 0, 0, 71
};
static const short int yycheck[] =
{
- 5, 6, 0, 8, 57, 58, 11, 84, 85, 30,
- 4, 5, 6, 7, 8, 9, 10, 11, 30, 96,
- 14, 15, 24, 25, 26, 25, 26, 29, 81, 82,
- 66, 30, 34, 69, 30, 71, 30, 90, 91, 0,
- 1, 94, 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 25, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 25, 25, 25, 26, 1, 145, 30,
- 4, 5, 6, 7, 8, 9, 10, 11, 31, 32,
- 14, 15, 16, 17, 137, 25, 26, 92, 30, 29,
- 66, 25, 27, 69, 34, 71, 30, 68, 33, 70,
- 105, 1, 155, 0, 1, 1, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 30, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 68, 25, 70,
- 25, 0, 1, 30, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 30, 30, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 30, 25, 30, 30, 0,
- 1, 30, 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 30, 30, 14, 15, 16, 30, 18, 19, 20,
- 21, 22, 23, 1, 25, 30, 30, 5, 6, 30,
- 8, 9, 10, 11, 12, 1, 14, 15, 16, 17,
- 18, 19, 20, 21, 14, 5, 6, 25, 8, 9,
- 10, 11, 30, 30, 14, 15, 142, 143, 144, 13,
- 25, 31, 32, 149, 28, 151, 14, 31, 32, 32,
- 30, 67, 68, 30, 70, 161, 67, 68, 30, 70,
- 30, 31, 32, 30, 31, 32, 30, 31, 32, 30,
- 31, 32, 30, 30, 30, 30, 30, 30, 30, 29,
- 40, 59, -1, 106, 66
+ 5, 6, 0, 8, 57, 58, 11, 84, 85, 28,
+ 144, 145, 146, 14, 68, 34, 70, 31, 152, 96,
+ 154, 66, 66, 31, 69, 69, 71, 71, 81, 82,
+ 164, 32, 33, 26, 25, 26, 27, 90, 91, 30,
+ 26, 94, 0, 1, 35, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 31, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 26, 26, 68,
+ 147, 70, 31, 31, 4, 5, 6, 7, 8, 9,
+ 10, 11, 26, 27, 14, 15, 139, 92, 26, 27,
+ 5, 6, 30, 8, 9, 10, 11, 35, 31, 14,
+ 15, 31, 107, 0, 1, 158, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 31, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 31, 26,
+ 26, 27, 0, 1, 31, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 32, 33, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 29, 1, 26, 32,
+ 33, 67, 68, 31, 70, 67, 68, 1, 70, 31,
+ 0, 1, 177, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 31, 31, 14, 15, 16, 26, 18, 19,
+ 20, 21, 22, 23, 31, 1, 26, 1, 31, 5,
+ 6, 31, 8, 9, 10, 11, 12, 31, 14, 15,
+ 16, 17, 18, 19, 20, 21, 31, 32, 33, 31,
+ 26, 31, 31, 1, 31, 31, 4, 5, 6, 7,
+ 8, 9, 10, 11, 31, 31, 14, 15, 16, 17,
+ 31, 32, 33, 31, 32, 33, 13, 26, 26, 31,
+ 32, 33, 33, 31, 14, 31, 31, 31, 31, 31,
+ 31, 66, 31, 31, 34, 31, 29, 108, -1, 59,
+ -1, -1, -1, -1, -1, 40
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const unsigned char yystos[] =
{
- 0, 36, 37, 0, 1, 3, 4, 5, 6, 7,
+ 0, 37, 38, 0, 1, 3, 4, 5, 6, 7,
8, 9, 10, 11, 14, 15, 16, 18, 19, 20,
- 21, 22, 23, 25, 30, 38, 39, 41, 42, 43,
- 44, 47, 48, 50, 54, 56, 58, 59, 61, 63,
- 64, 65, 72, 30, 25, 26, 71, 71, 30, 71,
- 30, 30, 71, 25, 25, 25, 26, 29, 34, 75,
- 76, 30, 1, 1, 45, 45, 51, 53, 57, 68,
- 62, 68, 30, 73, 30, 30, 30, 30, 30, 75,
- 75, 31, 32, 73, 27, 33, 30, 30, 1, 12,
- 16, 17, 19, 20, 21, 22, 23, 25, 30, 40,
- 46, 66, 67, 69, 18, 19, 20, 21, 30, 40,
- 52, 67, 69, 39, 49, 72, 39, 50, 55, 61,
- 72, 30, 40, 69, 39, 50, 60, 61, 72, 30,
- 28, 75, 75, 76, 76, 30, 30, 24, 75, 75,
- 71, 70, 71, 75, 25, 76, 1, 13, 30, 71,
- 70, 25, 75, 30, 30, 14, 74, 30, 74, 74,
- 74, 76, 30, 74, 30, 74, 30, 75, 30, 30,
- 30, 74, 30, 30, 30
+ 21, 22, 23, 26, 31, 39, 40, 42, 43, 44,
+ 45, 51, 52, 54, 58, 60, 62, 63, 65, 67,
+ 68, 69, 76, 31, 26, 27, 75, 75, 31, 75,
+ 31, 31, 75, 26, 26, 26, 27, 30, 35, 79,
+ 80, 31, 1, 1, 46, 46, 55, 57, 61, 72,
+ 66, 72, 31, 77, 31, 31, 31, 31, 31, 79,
+ 79, 32, 33, 77, 28, 34, 31, 31, 1, 12,
+ 16, 17, 19, 20, 21, 22, 23, 24, 26, 31,
+ 41, 47, 48, 70, 71, 73, 18, 19, 20, 21,
+ 31, 41, 56, 71, 73, 40, 53, 76, 40, 54,
+ 59, 65, 76, 31, 41, 73, 40, 54, 64, 65,
+ 76, 31, 29, 79, 79, 80, 80, 31, 31, 25,
+ 79, 79, 75, 74, 75, 79, 26, 80, 49, 1,
+ 13, 31, 75, 74, 26, 79, 31, 31, 14, 78,
+ 31, 78, 78, 78, 80, 26, 31, 31, 78, 31,
+ 78, 31, 79, 31, 31, 31, 78, 34, 50, 31,
+ 31, 31, 75
};
-#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
-# define YYSIZE_T __SIZE_TYPE__
-#endif
-#if ! defined (YYSIZE_T) && defined (size_t)
-# define YYSIZE_T size_t
-#endif
-#if ! defined (YYSIZE_T)
-# if defined (__STDC__) || defined (__cplusplus)
-# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
-# define YYSIZE_T size_t
-# endif
-#endif
-#if ! defined (YYSIZE_T)
-# define YYSIZE_T unsigned int
-#endif
-
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
@@ -709,8 +765,8 @@ do \
goto yybackup; \
} \
else \
- { \
- yyerror ("syntax error: cannot back up");\
+ { \
+ yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (0)
@@ -789,7 +845,7 @@ do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
- yysymprint (stderr, \
+ yysymprint (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
@@ -837,13 +893,13 @@ yy_reduce_print (yyrule)
#endif
{
int yyi;
- unsigned int yylno = yyrline[yyrule];
- YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ",
+ unsigned long int yylno = yyrline[yyrule];
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu), ",
yyrule - 1, yylno);
/* Print the symbols being reduced, and their result. */
for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++)
- YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]);
- YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]);
+ YYFPRINTF (stderr, "%s ", yytname[yyrhs[yyi]]);
+ YYFPRINTF (stderr, "-> %s\n", yytname[yyr1[yyrule]]);
}
# define YY_REDUCE_PRINT(Rule) \
@@ -872,7 +928,7 @@ int yydebug;
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
- SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH)
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
@@ -896,7 +952,7 @@ yystrlen (yystr)
const char *yystr;
# endif
{
- register const char *yys = yystr;
+ const char *yys = yystr;
while (*yys++ != '\0')
continue;
@@ -921,8 +977,8 @@ yystpcpy (yydest, yysrc)
const char *yysrc;
# endif
{
- register char *yyd = yydest;
- register const char *yys = yysrc;
+ char *yyd = yydest;
+ const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
@@ -932,7 +988,55 @@ yystpcpy (yydest, yysrc)
# endif
# endif
-#endif /* !YYERROR_VERBOSE */
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ size_t yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return yystpcpy (yyres, yystr) - yyres;
+}
+# endif
+
+#endif /* YYERROR_VERBOSE */
@@ -998,7 +1102,7 @@ yydestruct (yymsg, yytype, yyvaluep)
switch (yytype)
{
- case 48: /* choice_entry */
+ case 52: /* "choice_entry" */
{
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1008,7 +1112,7 @@ yydestruct (yymsg, yytype, yyvaluep)
};
break;
- case 54: /* if_entry */
+ case 58: /* "if_entry" */
{
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1018,7 +1122,7 @@ yydestruct (yymsg, yytype, yyvaluep)
};
break;
- case 59: /* menu_entry */
+ case 63: /* "menu_entry" */
{
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1082,13 +1186,13 @@ yyparse (void)
#else
int
yyparse ()
-
+ ;
#endif
#endif
{
- register int yystate;
- register int yyn;
+ int yystate;
+ int yyn;
int yyresult;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
@@ -1106,12 +1210,12 @@ yyparse ()
/* The state stack. */
short int yyssa[YYINITDEPTH];
short int *yyss = yyssa;
- register short int *yyssp;
+ short int *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs = yyvsa;
- register YYSTYPE *yyvsp;
+ YYSTYPE *yyvsp;
@@ -1143,9 +1247,6 @@ yyparse ()
yyssp = yyss;
yyvsp = yyvs;
-
- yyvsp[0] = yylval;
-
goto yysetstate;
/*------------------------------------------------------------.
@@ -1178,7 +1279,7 @@ yyparse ()
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
- yyoverflow ("parser stack overflow",
+ yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
@@ -1189,11 +1290,11 @@ yyparse ()
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
- goto yyoverflowlab;
+ goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
- goto yyoverflowlab;
+ goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
@@ -1203,7 +1304,7 @@ yyparse ()
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
- goto yyoverflowlab;
+ goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss);
YYSTACK_RELOCATE (yyvs);
@@ -1403,7 +1504,7 @@ yyreduce:
;}
break;
- case 37:
+ case 38:
{
menu_set_type((yyvsp[-2].id)->stype);
@@ -1413,7 +1514,7 @@ yyreduce:
;}
break;
- case 38:
+ case 39:
{
menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
@@ -1421,7 +1522,7 @@ yyreduce:
;}
break;
- case 39:
+ case 40:
{
menu_add_expr(P_DEFAULT, (yyvsp[-2].expr), (yyvsp[-1].expr));
@@ -1433,7 +1534,7 @@ yyreduce:
;}
break;
- case 40:
+ case 41:
{
menu_add_symbol(P_SELECT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
@@ -1441,7 +1542,7 @@ yyreduce:
;}
break;
- case 41:
+ case 42:
{
menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[-3].symbol), (yyvsp[-2].symbol)), (yyvsp[-1].expr));
@@ -1449,7 +1550,29 @@ yyreduce:
;}
break;
- case 42:
+ case 45:
+
+ {
+ struct kconf_id *id = kconf_id_lookup((yyvsp[-1].string), strlen((yyvsp[-1].string)));
+ if (id && id->flags & TF_OPTION)
+ menu_add_option(id->token, (yyvsp[0].string));
+ else
+ zconfprint("warning: ignoring unknown option %s", (yyvsp[-1].string));
+ free((yyvsp[-1].string));
+;}
+ break;
+
+ case 46:
+
+ { (yyval.string) = NULL; ;}
+ break;
+
+ case 47:
+
+ { (yyval.string) = (yyvsp[0].string); ;}
+ break;
+
+ case 48:
{
struct symbol *sym = sym_lookup(NULL, 0);
@@ -1460,14 +1583,14 @@ yyreduce:
;}
break;
- case 43:
+ case 49:
{
(yyval.menu) = menu_add_menu();
;}
break;
- case 44:
+ case 50:
{
if (zconf_endtoken((yyvsp[0].id), T_CHOICE, T_ENDCHOICE)) {
@@ -1477,7 +1600,7 @@ yyreduce:
;}
break;
- case 52:
+ case 58:
{
menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
@@ -1485,7 +1608,7 @@ yyreduce:
;}
break;
- case 53:
+ case 59:
{
if ((yyvsp[-2].id)->stype == S_BOOLEAN || (yyvsp[-2].id)->stype == S_TRISTATE) {
@@ -1498,7 +1621,7 @@ yyreduce:
;}
break;
- case 54:
+ case 60:
{
current_entry->sym->flags |= SYMBOL_OPTIONAL;
@@ -1506,7 +1629,7 @@ yyreduce:
;}
break;
- case 55:
+ case 61:
{
if ((yyvsp[-3].id)->stype == S_UNKNOWN) {
@@ -1518,7 +1641,7 @@ yyreduce:
;}
break;
- case 58:
+ case 64:
{
printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
@@ -1528,7 +1651,7 @@ yyreduce:
;}
break;
- case 59:
+ case 65:
{
if (zconf_endtoken((yyvsp[0].id), T_IF, T_ENDIF)) {
@@ -1538,7 +1661,7 @@ yyreduce:
;}
break;
- case 65:
+ case 71:
{
menu_add_entry(NULL);
@@ -1547,14 +1670,14 @@ yyreduce:
;}
break;
- case 66:
+ case 72:
{
(yyval.menu) = menu_add_menu();
;}
break;
- case 67:
+ case 73:
{
if (zconf_endtoken((yyvsp[0].id), T_MENU, T_ENDMENU)) {
@@ -1564,7 +1687,7 @@ yyreduce:
;}
break;
- case 73:
+ case 79:
{
printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
@@ -1572,7 +1695,7 @@ yyreduce:
;}
break;
- case 74:
+ case 80:
{
menu_add_entry(NULL);
@@ -1581,14 +1704,14 @@ yyreduce:
;}
break;
- case 75:
+ case 81:
{
menu_end_entry();
;}
break;
- case 76:
+ case 82:
{
printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
@@ -1596,14 +1719,14 @@ yyreduce:
;}
break;
- case 77:
+ case 83:
{
current_entry->sym->help = (yyvsp[0].string);
;}
break;
- case 82:
+ case 88:
{
menu_add_dep((yyvsp[-1].expr));
@@ -1611,7 +1734,7 @@ yyreduce:
;}
break;
- case 83:
+ case 89:
{
menu_add_dep((yyvsp[-1].expr));
@@ -1619,7 +1742,7 @@ yyreduce:
;}
break;
- case 84:
+ case 90:
{
menu_add_dep((yyvsp[-1].expr));
@@ -1627,87 +1750,88 @@ yyreduce:
;}
break;
- case 86:
+ case 92:
{
menu_add_prompt(P_PROMPT, (yyvsp[-1].string), (yyvsp[0].expr));
;}
break;
- case 89:
+ case 95:
{ (yyval.id) = (yyvsp[-1].id); ;}
break;
- case 90:
+ case 96:
{ (yyval.id) = (yyvsp[-1].id); ;}
break;
- case 91:
+ case 97:
{ (yyval.id) = (yyvsp[-1].id); ;}
break;
- case 94:
+ case 100:
{ (yyval.expr) = NULL; ;}
break;
- case 95:
+ case 101:
{ (yyval.expr) = (yyvsp[0].expr); ;}
break;
- case 96:
+ case 102:
{ (yyval.expr) = expr_alloc_symbol((yyvsp[0].symbol)); ;}
break;
- case 97:
+ case 103:
{ (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;}
break;
- case 98:
+ case 104:
{ (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;}
break;
- case 99:
+ case 105:
{ (yyval.expr) = (yyvsp[-1].expr); ;}
break;
- case 100:
+ case 106:
{ (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[0].expr)); ;}
break;
- case 101:
+ case 107:
{ (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[-2].expr), (yyvsp[0].expr)); ;}
break;
- case 102:
+ case 108:
{ (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[-2].expr), (yyvsp[0].expr)); ;}
break;
- case 103:
+ case 109:
{ (yyval.symbol) = sym_lookup((yyvsp[0].string), 0); free((yyvsp[0].string)); ;}
break;
- case 104:
+ case 110:
{ (yyval.symbol) = sym_lookup((yyvsp[0].string), 1); free((yyvsp[0].string)); ;}
break;
+ default: break;
}
-/* Line 1037 of yacc.c. */
+/* Line 1126 of yacc.c. */
yyvsp -= yylen;
@@ -1747,12 +1871,36 @@ yyerrlab:
if (YYPACT_NINF < yyn && yyn < YYLAST)
{
- YYSIZE_T yysize = 0;
int yytype = YYTRANSLATE (yychar);
- const char* yyprefix;
- char *yymsg;
+ YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
+ YYSIZE_T yysize = yysize0;
+ YYSIZE_T yysize1;
+ int yysize_overflow = 0;
+ char *yymsg = 0;
+# define YYERROR_VERBOSE_ARGS_MAXIMUM 5
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
+#if 0
+ /* This is so xgettext sees the translatable formats that are
+ constructed on the fly. */
+ YY_("syntax error, unexpected %s");
+ YY_("syntax error, unexpected %s, expecting %s");
+ YY_("syntax error, unexpected %s, expecting %s or %s");
+ YY_("syntax error, unexpected %s, expecting %s or %s or %s");
+ YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
+#endif
+ char *yyfmt;
+ char const *yyf;
+ static char const yyunexpected[] = "syntax error, unexpected %s";
+ static char const yyexpecting[] = ", expecting %s";
+ static char const yyor[] = " or %s";
+ char yyformat[sizeof yyunexpected
+ + sizeof yyexpecting - 1
+ + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
+ * (sizeof yyor - 1))];
+ char const *yyprefix = yyexpecting;
+
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
@@ -1760,48 +1908,68 @@ yyerrlab:
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
- int yycount = 0;
+ int yycount = 1;
+
+ yyarg[0] = yytname[yytype];
+ yyfmt = yystpcpy (yyformat, yyunexpected);
- yyprefix = ", expecting ";
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
- yysize += yystrlen (yyprefix) + yystrlen (yytname [yyx]);
- yycount += 1;
- if (yycount == 5)
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
- yysize = 0;
+ yycount = 1;
+ yysize = yysize0;
+ yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
+ yyarg[yycount++] = yytname[yyx];
+ yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+ yysize_overflow |= yysize1 < yysize;
+ yysize = yysize1;
+ yyfmt = yystpcpy (yyfmt, yyprefix);
+ yyprefix = yyor;
}
- yysize += (sizeof ("syntax error, unexpected ")
- + yystrlen (yytname[yytype]));
- yymsg = (char *) YYSTACK_ALLOC (yysize);
- if (yymsg != 0)
- {
- char *yyp = yystpcpy (yymsg, "syntax error, unexpected ");
- yyp = yystpcpy (yyp, yytname[yytype]);
- if (yycount < 5)
+ yyf = YY_(yyformat);
+ yysize1 = yysize + yystrlen (yyf);
+ yysize_overflow |= yysize1 < yysize;
+ yysize = yysize1;
+
+ if (!yysize_overflow && yysize <= YYSTACK_ALLOC_MAXIMUM)
+ yymsg = (char *) YYSTACK_ALLOC (yysize);
+ if (yymsg)
+ {
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ char *yyp = yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyf))
{
- yyprefix = ", expecting ";
- for (yyx = yyxbegin; yyx < yyxend; ++yyx)
- if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
- {
- yyp = yystpcpy (yyp, yyprefix);
- yyp = yystpcpy (yyp, yytname[yyx]);
- yyprefix = " or ";
- }
+ if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyf += 2;
+ }
+ else
+ {
+ yyp++;
+ yyf++;
+ }
}
yyerror (yymsg);
YYSTACK_FREE (yymsg);
}
else
- yyerror ("syntax error; also virtual memory exhausted");
+ {
+ yyerror (YY_("syntax error"));
+ goto yyexhaustedlab;
+ }
}
else
#endif /* YYERROR_VERBOSE */
- yyerror ("syntax error");
+ yyerror (YY_("syntax error"));
}
@@ -1813,18 +1981,9 @@ yyerrlab:
if (yychar <= YYEOF)
{
- /* If at end of input, pop the error token,
- then the rest of the stack, then return failure. */
+ /* Return failure if at end of input. */
if (yychar == YYEOF)
- for (;;)
- {
-
- YYPOPSTACK;
- if (yyssp == yyss)
- YYABORT;
- yydestruct ("Error: popping",
- yystos[*yyssp], yyvsp);
- }
+ YYABORT;
}
else
{
@@ -1843,12 +2002,11 @@ yyerrlab:
`---------------------------------------------------*/
yyerrorlab:
-#ifdef __GNUC__
- /* Pacify GCC when the user code never invokes YYERROR and the label
- yyerrorlab therefore never appears in user code. */
+ /* Pacify compilers like GCC when the user code never invokes
+ YYERROR and the label yyerrorlab therefore never appears in user
+ code. */
if (0)
goto yyerrorlab;
-#endif
yyvsp -= yylen;
yyssp -= yylen;
@@ -1911,23 +2069,29 @@ yyacceptlab:
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
- yydestruct ("Error: discarding lookahead",
- yytoken, &yylval);
- yychar = YYEMPTY;
yyresult = 1;
goto yyreturn;
#ifndef yyoverflow
-/*----------------------------------------------.
-| yyoverflowlab -- parser overflow comes here. |
-`----------------------------------------------*/
-yyoverflowlab:
- yyerror ("parser stack overflow");
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
+ if (yychar != YYEOF && yychar != YYEMPTY)
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp);
+ YYPOPSTACK;
+ }
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
@@ -1948,7 +2112,9 @@ void conf_parse(const char *name)
sym_init();
menu_init();
- modules_sym = sym_lookup("MODULES", 0);
+ modules_sym = sym_lookup(NULL, 0);
+ modules_sym->type = S_BOOLEAN;
+ modules_sym->flags |= SYMBOL_AUTO;
rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
#if YYDEBUG
@@ -1958,6 +2124,12 @@ void conf_parse(const char *name)
zconfparse();
if (zconfnerrs)
exit(1);
+ if (!modules_sym->prop) {
+ struct property *prop;
+
+ prop = prop_alloc(P_DEFAULT, modules_sym);
+ prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
+ }
menu_finalize(&rootmenu);
for_all_symbols(i, sym) {
sym_check_deps(sym);
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 1f61fba6aa28..ab44feb3c600 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -71,6 +71,7 @@ static struct menu *current_menu, *current_entry;
%token <id>T_DEFAULT
%token <id>T_SELECT
%token <id>T_RANGE
+%token <id>T_OPTION
%token <id>T_ON
%token <string> T_WORD
%token <string> T_WORD_QUOTE
@@ -91,6 +92,7 @@ static struct menu *current_menu, *current_entry;
%type <id> end
%type <id> option_name
%type <menu> if_entry menu_entry choice_entry
+%type <string> symbol_option_arg
%destructor {
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -173,6 +175,7 @@ menuconfig_stmt: menuconfig_entry_start config_option_list
config_option_list:
/* empty */
| config_option_list config_option
+ | config_option_list symbol_option
| config_option_list depends
| config_option_list help
| config_option_list option_error
@@ -215,6 +218,26 @@ config_option: T_RANGE symbol symbol if_expr T_EOL
printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
};
+symbol_option: T_OPTION symbol_option_list T_EOL
+;
+
+symbol_option_list:
+ /* empty */
+ | symbol_option_list T_WORD symbol_option_arg
+{
+ struct kconf_id *id = kconf_id_lookup($2, strlen($2));
+ if (id && id->flags & TF_OPTION)
+ menu_add_option(id->token, $3);
+ else
+ zconfprint("warning: ignoring unknown option %s", $2);
+ free($2);
+};
+
+symbol_option_arg:
+ /* empty */ { $$ = NULL; }
+ | T_EQUAL prompt { $$ = $2; }
+;
+
/* choice entry */
choice: T_CHOICE T_EOL
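
For illustration, the new symbol_option rules above let a config entry carry an "option" line, optionally followed by "=<word or quoted word>". The option keyword is resolved through kconf_id_lookup() with TF_OPTION, so -- assuming a "modules" keyword is registered in that table, which this patch does not show -- a Kconfig fragment exercising the grammar might look like:

	config MODULES
		bool "Enable loadable module support"
		option modules

This is only a sketch of the syntax; the conf_parse() hunk further down then falls back to a default property pointing at a symbol named MODULES when modules_sym ends up with no property after parsing.
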
@@ -458,7 +481,9 @@ void conf_parse(const char *name)
sym_init();
menu_init();
- modules_sym = sym_lookup("MODULES", 0);
+ modules_sym = sym_lookup(NULL, 0);
+ modules_sym->type = S_BOOLEAN;
+ modules_sym->flags |= SYMBOL_AUTO;
rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
#if YYDEBUG
@@ -468,6 +493,12 @@ void conf_parse(const char *name)
zconfparse();
if (zconfnerrs)
exit(1);
+ if (!modules_sym->prop) {
+ struct property *prop;
+
+ prop = prop_alloc(P_DEFAULT, modules_sym);
+ prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
+ }
menu_finalize(&rootmenu);
for_all_symbols(i, sym) {
sym_check_deps(sym);
diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c
index 3c92c83733f4..725d61c0fb43 100644
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -28,7 +28,7 @@ main(int argc, char **argv)
printf("#define KERNEL_ELFCLASS ELFCLASS64\n");
break;
default:
- abort();
+ exit(1);
}
switch (ei[EI_DATA]) {
case ELFDATA2LSB:
@@ -38,7 +38,7 @@ main(int argc, char **argv)
printf("#define KERNEL_ELFDATA ELFDATA2MSB\n");
break;
default:
- abort();
+ exit(1);
}
if (sizeof(unsigned long) == 4) {
@@ -53,7 +53,7 @@ main(int argc, char **argv)
else if (memcmp(endian_test.c, "\x02\x01", 2) == 0)
printf("#define HOST_ELFDATA ELFDATA2LSB\n");
else
- abort();
+ exit(1);
if ((strcmp(argv[1], "v850") == 0) || (strcmp(argv[1], "h8300") == 0))
printf("#define MODULE_SYMBOL_PREFIX \"_\"\n");
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index d0f86ed43f7a..0dd16177642d 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -13,6 +13,7 @@
#include <ctype.h>
#include "modpost.h"
+#include "../../include/linux/license.h"
/* Are we using CONFIG_MODVERSIONS? */
int modversions = 0;
@@ -22,6 +23,8 @@ int have_vmlinux = 0;
static int all_versions = 0;
/* If we are modposting external module set to 1 */
static int external_module = 0;
+/* How a symbol is exported */
+enum export {export_plain, export_gpl, export_gpl_future, export_unknown};
void fatal(const char *fmt, ...)
{
@@ -97,6 +100,7 @@ static struct module *new_module(char *modname)
/* add to list */
mod->name = p;
+ mod->gpl_compatible = -1;
mod->next = modules;
modules = mod;
@@ -118,6 +122,7 @@ struct symbol {
unsigned int kernel:1; /* 1 if symbol is from kernel
* (only for external modules) **/
unsigned int preloaded:1; /* 1 if symbol from Module.symvers */
+ enum export export; /* Type of export */
char name[0];
};
@@ -153,7 +158,8 @@ static struct symbol *alloc_symbol(const char *name, unsigned int weak,
}
/* For the hash of exported symbols */
-static struct symbol *new_symbol(const char *name, struct module *module)
+static struct symbol *new_symbol(const char *name, struct module *module,
+ enum export export)
{
unsigned int hash;
struct symbol *new;
@@ -161,6 +167,7 @@ static struct symbol *new_symbol(const char *name, struct module *module)
hash = tdb_hash(name) % SYMBOL_HASH_SIZE;
new = symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]);
new->module = module;
+ new->export = export;
return new;
}
@@ -179,16 +186,55 @@ static struct symbol *find_symbol(const char *name)
return NULL;
}
+static struct {
+ const char *str;
+ enum export export;
+} export_list[] = {
+ { .str = "EXPORT_SYMBOL", .export = export_plain },
+ { .str = "EXPORT_SYMBOL_GPL", .export = export_gpl },
+ { .str = "EXPORT_SYMBOL_GPL_FUTURE", .export = export_gpl_future },
+ { .str = "(unknown)", .export = export_unknown },
+};
+
+
+static const char *export_str(enum export ex)
+{
+ return export_list[ex].str;
+}
+
+static enum export export_no(const char * s)
+{
+ int i;
+ for (i = 0; export_list[i].export != export_unknown; i++) {
+ if (strcmp(export_list[i].str, s) == 0)
+ return export_list[i].export;
+ }
+ return export_unknown;
+}
+
+static enum export export_from_sec(struct elf_info *elf, Elf_Section sec)
+{
+ if (sec == elf->export_sec)
+ return export_plain;
+ else if (sec == elf->export_gpl_sec)
+ return export_gpl;
+ else if (sec == elf->export_gpl_future_sec)
+ return export_gpl_future;
+ else
+ return export_unknown;
+}
+
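
To relate these export classes to kernel source (the foo_* names below are invented for illustration): the EXPORT_SYMBOL family of macros places each exported symbol's table entry into one of the __ksymtab sections, and export_from_sec() above simply maps the section a symbol was found in back to an export class; the section indices themselves are recorded by parse_elf() further down when it walks the section headers.

	/* illustration only -- function names are made up */
	#include <linux/module.h>

	int foo_plain(void)  { return 0; }
	EXPORT_SYMBOL(foo_plain);              /* entry lands in __ksymtab            */

	int foo_gpl(void)    { return 0; }
	EXPORT_SYMBOL_GPL(foo_gpl);            /* entry lands in __ksymtab_gpl        */

	int foo_future(void) { return 0; }
	EXPORT_SYMBOL_GPL_FUTURE(foo_future);  /* entry lands in __ksymtab_gpl_future */
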
/**
* Add an exported symbol - it may have already been added without a
* CRC, in this case just update the CRC
**/
-static struct symbol *sym_add_exported(const char *name, struct module *mod)
+static struct symbol *sym_add_exported(const char *name, struct module *mod,
+ enum export export)
{
struct symbol *s = find_symbol(name);
if (!s) {
- s = new_symbol(name, mod);
+ s = new_symbol(name, mod, export);
} else {
if (!s->preloaded) {
warn("%s: '%s' exported twice. Previous export "
@@ -200,16 +246,17 @@ static struct symbol *sym_add_exported(const char *name, struct module *mod)
s->preloaded = 0;
s->vmlinux = is_vmlinux(mod->name);
s->kernel = 0;
+ s->export = export;
return s;
}
static void sym_update_crc(const char *name, struct module *mod,
- unsigned int crc)
+ unsigned int crc, enum export export)
{
struct symbol *s = find_symbol(name);
if (!s)
- s = new_symbol(name, mod);
+ s = new_symbol(name, mod, export);
s->crc = crc;
s->crc_valid = 1;
}
@@ -283,7 +330,7 @@ static void parse_elf(struct elf_info *info, const char *filename)
hdr = grab_file(filename, &info->size);
if (!hdr) {
perror(filename);
- abort();
+ exit(1);
}
info->hdr = hdr;
if (info->size < sizeof(*hdr))
@@ -309,13 +356,21 @@ static void parse_elf(struct elf_info *info, const char *filename)
for (i = 1; i < hdr->e_shnum; i++) {
const char *secstrings
= (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ const char *secname;
if (sechdrs[i].sh_offset > info->size)
goto truncated;
- if (strcmp(secstrings+sechdrs[i].sh_name, ".modinfo") == 0) {
+ secname = secstrings + sechdrs[i].sh_name;
+ if (strcmp(secname, ".modinfo") == 0) {
info->modinfo = (void *)hdr + sechdrs[i].sh_offset;
info->modinfo_len = sechdrs[i].sh_size;
- }
+ } else if (strcmp(secname, "__ksymtab") == 0)
+ info->export_sec = i;
+ else if (strcmp(secname, "__ksymtab_gpl") == 0)
+ info->export_gpl_sec = i;
+ else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
+ info->export_gpl_future_sec = i;
+
if (sechdrs[i].sh_type != SHT_SYMTAB)
continue;
@@ -353,6 +408,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
Elf_Sym *sym, const char *symname)
{
unsigned int crc;
+ enum export export = export_from_sec(info, sym->st_shndx);
switch (sym->st_shndx) {
case SHN_COMMON:
@@ -362,7 +418,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
/* CRC'd symbol */
if (memcmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
crc = (unsigned int) sym->st_value;
- sym_update_crc(symname + strlen(CRC_PFX), mod, crc);
+ sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
+ export);
}
break;
case SHN_UNDEF:
@@ -406,7 +463,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
default:
/* All exported symbols */
if (memcmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) {
- sym_add_exported(symname + strlen(KSYMTAB_PFX), mod);
+ sym_add_exported(symname + strlen(KSYMTAB_PFX), mod,
+ export);
}
if (strcmp(symname, MODULE_SYMBOL_PREFIX "init_module") == 0)
mod->has_init = 1;
@@ -437,13 +495,18 @@ static char *next_string(char *string, unsigned long *secsize)
return string;
}
-static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
- const char *tag)
+static char *get_next_modinfo(void *modinfo, unsigned long modinfo_len,
+ const char *tag, char *info)
{
char *p;
unsigned int taglen = strlen(tag);
unsigned long size = modinfo_len;
+ if (info) {
+ size -= info - (char *)modinfo;
+ modinfo = next_string(info, &size);
+ }
+
for (p = modinfo; p; p = next_string(p, &size)) {
if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
return p + taglen + 1;
@@ -451,6 +514,13 @@ static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
return NULL;
}
+static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
+ const char *tag)
+
+{
+ return get_next_modinfo(modinfo, modinfo_len, tag, NULL);
+}
+
/**
* Test if string s ends in string sub
* return 0 if match
@@ -821,6 +891,10 @@ static int init_section_ref_ok(const char *name)
".pci_fixup_final",
".pdr",
"__param",
+ "__ex_table",
+ ".fixup",
+ ".smp_locks",
+ ".plt", /* seen on ARCH=um build on x86_64. Harmless */
NULL
};
/* Start of section names */
@@ -846,6 +920,8 @@ static int init_section_ref_ok(const char *name)
for (s = namelist3; *s; s++)
if (strstr(name, *s) != NULL)
return 1;
+ if (strrcmp(name, ".init") == 0)
+ return 1;
return 0;
}
@@ -892,6 +968,10 @@ static int exit_section_ref_ok(const char *name)
".exitcall.exit",
".eh_frame",
".stab",
+ "__ex_table",
+ ".fixup",
+ ".smp_locks",
+ ".plt", /* seen on ARCH=um build on x86_64. Harmless */
NULL
};
/* Start of section names */
@@ -921,6 +1001,7 @@ static void read_symbols(char *modname)
{
const char *symname;
char *version;
+ char *license;
struct module *mod;
struct elf_info info = { };
Elf_Sym *sym;
@@ -936,6 +1017,18 @@ static void read_symbols(char *modname)
mod->skip = 1;
}
+ license = get_modinfo(info.modinfo, info.modinfo_len, "license");
+ while (license) {
+ if (license_is_gpl_compatible(license))
+ mod->gpl_compatible = 1;
+ else {
+ mod->gpl_compatible = 0;
+ break;
+ }
+ license = get_next_modinfo(info.modinfo, info.modinfo_len,
+ "license", license);
+ }
+
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = info.strtab + sym->st_name;
@@ -992,6 +1085,41 @@ void buf_write(struct buffer *buf, const char *s, int len)
buf->pos += len;
}
+void check_license(struct module *mod)
+{
+ struct symbol *s, *exp;
+
+ for (s = mod->unres; s; s = s->next) {
+ const char *basename;
+ if (mod->gpl_compatible == 1) {
+ /* GPL-compatible modules may use all symbols */
+ continue;
+ }
+ exp = find_symbol(s->name);
+ if (!exp || exp->module == mod)
+ continue;
+ basename = strrchr(mod->name, '/');
+ if (basename)
+ basename++;
+ switch (exp->export) {
+ case export_gpl:
+ fatal("modpost: GPL-incompatible module %s "
+ "uses GPL-only symbol '%s'\n",
+ basename ? basename : mod->name,
+ exp->name);
+ break;
+ case export_gpl_future:
+ warn("modpost: GPL-incompatible module %s "
+ "uses future GPL-only symbol '%s'\n",
+ basename ? basename : mod->name,
+ exp->name);
+ break;
+ case export_plain: /* ignore */ break;
+ case export_unknown: /* ignore */ break;
+ }
+ }
+}
+
/**
* Header for the generated file
**/
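
As a sketch of what check_license() enforces (the module and symbol names here are hypothetical): a module whose MODULE_LICENSE() string is not accepted by license_is_gpl_compatible() now causes a hard modpost failure if it references an EXPORT_SYMBOL_GPL symbol, and only a warning if the symbol is EXPORT_SYMBOL_GPL_FUTURE.

	/* hypothetical out-of-tree module used to illustrate the new check */
	#include <linux/module.h>
	#include <linux/init.h>

	MODULE_LICENSE("Proprietary");   /* not GPL-compatible */

	extern int foo_gpl(void);        /* assume exported with EXPORT_SYMBOL_GPL elsewhere */

	static int __init demo_init(void)
	{
		/* this reference makes modpost fail with
		 * "GPL-incompatible module ... uses GPL-only symbol 'foo_gpl'" */
		foo_gpl();
		return 0;
	}
	module_init(demo_init);
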
@@ -1142,6 +1270,9 @@ static void write_if_changed(struct buffer *b, const char *fname)
fclose(file);
}
+/* parse Module.symvers file. line format:
+ * 0x12345678<tab>symbol<tab>module[<tab>export]
+ **/
static void read_dump(const char *fname, unsigned int kernel)
{
unsigned long size, pos = 0;
@@ -1153,7 +1284,7 @@ static void read_dump(const char *fname, unsigned int kernel)
return;
while ((line = get_next_line(&pos, file, size))) {
- char *symname, *modname, *d;
+ char *symname, *modname, *d, *export;
unsigned int crc;
struct module *mod;
struct symbol *s;
@@ -1164,8 +1295,9 @@ static void read_dump(const char *fname, unsigned int kernel)
if (!(modname = strchr(symname, '\t')))
goto fail;
*modname++ = '\0';
- if (strchr(modname, '\t'))
- goto fail;
+ if ((export = strchr(modname, '\t')) != NULL)
+ *export++ = '\0';
+
crc = strtoul(line, &d, 16);
if (*symname == '\0' || *modname == '\0' || *d != '\0')
goto fail;
@@ -1177,10 +1309,10 @@ static void read_dump(const char *fname, unsigned int kernel)
mod = new_module(NOFAIL(strdup(modname)));
mod->skip = 1;
}
- s = sym_add_exported(symname, mod);
+ s = sym_add_exported(symname, mod, export_no(export));
s->kernel = kernel;
s->preloaded = 1;
- sym_update_crc(symname, mod, crc);
+ sym_update_crc(symname, mod, crc, export_no(export));
}
return;
fail:
@@ -1210,9 +1342,10 @@ static void write_dump(const char *fname)
symbol = symbolhash[n];
while (symbol) {
if (dump_sym(symbol))
- buf_printf(&buf, "0x%08x\t%s\t%s\n",
+ buf_printf(&buf, "0x%08x\t%s\t%s\t%s\n",
symbol->crc, symbol->name,
- symbol->module->name);
+ symbol->module->name,
+ export_str(symbol->export));
symbol = symbol->next;
}
}
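
With the extra buf_printf() field, a Module.symvers line now carries the export class as a fourth tab-separated column; the CRC and symbol name below are made up for illustration, and the bracketed field in the read_dump() comment above indicates that older three-column files are still acceptable input:

	0x2d6204f1	wait_for_foo	vmlinux	EXPORT_SYMBOL_GPL
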
@@ -1263,6 +1396,12 @@ int main(int argc, char **argv)
for (mod = modules; mod; mod = mod->next) {
if (mod->skip)
continue;
+ check_license(mod);
+ }
+
+ for (mod = modules; mod; mod = mod->next) {
+ if (mod->skip)
+ continue;
buf.pos = 0;
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 861d866fcd83..2b00c6062844 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -100,6 +100,7 @@ buf_write(struct buffer *buf, const char *s, int len);
struct module {
struct module *next;
const char *name;
+ int gpl_compatible;
struct symbol *unres;
int seen;
int skip;
@@ -115,6 +116,9 @@ struct elf_info {
Elf_Shdr *sechdrs;
Elf_Sym *symtab_start;
Elf_Sym *symtab_stop;
+ Elf_Section export_sec;
+ Elf_Section export_gpl_sec;
+ Elf_Section export_gpl_future_sec;
const char *strtab;
char *modinfo;
unsigned int modinfo_len;
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 0b1038737548..df892841b118 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -73,8 +73,13 @@ echo "%ifarch ia64"
echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
echo "%else"
+echo "%ifarch ppc64"
+echo "cp vmlinux arch/powerpc/boot"
+echo "cp arch/powerpc/boot/"'$KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
+echo "%else"
echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
echo "%endif"
+echo "%endif"
echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh
new file mode 100644
index 000000000000..43098afe7431
--- /dev/null
+++ b/scripts/rt-tester/check-all.sh
@@ -0,0 +1,22 @@
+
+
+function testit ()
+{
+ printf "%-30s: " $1
+ ./rt-tester.py $1 | grep Pass
+}
+
+testit t2-l1-2rt-sameprio.tst
+testit t2-l1-pi.tst
+testit t2-l1-signal.tst
+#testit t2-l2-2rt-deadlock.tst
+testit t3-l1-pi-1rt.tst
+testit t3-l1-pi-2rt.tst
+testit t3-l1-pi-3rt.tst
+testit t3-l1-pi-signal.tst
+testit t3-l1-pi-steal.tst
+testit t3-l2-pi.tst
+testit t4-l2-pi-deboost.tst
+testit t5-l4-pi-boost-deboost.tst
+testit t5-l4-pi-boost-deboost-setsched.tst
+
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
new file mode 100644
index 000000000000..4c79660793cf
--- /dev/null
+++ b/scripts/rt-tester/rt-tester.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+#
+# rt-mutex tester
+#
+# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+import os
+import sys
+import getopt
+import shutil
+import string
+
+# Globals
+quiet = 0
+test = 0
+comments = 0
+
+sysfsprefix = "/sys/devices/system/rttest/rttest"
+statusfile = "/status"
+commandfile = "/command"
+
+# Command opcodes
+cmd_opcodes = {
+ "schedother" : "1",
+ "schedfifo" : "2",
+ "lock" : "3",
+ "locknowait" : "4",
+ "lockint" : "5",
+ "lockintnowait" : "6",
+ "lockcont" : "7",
+ "unlock" : "8",
+ "lockbkl" : "9",
+ "unlockbkl" : "10",
+ "signal" : "11",
+ "resetevent" : "98",
+ "reset" : "99",
+ }
+
+test_opcodes = {
+ "prioeq" : ["P" , "eq" , None],
+ "priolt" : ["P" , "lt" , None],
+ "priogt" : ["P" , "gt" , None],
+ "nprioeq" : ["N" , "eq" , None],
+ "npriolt" : ["N" , "lt" , None],
+ "npriogt" : ["N" , "gt" , None],
+ "unlocked" : ["M" , "eq" , 0],
+ "trylock" : ["M" , "eq" , 1],
+ "blocked" : ["M" , "eq" , 2],
+ "blockedwake" : ["M" , "eq" , 3],
+ "locked" : ["M" , "eq" , 4],
+ "opcodeeq" : ["O" , "eq" , None],
+ "opcodelt" : ["O" , "lt" , None],
+ "opcodegt" : ["O" , "gt" , None],
+ "eventeq" : ["E" , "eq" , None],
+ "eventlt" : ["E" , "lt" , None],
+ "eventgt" : ["E" , "gt" , None],
+ }
+
+# Print usage information
+def usage():
+ print "rt-tester.py <-c -h -q -t> <testfile>"
+ print " -c display comments after first command"
+ print " -h help"
+ print " -q quiet mode"
+ print " -t test mode (syntax check)"
+ print " testfile: read test specification from testfile"
+ print " otherwise from stdin"
+ return
+
+# Print progress when not in quiet mode
+def progress(str):
+ if not quiet:
+ print str
+
+# Analyse a status value
+def analyse(val, top, arg):
+
+ intval = int(val)
+
+ if top[0] == "M":
+ intval = intval / (10 ** int(arg))
+ intval = intval % 10
+ argval = top[2]
+ elif top[0] == "O":
+ argval = int(cmd_opcodes.get(arg, arg))
+ else:
+ argval = int(arg)
+
+ # progress("%d %s %d" %(intval, top[1], argval))
+
+ if top[1] == "eq" and intval == argval:
+ return 1
+ if top[1] == "lt" and intval < argval:
+ return 1
+ if top[1] == "gt" and intval > argval:
+ return 1
+ return 0
+
+# Parse the commandline
+try:
+ (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
+except getopt.GetoptError, ex:
+ usage()
+ sys.exit(1)
+
+# Parse commandline options
+for option, value in options:
+ if option == "-c":
+ comments = 1
+ elif option == "-q":
+ quiet = 1
+ elif option == "-t":
+ test = 1
+ elif option == '-h':
+ usage()
+ sys.exit(0)
+
+# Select the input source
+if arguments:
+ try:
+ fd = open(arguments[0])
+ except Exception,ex:
+ sys.stderr.write("File not found %s\n" %(arguments[0]))
+ sys.exit(1)
+else:
+ fd = sys.stdin
+
+linenr = 0
+
+# Read the test patterns
+while 1:
+
+ linenr = linenr + 1
+ line = fd.readline()
+ if not len(line):
+ break
+
+ line = line.strip()
+ parts = line.split(":")
+
+ if not parts or len(parts) < 1:
+ continue
+
+ if len(parts[0]) == 0:
+ continue
+
+ if parts[0].startswith("#"):
+ if comments > 1:
+ progress(line)
+ continue
+
+ if comments == 1:
+ comments = 2
+
+ progress(line)
+
+ cmd = parts[0].strip().lower()
+ opc = parts[1].strip().lower()
+ tid = parts[2].strip()
+ dat = parts[3].strip()
+
+ try:
+ # Test or wait for a status value
+ if cmd == "t" or cmd == "w":
+ testop = test_opcodes[opc]
+
+ fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
+ if test:
+ print fname
+ continue
+
+ while 1:
+ query = 1
+ fsta = open(fname, 'r')
+ status = fsta.readline().strip()
+ fsta.close()
+ stat = status.split(",")
+ for s in stat:
+ s = s.strip()
+ if s.startswith(testop[0]):
+ # Separate status value
+ val = s[2:].strip()
+ query = analyse(val, testop, dat)
+ break
+ if query or cmd == "t":
+ break
+
+ progress(" " + status)
+
+ if not query:
+ sys.stderr.write("Test failed in line %d\n" %(linenr))
+ sys.exit(1)
+
+ # Issue a command to the tester
+ elif cmd == "c":
+ cmdnr = cmd_opcodes[opc]
+ # Build command string and sys filename
+ cmdstr = "%s:%s" %(cmdnr, dat)
+ fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
+ if test:
+ print fname
+ continue
+ fcmd = open(fname, 'w')
+ fcmd.write(cmdstr)
+ fcmd.close()
+
+ except Exception,ex:
+ sys.stderr.write(str(ex))
+ sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
+ if not test:
+ fd.close()
+ sys.exit(1)
+
+# Normal exit pass
+print "Pass"
+sys.exit(0)
+
+
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
new file mode 100644
index 000000000000..8821f27cc8be
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
@@ -0,0 +1,99 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 1 lock
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedfifo: 0: 80
+C: schedfifo: 1: 80
+
+# T0 lock L0
+C: locknowait: 0: 0
+C: locknowait: 1: 0
+W: locked: 0: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+
+# T0 unlock L0
+C: unlock: 0: 0
+W: locked: 1: 0
+
+# Verify T0
+W: unlocked: 0: 0
+T: prioeq: 0: 80
+
+# Unlock
+C: unlock: 1: 0
+W: unlocked: 1: 0
+
+# T1,T0 lock L0
+C: locknowait: 1: 0
+C: locknowait: 0: 0
+W: locked: 1: 0
+W: blocked: 0: 0
+T: prioeq: 1: 80
+
+# T1 unlock L0
+C: unlock: 1: 0
+W: locked: 0: 0
+
+# Verify T1
+W: unlocked: 1: 0
+T: prioeq: 1: 80
+
+# Unlock and exit
+C: unlock: 0: 0
+W: unlocked: 0: 0
+
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
new file mode 100644
index 000000000000..cde1f189a02b
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-pi.tst
@@ -0,0 +1,82 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 1 lock with priority inversion
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 80
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+
+# T0 unlock L0
+C: unlock: 0: 0
+W: locked: 1: 0
+
+# Verify T0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# Unlock and exit
+C: unlock: 1: 0
+W: unlocked: 1: 0
+
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
new file mode 100644
index 000000000000..3ab0bfc49950
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-signal.tst
@@ -0,0 +1,77 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 1 lock with priority inversion
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+
+# Interrupt T1
+C: signal: 1: 0
+W: unlocked: 1: 0
+T: opcodeeq: 1: -4
+
+# Unlock and exit
+C: unlock: 0: 0
+W: unlocked: 0: 0
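One detail worth noting in the scenario above: T1 took the lock with lockintnowait, i.e. interruptibly, so after the signal it is expected to come back with an error instead of the lock. The final "T: opcodeeq: 1: -4" therefore compares the recorded return value against -4, which matches -EINTR on Linux (presumably the intended meaning here). A quick check of the constant, as a sketch:

    import errno
    print errno.EINTR    # 4 on Linux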
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
new file mode 100644
index 000000000000..f4b5d5d6215f
--- /dev/null
+++ b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
@@ -0,0 +1,89 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 2 lock
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedfifo: 0: 80
+C: schedfifo: 1: 80
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T0 lock L1
+C: lockintnowait: 0: 1
+W: blocked: 0: 1
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+
+# Make deadlock go away
+C: signal: 1: 0
+W: unlocked: 1: 0
+C: signal: 0: 0
+W: unlocked: 0: 1
+
+# Unlock and exit
+C: unlock: 0: 0
+W: unlocked: 0: 0
+C: unlock: 1: 1
+W: unlocked: 1: 1
+
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
new file mode 100644
index 000000000000..63440ca2cce9
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-1rt.tst
@@ -0,0 +1,92 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: priolt: 0: 1
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: unlocked: 2: 0
+W: locked: 1: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
new file mode 100644
index 000000000000..e5816fe67df3
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-2rt.tst
@@ -0,0 +1,93 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+T: prioeq: 1: 81
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: unlocked: 2: 0
+W: locked: 1: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
new file mode 100644
index 000000000000..718b82b5d3bb
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-3rt.tst
@@ -0,0 +1,92 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedfifo: 0: 80
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: prioeq: 0: 80
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: locked: 1: 0
+W: unlocked: 2: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
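The three t3-l1-pi-*rt scenarios above differ only in how many of the waiters are real-time: in each case the owner T0 is expected to run at the priority of its highest-priority blocked waiter (82 once T2 blocks), SCHED_OTHER waiters contribute no boost (hence the priolt checks), and T0 must fall back to its own priority as soon as it releases L0.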
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
new file mode 100644
index 000000000000..c6e213563498
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-signal.tst
@@ -0,0 +1,98 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+# Reset event counter
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set priorities
+C: schedother: 0: 0
+C: schedfifo: 1: 80
+C: schedfifo: 2: 81
+
+# T0 lock L0
+C: lock: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0, no wait in the wakeup path
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+T: prioeq: 1: 80
+
+# T2 lock L0 interruptible, no wait in the wakeup path
+C: lockintnowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 81
+T: prioeq: 1: 80
+
+# Interrupt T2
+C: signal: 2: 2
+W: unlocked: 2: 0
+T: prioeq: 1: 80
+T: prioeq: 0: 80
+
+T: locked: 0: 0
+T: blocked: 1: 0
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T1 has locked L0 and exit
+W: locked: 1: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
+
+
+
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
new file mode 100644
index 000000000000..f53749d59d79
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-steal.tst
@@ -0,0 +1,96 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI steal pending ownership
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 80
+C: schedfifo: 2: 81
+
+# T0 lock L0
+C: lock: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: lock: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T1 is in the wakeup loop
+W: blockedwake: 1: 0
+T: priolt: 0: 1
+
+# T2 lock L0
+C: lock: 2: 0
+# T1 leave wakeup loop
+C: lockcont: 1: 0
+
+# T2 must have the lock and T1 must be blocked
+W: locked: 2: 0
+W: blocked: 1: 0
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+# Wait until T1 is in the wakeup loop and let it run
+W: blockedwake: 1: 0
+C: lockcont: 1: 0
+W: locked: 1: 0
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
new file mode 100644
index 000000000000..cdc3e4fd7bac
--- /dev/null
+++ b/scripts/rt-tester/t3-l2-pi.tst
@@ -0,0 +1,92 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 2 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: priolt: 0: 1
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: unlocked: 2: 0
+W: locked: 1: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
new file mode 100644
index 000000000000..baa14137f473
--- /dev/null
+++ b/scripts/rt-tester/t4-l2-pi-deboost.tst
@@ -0,0 +1,123 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 4 threads 2 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+C: schedfifo: 2: 82
+C: schedfifo: 3: 83
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T3 lock L0
+C: lockintnowait: 3: 0
+W: blocked: 3: 0
+T: prioeq: 0: 83
+
+# T0 lock L1
+C: lock: 0: 1
+W: blocked: 0: 1
+T: prioeq: 1: 83
+
+# T1 unlock L1
+C: unlock: 1: 1
+
+# Wait until T0 is in the wakeup code
+W: blockedwake: 0: 1
+
+# Verify that T1 is unboosted
+W: unlocked: 1: 1
+T: priolt: 1: 1
+
+# T2 lock L1 (T0 is boosted and pending owner!)
+C: locknowait: 2: 1
+W: blocked: 2: 1
+T: prioeq: 0: 83
+
+# Interrupt T3 and wait until T3 returned
+C: signal: 3: 0
+W: unlocked: 3: 0
+
+# Verify prio of T0 (still pending owner,
+# but T2 is enqueued due to the previous boost by T3)
+T: prioeq: 0: 82
+
+# Let T0 continue
+C: lockcont: 0: 1
+W: locked: 0: 1
+
+# Unlock L1 and let T2 get L1
+C: unlock: 0: 1
+W: locked: 2: 1
+
+# Verify that T0 is unboosted
+W: unlocked: 0: 1
+T: priolt: 0: 1
+
+# Unlock everything and exit
+C: unlock: 2: 1
+W: unlocked: 2: 1
+
+C: unlock: 0: 0
+W: unlocked: 0: 0
+
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
new file mode 100644
index 000000000000..e6ec0c81b54d
--- /dev/null
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
@@ -0,0 +1,183 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 5 threads 4 lock PI - modify priority of blocked threads
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+C: schedfifo: 3: 83
+C: schedfifo: 4: 84
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L2
+C: locknowait: 2: 2
+W: locked: 2: 2
+
+# T2 lock L1
+C: lockintnowait: 2: 1
+W: blocked: 2: 1
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+
+# T3 lock L3
+C: locknowait: 3: 3
+W: locked: 3: 3
+
+# T3 lock L2
+C: lockintnowait: 3: 2
+W: blocked: 3: 2
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+
+# T4 lock L3
+C: lockintnowait: 4: 3
+W: blocked: 4: 3
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+
+# Reduce prio of T4
+C: schedfifo: 4: 80
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+T: prioeq: 3: 83
+T: prioeq: 4: 80
+
+# Increase prio of T4
+C: schedfifo: 4: 84
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+T: prioeq: 4: 84
+
+# Reduce prio of T3
+C: schedfifo: 3: 80
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+T: prioeq: 4: 84
+
+# Increase prio of T3
+C: schedfifo: 3: 85
+T: prioeq: 0: 85
+T: prioeq: 1: 85
+T: prioeq: 2: 85
+T: prioeq: 3: 85
+T: prioeq: 4: 84
+
+# Reduce prio of T3
+C: schedfifo: 3: 83
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+T: prioeq: 4: 84
+
+# Signal T4
+C: signal: 4: 0
+W: unlocked: 4: 3
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+T: prioeq: 3: 83
+
+# Signal T3
+C: signal: 3: 0
+W: unlocked: 3: 2
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+T: prioeq: 2: 82
+
+# Signal T2
+C: signal: 2: 0
+W: unlocked: 2: 1
+T: prioeq: 0: 81
+T: prioeq: 1: 81
+
+# Signal T1
+C: signal: 1: 0
+W: unlocked: 1: 0
+T: priolt: 0: 1
+
+# Unlock and exit
+C: unlock: 3: 3
+C: unlock: 2: 2
+C: unlock: 1: 1
+C: unlock: 0: 0
+
+W: unlocked: 3: 3
+W: unlocked: 2: 2
+W: unlocked: 1: 1
+W: unlocked: 0: 0
+
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
new file mode 100644
index 000000000000..ca64f8bbf4bc
--- /dev/null
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
@@ -0,0 +1,143 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 5 threads 4 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+C: schedfifo: 3: 83
+C: schedfifo: 4: 84
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L2
+C: locknowait: 2: 2
+W: locked: 2: 2
+
+# T2 lock L1
+C: lockintnowait: 2: 1
+W: blocked: 2: 1
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+
+# T3 lock L3
+C: locknowait: 3: 3
+W: locked: 3: 3
+
+# T3 lock L2
+C: lockintnowait: 3: 2
+W: blocked: 3: 2
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+
+# T4 lock L3
+C: lockintnowait: 4: 3
+W: blocked: 4: 3
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+
+# Signal T4
+C: signal: 4: 0
+W: unlocked: 4: 3
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+T: prioeq: 3: 83
+
+# Signal T3
+C: signal: 3: 0
+W: unlocked: 3: 2
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+T: prioeq: 2: 82
+
+# Signal T2
+C: signal: 2: 0
+W: unlocked: 2: 1
+T: prioeq: 0: 81
+T: prioeq: 1: 81
+
+# Signal T1
+C: signal: 1: 0
+W: unlocked: 1: 0
+T: priolt: 0: 1
+
+# Unlock and exit
+C: unlock: 3: 3
+C: unlock: 2: 2
+C: unlock: 1: 1
+C: unlock: 0: 0
+
+W: unlocked: 3: 3
+W: unlocked: 2: 2
+W: unlocked: 1: 1
+W: unlocked: 0: 0
+
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 9a23825218f2..82e4993f0a73 100644
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -11,12 +11,12 @@ cd "${1:-.}" || usage
# Check for git and a git repo.
if head=`git rev-parse --verify HEAD 2>/dev/null`; then
# Do we have an untagged version?
- if [ "`git name-rev --tags HEAD`" = "HEAD undefined" ]; then
+ if git name-rev --tags HEAD | grep -E '^HEAD[[:space:]]+(.*~[0-9]*|undefined)$' > /dev/null; then
printf '%s%s' -g `echo "$head" | cut -c1-8`
fi
# Are there uncommitted changes?
- if git diff-files | read dummy; then
+ if git diff-index HEAD | read dummy; then
printf '%s' -dirty
fi
fi
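The intent of the two setlocalversion changes above: "git name-rev --tags HEAD" prints something like "HEAD tags/v2.6.17~42" when HEAD lies a few commits beyond a tag and "HEAD undefined" when no tag reaches it, so the new pattern appends the -g<sha1> suffix in both of those cases rather than only the "undefined" one; and "git diff-index HEAD" also notices changes already staged in the index, which "git diff-files" alone would miss, before the -dirty suffix is added.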
diff --git a/security/Kconfig b/security/Kconfig
index 34f593410d57..67785df264e5 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -22,16 +22,22 @@ config KEYS
If you are unsure as to whether this is required, answer N.
config KEYS_DEBUG_PROC_KEYS
- bool "Enable the /proc/keys file by which all keys may be viewed"
+ bool "Enable the /proc/keys file by which keys may be viewed"
depends on KEYS
help
- This option turns on support for the /proc/keys file through which
- all the keys on the system can be listed.
+ This option turns on support for the /proc/keys file, through which
+ all the keys on the system that are viewable by the reading process
+ can be listed.
- This option is a slight security risk in that it makes it possible
- for anyone to see all the keys on the system. Normally the manager
- pretends keys that are inaccessible to a process don't exist as far
- as that process is concerned.
+ The only keys included in the list are those that grant View
+ permission to the reading process whether or not it possesses them.
+ Note that LSM security checks are still performed, and may further
+ filter out keys that the current process is not authorised to view.
+
+ Only key attributes are listed here; key payloads are not included in
+ the resulting table.
+
+ If you are unsure as to whether this is required, answer N.
config SECURITY
bool "Enable different security models"
diff --git a/security/dummy.c b/security/dummy.c
index c3c5493581e2..310fcdf7b749 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -870,7 +870,8 @@ static int dummy_setprocattr(struct task_struct *p, char *name, void *value, siz
}
#ifdef CONFIG_KEYS
-static inline int dummy_key_alloc(struct key *key, struct task_struct *ctx)
+static inline int dummy_key_alloc(struct key *key, struct task_struct *ctx,
+ unsigned long flags)
{
return 0;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index e066e6057955..1bb416f4bbce 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -99,7 +99,9 @@ extern int install_process_keyring(struct task_struct *tsk);
extern struct key *request_key_and_link(struct key_type *type,
const char *description,
const char *callout_info,
- struct key *dest_keyring);
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags);
/*
* request_key authorisation
diff --git a/security/keys/key.c b/security/keys/key.c
index 51f851557389..80de8c3e9cc3 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -11,15 +11,16 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
+#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"
static kmem_cache_t *key_jar;
-static key_serial_t key_serial_next = 3;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
@@ -169,22 +170,23 @@ static void __init __key_insert_serial(struct key *key)
/*****************************************************************************/
/*
* assign a key the next unique serial number
- * - we work through all the serial numbers between 2 and 2^31-1 in turn and
- * then wrap
+ * - these are assigned randomly to avoid security problems through
+ * covert channels
*/
static inline void key_alloc_serial(struct key *key)
{
struct rb_node *parent, **p;
struct key *xkey;
- spin_lock(&key_serial_lock);
-
- /* propose a likely serial number and look for a hole for it in the
+ /* propose a random serial number and look for a hole for it in the
* serial number tree */
- key->serial = key_serial_next;
- if (key->serial < 3)
- key->serial = 3;
- key_serial_next = key->serial + 1;
+ do {
+ get_random_bytes(&key->serial, sizeof(key->serial));
+
+ key->serial >>= 1; /* negative numbers are not permitted */
+ } while (key->serial < 3);
+
+ spin_lock(&key_serial_lock);
parent = NULL;
p = &key_serial_tree.rb_node;
@@ -204,12 +206,11 @@ static inline void key_alloc_serial(struct key *key)
/* we found a key with the proposed serial number - walk the tree from
* that point looking for the next unused serial number */
- serial_exists:
+serial_exists:
for (;;) {
- key->serial = key_serial_next;
+ key->serial++;
if (key->serial < 2)
key->serial = 2;
- key_serial_next = key->serial + 1;
if (!rb_parent(parent))
p = &key_serial_tree.rb_node;
@@ -228,7 +229,7 @@ static inline void key_alloc_serial(struct key *key)
}
/* we've found a suitable hole - arrange for this key to occupy it */
- insert_here:
+insert_here:
rb_link_node(&key->serial_node, parent, p);
rb_insert_color(&key->serial_node, &key_serial_tree);
@@ -248,7 +249,7 @@ static inline void key_alloc_serial(struct key *key)
*/
struct key *key_alloc(struct key_type *type, const char *desc,
uid_t uid, gid_t gid, struct task_struct *ctx,
- key_perm_t perm, int not_in_quota)
+ key_perm_t perm, unsigned long flags)
{
struct key_user *user = NULL;
struct key *key;
@@ -269,12 +270,14 @@ struct key *key_alloc(struct key_type *type, const char *desc,
/* check that the user's quota permits allocation of another key and
* its description */
- if (!not_in_quota) {
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock(&user->lock);
- if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
- user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
- )
- goto no_quota;
+ if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
+ if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
+ user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
+ )
+ goto no_quota;
+ }
user->qnkeys++;
user->qnbytes += quotalen;
@@ -308,7 +311,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->payload.data = NULL;
key->security = NULL;
- if (!not_in_quota)
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
memset(&key->type_data, 0, sizeof(key->type_data));
@@ -318,7 +321,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
#endif
/* let the security module know about the key */
- ret = security_key_alloc(key, ctx);
+ ret = security_key_alloc(key, ctx, flags);
if (ret < 0)
goto security_error;
@@ -332,7 +335,7 @@ error:
security_error:
kfree(key->description);
kmem_cache_free(key_jar, key);
- if (!not_in_quota) {
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock(&user->lock);
user->qnkeys--;
user->qnbytes -= quotalen;
@@ -345,7 +348,7 @@ security_error:
no_memory_3:
kmem_cache_free(key_jar, key);
no_memory_2:
- if (!not_in_quota) {
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock(&user->lock);
user->qnkeys--;
user->qnbytes -= quotalen;
@@ -761,7 +764,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *description,
const void *payload,
size_t plen,
- int not_in_quota)
+ unsigned long flags)
{
struct key_type *ktype;
struct key *keyring, *key = NULL;
@@ -822,7 +825,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
/* allocate a new key */
key = key_alloc(ktype, description, current->fsuid, current->fsgid,
- current, perm, not_in_quota);
+ current, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_PTR(PTR_ERR(key));
goto error_3;
@@ -986,7 +989,7 @@ void unregister_key_type(struct key_type *ktype)
if (key->type == ktype) {
if (ktype->destroy)
ktype->destroy(key);
- memset(&key->payload, 0xbd, sizeof(key->payload));
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
}
}
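The key_alloc_serial() hunk above replaces the global monotonically increasing counter with a random proposal: get_random_bytes() fills the serial, a right shift by one keeps it non-negative, and values below 3 are redrawn; collisions are still resolved by walking the serial tree to the next free number, which is why key_serial_next can go away. A minimal Python sketch of just the proposal loop, for illustration only:

    import random

    def propose_serial():
        # draw a random 32-bit value, drop it to 31 bits so it stays
        # non-negative, and redraw while it is below 3 - mirrors the
        # kernel's do/while loop
        while True:
            serial = random.getrandbits(32) >> 1
            if serial >= 3:
                return serial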
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ed71d86d2ce2..d9ca15c109cc 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -102,7 +102,7 @@ asmlinkage long sys_add_key(const char __user *_type,
/* create or update the requested key and add it to the target
* keyring */
key_ref = key_create_or_update(keyring_ref, type, description,
- payload, plen, 0);
+ payload, plen, KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key_ref)) {
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
@@ -183,8 +183,9 @@ asmlinkage long sys_request_key(const char __user *_type,
}
/* do the search */
- key = request_key_and_link(ktype, description, callout_info,
- key_ref_to_ptr(dest_ref));
+ key = request_key_and_link(ktype, description, callout_info, NULL,
+ key_ref_to_ptr(dest_ref),
+ KEY_ALLOC_IN_QUOTA);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error5;
@@ -672,6 +673,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
*/
long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
{
+ struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
@@ -695,19 +697,50 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
if (uid != (uid_t) -1 && key->uid != uid)
- goto no_access;
+ goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
- goto no_access;
+ goto error_put;
}
- /* change the UID (have to update the quotas) */
+ /* change the UID */
if (uid != (uid_t) -1 && uid != key->uid) {
- /* don't support UID changing yet */
- ret = -EOPNOTSUPP;
- goto no_access;
+ ret = -ENOMEM;
+ newowner = key_user_lookup(uid);
+ if (!newowner)
+ goto error_put;
+
+ /* transfer the quota burden to the new user */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&newowner->lock);
+ if (newowner->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
+ newowner->qnbytes + key->quotalen >=
+ KEYQUOTA_MAX_BYTES)
+ goto quota_overrun;
+
+ newowner->qnkeys++;
+ newowner->qnbytes += key->quotalen;
+ spin_unlock(&newowner->lock);
+
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
+
+ atomic_dec(&key->user->nkeys);
+ atomic_inc(&newowner->nkeys);
+
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ atomic_dec(&key->user->nikeys);
+ atomic_inc(&newowner->nikeys);
+ }
+
+ zapowner = key->user;
+ key->user = newowner;
+ key->uid = uid;
}
/* change the GID */
@@ -716,12 +749,20 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
ret = 0;
- no_access:
+error_put:
up_write(&key->sem);
key_put(key);
- error:
+ if (zapowner)
+ key_user_put(zapowner);
+error:
return ret;
+quota_overrun:
+ spin_unlock(&newowner->lock);
+ zapowner = newowner;
+ ret = -EDQUOT;
+ goto error_put;
+
} /* end keyctl_chown_key() */
/*****************************************************************************/
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 1357207fc9df..e8d02acc51e7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -49,6 +49,7 @@ static inline unsigned keyring_hash(const char *desc)
static int keyring_instantiate(struct key *keyring,
const void *data, size_t datalen);
static int keyring_match(const struct key *keyring, const void *criterion);
+static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
static long keyring_read(const struct key *keyring,
@@ -59,6 +60,7 @@ struct key_type key_type_keyring = {
.def_datalen = sizeof(struct keyring_list),
.instantiate = keyring_instantiate,
.match = keyring_match,
+ .revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
.read = keyring_read,
@@ -240,7 +242,7 @@ static long keyring_read(const struct key *keyring,
* allocate a keyring and link into the destination keyring
*/
struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
- struct task_struct *ctx, int not_in_quota,
+ struct task_struct *ctx, unsigned long flags,
struct key *dest)
{
struct key *keyring;
@@ -249,7 +251,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
keyring = key_alloc(&key_type_keyring, description,
uid, gid, ctx,
(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
- not_in_quota);
+ flags);
if (!IS_ERR(keyring)) {
ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
@@ -953,3 +955,22 @@ int keyring_clear(struct key *keyring)
} /* end keyring_clear() */
EXPORT_SYMBOL(keyring_clear);
+
+/*****************************************************************************/
+/*
+ * dispose of the links from a revoked keyring
+ * - called with the key sem write-locked
+ */
+static void keyring_revoke(struct key *keyring)
+{
+ struct keyring_list *klist = keyring->payload.subscriptions;
+
+ /* adjust the quota */
+ key_payload_reserve(keyring, 0);
+
+ if (klist) {
+ rcu_assign_pointer(keyring->payload.subscriptions, NULL);
+ call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ }
+
+} /* end keyring_revoke() */
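keyring_revoke() above follows the pattern used again later in this patch by user_revoke() for user-defined keys: give the quota back with key_payload_reserve(..., 0), swap the payload pointer to NULL with rcu_assign_pointer(), and free the old payload only through call_rcu(), so that concurrent readers traversing the keyring under rcu_read_lock() see either the old contents or an empty keyring, never freed memory.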
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 12b750e51fbf..686a9ee0c5de 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -137,6 +137,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct timespec now;
unsigned long timo;
char xbuf[12];
+ int rc;
+
+ /* check whether the current task is allowed to view the key (assuming
+ * non-possession) */
+ rc = key_task_permission(make_key_ref(key, 0), current, KEY_VIEW);
+ if (rc < 0)
+ return 0;
now = current_kernel_time();
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 4d9825f9962c..32150cf7c37f 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -77,7 +77,8 @@ int alloc_uid_keyring(struct user_struct *user,
/* concoct a default session keyring */
sprintf(buf, "_uid_ses.%u", user->uid);
- session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx, 0, NULL);
+ session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error;
@@ -87,8 +88,8 @@ int alloc_uid_keyring(struct user_struct *user,
* keyring */
sprintf(buf, "_uid.%u", user->uid);
- uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx, 0,
- session_keyring);
+ uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
+ KEY_ALLOC_IN_QUOTA, session_keyring);
if (IS_ERR(uid_keyring)) {
key_put(session_keyring);
ret = PTR_ERR(uid_keyring);
@@ -144,7 +145,8 @@ int install_thread_keyring(struct task_struct *tsk)
sprintf(buf, "_tid.%u", tsk->pid);
- keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, 1, NULL);
+ keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error;
@@ -178,7 +180,8 @@ int install_process_keyring(struct task_struct *tsk)
if (!tsk->signal->process_keyring) {
sprintf(buf, "_pid.%u", tsk->tgid);
- keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, 1, NULL);
+ keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error;
@@ -209,6 +212,7 @@ error:
static int install_session_keyring(struct task_struct *tsk,
struct key *keyring)
{
+ unsigned long flags;
struct key *old;
char buf[20];
@@ -218,7 +222,12 @@ static int install_session_keyring(struct task_struct *tsk,
if (!keyring) {
sprintf(buf, "_ses.%u", tsk->tgid);
- keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, 1, NULL);
+ flags = KEY_ALLOC_QUOTA_OVERRUN;
+ if (tsk->signal->session_keyring)
+ flags = KEY_ALLOC_IN_QUOTA;
+
+ keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk,
+ flags, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
}
@@ -728,7 +737,8 @@ long join_session_keyring(const char *name)
keyring = find_keyring_by_name(name, 0);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
- keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk, 0, NULL);
+ keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index eab66a06ca53..f573ac189a0a 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -1,6 +1,6 @@
/* request_key.c: request a key from userspace
*
- * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -33,7 +33,8 @@ DECLARE_WAIT_QUEUE_HEAD(request_key_conswq);
*/
static int call_sbin_request_key(struct key *key,
struct key *authkey,
- const char *op)
+ const char *op,
+ void *aux)
{
struct task_struct *tsk = current;
key_serial_t prkey, sskey;
@@ -48,8 +49,8 @@ static int call_sbin_request_key(struct key *key,
/* allocate a new session keyring */
sprintf(desc, "_req.%u", key->serial);
- keyring = keyring_alloc(desc, current->fsuid, current->fsgid,
- current, 1, NULL);
+ keyring = keyring_alloc(desc, current->fsuid, current->fsgid, current,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error_alloc;
@@ -126,7 +127,9 @@ error_alloc:
*/
static struct key *__request_key_construction(struct key_type *type,
const char *description,
- const char *callout_info)
+ const char *callout_info,
+ void *aux,
+ unsigned long flags)
{
request_key_actor_t actor;
struct key_construction cons;
@@ -134,12 +137,12 @@ static struct key *__request_key_construction(struct key_type *type,
struct key *key, *authkey;
int ret, negated;
- kenter("%s,%s,%s", type->name, description, callout_info);
+ kenter("%s,%s,%s,%lx", type->name, description, callout_info, flags);
/* create a key and add it to the queue */
key = key_alloc(type, description,
- current->fsuid, current->fsgid,
- current, KEY_POS_ALL, 0);
+ current->fsuid, current->fsgid, current, KEY_POS_ALL,
+ flags);
if (IS_ERR(key))
goto alloc_failed;
@@ -163,7 +166,7 @@ static struct key *__request_key_construction(struct key_type *type,
actor = call_sbin_request_key;
if (type->request_key)
actor = type->request_key;
- ret = actor(key, authkey, "create");
+ ret = actor(key, authkey, "create", aux);
if (ret < 0)
goto request_failed;
@@ -257,16 +260,18 @@ alloc_failed:
*/
static struct key *request_key_construction(struct key_type *type,
const char *description,
+ const char *callout_info,
+ void *aux,
struct key_user *user,
- const char *callout_info)
+ unsigned long flags)
{
struct key_construction *pcons;
struct key *key, *ckey;
DECLARE_WAITQUEUE(myself, current);
- kenter("%s,%s,{%d},%s",
- type->name, description, user->uid, callout_info);
+ kenter("%s,%s,{%d},%s,%lx",
+ type->name, description, user->uid, callout_info, flags);
/* see if there's such a key under construction already */
down_write(&key_construction_sem);
@@ -282,7 +287,8 @@ static struct key *request_key_construction(struct key_type *type,
}
/* see about getting userspace to construct the key */
- key = __request_key_construction(type, description, callout_info);
+ key = __request_key_construction(type, description, callout_info, aux,
+ flags);
error:
kleave(" = %p", key);
return key;
@@ -389,14 +395,17 @@ static void request_key_link(struct key *key, struct key *dest_keyring)
struct key *request_key_and_link(struct key_type *type,
const char *description,
const char *callout_info,
- struct key *dest_keyring)
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags)
{
struct key_user *user;
struct key *key;
key_ref_t key_ref;
- kenter("%s,%s,%s,%p",
- type->name, description, callout_info, dest_keyring);
+ kenter("%s,%s,%s,%p,%p,%lx",
+ type->name, description, callout_info, aux,
+ dest_keyring, flags);
/* search all the process keyrings for a key */
key_ref = search_process_keyrings(type, description, type->match,
@@ -429,7 +438,8 @@ struct key *request_key_and_link(struct key_type *type,
/* ask userspace (returns NULL if it waited on a key
* being constructed) */
key = request_key_construction(type, description,
- user, callout_info);
+ callout_info, aux,
+ user, flags);
if (key)
break;
@@ -485,8 +495,28 @@ struct key *request_key(struct key_type *type,
const char *description,
const char *callout_info)
{
- return request_key_and_link(type, description, callout_info, NULL);
+ return request_key_and_link(type, description, callout_info, NULL,
+ NULL, KEY_ALLOC_IN_QUOTA);
} /* end request_key() */
EXPORT_SYMBOL(request_key);
+
+/*****************************************************************************/
+/*
+ * request a key with auxiliary data for the upcaller
+ * - search the process's keyrings
+ * - check the list of keys being created or updated
+ * - call out to userspace for a key if supplementary info was provided
+ */
+struct key *request_key_with_auxdata(struct key_type *type,
+ const char *description,
+ const char *callout_info,
+ void *aux)
+{
+ return request_key_and_link(type, description, callout_info, aux,
+ NULL, KEY_ALLOC_IN_QUOTA);
+
+} /* end request_key_with_auxdata() */
+
+EXPORT_SYMBOL(request_key_with_auxdata);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index cb9817ced3fd..cbf58a91b00a 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -187,7 +187,7 @@ struct key *request_key_auth_new(struct key *target, const char *callout_info)
authkey = key_alloc(&key_type_request_key_auth, desc,
current->fsuid, current->fsgid, current,
KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
- KEY_USR_VIEW, 1);
+ KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
if (IS_ERR(authkey)) {
ret = PTR_ERR(authkey);
goto error_alloc;
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 8e71895b97a7..5bbfdebb7acf 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -28,6 +28,7 @@ struct key_type key_type_user = {
.instantiate = user_instantiate,
.update = user_update,
.match = user_match,
+ .revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
.read = user_read,
@@ -67,6 +68,7 @@ error:
return ret;
} /* end user_instantiate() */
+
EXPORT_SYMBOL_GPL(user_instantiate);
/*****************************************************************************/
@@ -141,7 +143,28 @@ EXPORT_SYMBOL_GPL(user_match);
/*****************************************************************************/
/*
- * dispose of the data dangling from the corpse of a user
+ * dispose of the payload of a revoked user-defined key
+ * - called with the key sem write-locked
+ */
+void user_revoke(struct key *key)
+{
+ struct user_key_payload *upayload = key->payload.data;
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+
+ if (upayload) {
+ rcu_assign_pointer(key->payload.data, NULL);
+ call_rcu(&upayload->rcu, user_update_rcu_disposal);
+ }
+
+} /* end user_revoke() */
+
+EXPORT_SYMBOL(user_revoke);
+
+/*****************************************************************************/
+/*
+ * dispose of the data dangling from the corpse of a user key
*/
void user_destroy(struct key *key)
{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 79c16e31c884..28832e689800 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1099,6 +1099,17 @@ static int may_create(struct inode *dir,
FILESYSTEM__ASSOCIATE, &ad);
}
+/* Check whether a task can create a key. */
+static int may_create_key(u32 ksid,
+ struct task_struct *ctx)
+{
+ struct task_security_struct *tsec;
+
+ tsec = ctx->security;
+
+ return avc_has_perm(tsec->sid, ksid, SECCLASS_KEY, KEY__CREATE, NULL);
+}
+
#define MAY_LINK 0
#define MAY_UNLINK 1
#define MAY_RMDIR 2
@@ -1521,8 +1532,10 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm)
/* Default to the current task SID. */
bsec->sid = tsec->sid;
- /* Reset create SID on execve. */
+ /* Reset fs, key, and sock SIDs on execve. */
tsec->create_sid = 0;
+ tsec->keycreate_sid = 0;
+ tsec->sockcreate_sid = 0;
if (tsec->exec_sid) {
newsid = tsec->exec_sid;
@@ -2574,9 +2587,11 @@ static int selinux_task_alloc_security(struct task_struct *tsk)
tsec2->osid = tsec1->osid;
tsec2->sid = tsec1->sid;
- /* Retain the exec and create SIDs across fork */
+ /* Retain the exec, fs, key, and sock SIDs across fork */
tsec2->exec_sid = tsec1->exec_sid;
tsec2->create_sid = tsec1->create_sid;
+ tsec2->keycreate_sid = tsec1->keycreate_sid;
+ tsec2->sockcreate_sid = tsec1->sockcreate_sid;
/* Retain ptracer SID across fork, if any.
This will be reset by the ptrace hook upon any
@@ -2926,12 +2941,14 @@ static int selinux_socket_create(int family, int type,
{
int err = 0;
struct task_security_struct *tsec;
+ u32 newsid;
if (kern)
goto out;
tsec = current->security;
- err = avc_has_perm(tsec->sid, tsec->sid,
+ newsid = tsec->sockcreate_sid ? : tsec->sid;
+ err = avc_has_perm(tsec->sid, newsid,
socket_type_to_security_class(family, type,
protocol), SOCKET__CREATE, NULL);
@@ -2944,12 +2961,14 @@ static void selinux_socket_post_create(struct socket *sock, int family,
{
struct inode_security_struct *isec;
struct task_security_struct *tsec;
+ u32 newsid;
isec = SOCK_INODE(sock)->i_security;
tsec = current->security;
+ newsid = tsec->sockcreate_sid ? : tsec->sid;
isec->sclass = socket_type_to_security_class(family, type, protocol);
- isec->sid = kern ? SECINITSID_KERNEL : tsec->sid;
+ isec->sid = kern ? SECINITSID_KERNEL : newsid;
isec->initialized = 1;
return;
@@ -4150,6 +4169,10 @@ static int selinux_getprocattr(struct task_struct *p,
sid = tsec->exec_sid;
else if (!strcmp(name, "fscreate"))
sid = tsec->create_sid;
+ else if (!strcmp(name, "keycreate"))
+ sid = tsec->keycreate_sid;
+ else if (!strcmp(name, "sockcreate"))
+ sid = tsec->sockcreate_sid;
else
return -EINVAL;
@@ -4182,6 +4205,10 @@ static int selinux_setprocattr(struct task_struct *p,
error = task_has_perm(current, p, PROCESS__SETEXEC);
else if (!strcmp(name, "fscreate"))
error = task_has_perm(current, p, PROCESS__SETFSCREATE);
+ else if (!strcmp(name, "keycreate"))
+ error = task_has_perm(current, p, PROCESS__SETKEYCREATE);
+ else if (!strcmp(name, "sockcreate"))
+ error = task_has_perm(current, p, PROCESS__SETSOCKCREATE);
else if (!strcmp(name, "current"))
error = task_has_perm(current, p, PROCESS__SETCURRENT);
else
@@ -4211,6 +4238,13 @@ static int selinux_setprocattr(struct task_struct *p,
tsec->exec_sid = sid;
else if (!strcmp(name, "fscreate"))
tsec->create_sid = sid;
+ else if (!strcmp(name, "keycreate")) {
+ error = may_create_key(sid, p);
+ if (error)
+ return error;
+ tsec->keycreate_sid = sid;
+ } else if (!strcmp(name, "sockcreate"))
+ tsec->sockcreate_sid = sid;
else if (!strcmp(name, "current")) {
struct av_decision avd;
@@ -4264,7 +4298,8 @@ static int selinux_setprocattr(struct task_struct *p,
#ifdef CONFIG_KEYS
-static int selinux_key_alloc(struct key *k, struct task_struct *tsk)
+static int selinux_key_alloc(struct key *k, struct task_struct *tsk,
+ unsigned long flags)
{
struct task_security_struct *tsec = tsk->security;
struct key_security_struct *ksec;
@@ -4274,7 +4309,10 @@ static int selinux_key_alloc(struct key *k, struct task_struct *tsk)
return -ENOMEM;
ksec->obj = k;
- ksec->sid = tsec->sid;
+ if (tsec->keycreate_sid)
+ ksec->sid = tsec->keycreate_sid;
+ else
+ ksec->sid = tsec->sid;
k->security = ksec;
return 0;
@@ -4513,8 +4551,10 @@ static __init int selinux_init(void)
#ifdef CONFIG_KEYS
/* Add security information to initial keyrings */
- security_key_alloc(&root_user_keyring, current);
- security_key_alloc(&root_session_keyring, current);
+ selinux_key_alloc(&root_user_keyring, current,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ selinux_key_alloc(&root_session_keyring, current,
+ KEY_ALLOC_NOT_IN_QUOTA);
#endif
return 0;
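Taken together, the SELinux hooks above let a task pick the security context of keys it is about to create, mirroring the existing fscreate mechanism: the "keycreate" process attribute stores the SID, setting it requires the new process:setkeycreate permission plus key:create on the chosen context, and selinux_key_alloc() then labels new keys with that SID instead of the task SID. A sketch of the expected use from userspace - the context string is made up, and the path is assumed to follow the usual /proc/<pid>/attr convention:

    # hypothetical example: label the next keys this task creates
    f = open("/proc/self/attr/keycreate", "w")
    f.write("system_u:object_r:example_key_t")
    f.close()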
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index bc020bde6c86..7c9b58380833 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -72,6 +72,8 @@
S_(SECCLASS_PROCESS, PROCESS__EXECMEM, "execmem")
S_(SECCLASS_PROCESS, PROCESS__EXECSTACK, "execstack")
S_(SECCLASS_PROCESS, PROCESS__EXECHEAP, "execheap")
+ S_(SECCLASS_PROCESS, PROCESS__SETKEYCREATE, "setkeycreate")
+ S_(SECCLASS_PROCESS, PROCESS__SETSOCKCREATE, "setsockcreate")
S_(SECCLASS_MSGQ, MSGQ__ENQUEUE, "enqueue")
S_(SECCLASS_MSG, MSG__SEND, "send")
S_(SECCLASS_MSG, MSG__RECEIVE, "receive")
@@ -248,3 +250,4 @@
S_(SECCLASS_KEY, KEY__SEARCH, "search")
S_(SECCLASS_KEY, KEY__LINK, "link")
S_(SECCLASS_KEY, KEY__SETATTR, "setattr")
+ S_(SECCLASS_KEY, KEY__CREATE, "create")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index 1205227a3a33..69fd4b48202c 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -467,6 +467,8 @@
#define PROCESS__EXECMEM 0x02000000UL
#define PROCESS__EXECSTACK 0x04000000UL
#define PROCESS__EXECHEAP 0x08000000UL
+#define PROCESS__SETKEYCREATE 0x10000000UL
+#define PROCESS__SETSOCKCREATE 0x20000000UL
#define IPC__CREATE 0x00000001UL
#define IPC__DESTROY 0x00000002UL
@@ -966,4 +968,4 @@
#define KEY__SEARCH 0x00000008UL
#define KEY__LINK 0x00000010UL
#define KEY__SETATTR 0x00000020UL
-
+#define KEY__CREATE 0x00000040UL
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 8f5547ad1856..cf54a304169a 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -32,6 +32,8 @@ struct task_security_struct {
u32 sid; /* current SID */
u32 exec_sid; /* exec SID */
u32 create_sid; /* fscreate SID */
+ u32 keycreate_sid; /* keycreate SID */
+ u32 sockcreate_sid; /* sockcreate SID */
u32 ptrace_sid; /* SID of ptrace parent */
};
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 5f22d70fefc0..6b18225672c7 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -779,8 +779,9 @@ static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
strlcpy(card->shortname, "ARM AC'97 Interface", sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
- "%s at 0x%08lx, irq %d",
- card->shortname, dev->res.start, dev->irq[0]);
+ "%s at 0x%016llx, irq %d",
+ card->shortname, (unsigned long long)dev->res.start,
+ dev->irq[0]);
aaci = card->private_data;
mutex_init(&aaci->ac97_sem);
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 39c60d9e1efc..63e91431a29f 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -31,7 +31,7 @@ struct snd_seq_event_cell {
struct snd_seq_event_cell *next; /* next cell */
};
-/* design note: the pool is a contigious block of memory, if we dynamicly
+/* design note: the pool is a contiguous block of memory, if we dynamically
want to add additional cells to the pool be better store this in another
pool as we need to know the base address of the pool when releasing
memory. */
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index d3cbbb047582..8b80024968be 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -160,8 +160,9 @@ static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device,
return -ENODEV;
}
if (pnp_port_len(device, 0) < IO_EXTENT) {
- snd_printk(KERN_ERR "PnP port length is %ld, expected %d\n",
- pnp_port_len(device, 0), IO_EXTENT);
+ snd_printk(KERN_ERR "PnP port length is %llu, expected %d\n",
+ (unsigned long long)pnp_port_len(device, 0),
+ IO_EXTENT);
return -ENODEV;
}
port[dev] = pnp_port_start(device, 0);
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index e6945db8ed1b..af60b0bc8115 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -2088,7 +2088,8 @@ static int __devinit snd_audiodrive_pnp(int dev, struct snd_audiodrive *acard,
kfree(cfg);
return -EAGAIN;
}
- snd_printdd("pnp: port=0x%lx\n", pnp_port_start(acard->devc, 0));
+ snd_printdd("pnp: port=0x%llx\n",
+ (unsigned long long)pnp_port_start(acard->devc, 0));
/* PnP initialization */
pdev = acard->dev;
pnp_init_resource_table(cfg);
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index 866300f2acbb..c1c86e0fa56d 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -611,10 +611,10 @@ static int __devinit snd_interwave_pnp(int dev, struct snd_interwave *iwcard,
if (dma2[dev] >= 0)
dma2[dev] = pnp_dma(pdev, 1);
irq[dev] = pnp_irq(pdev, 0);
- snd_printdd("isapnp IW: sb port=0x%lx, gf1 port=0x%lx, codec port=0x%lx\n",
- pnp_port_start(pdev, 0),
- pnp_port_start(pdev, 1),
- pnp_port_start(pdev, 2));
+ snd_printdd("isapnp IW: sb port=0x%llx, gf1 port=0x%llx, codec port=0x%llx\n",
+ (unsigned long long)pnp_port_start(pdev, 0),
+ (unsigned long long)pnp_port_start(pdev, 1),
+ (unsigned long long)pnp_port_start(pdev, 2));
snd_printdd("isapnp IW: dma1=%i, dma2=%i, irq=%i\n", dma1[dev], dma2[dev], irq[dev]);
#ifdef SNDRV_STB
/* Tone Control initialization */
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 7f7f05fa518a..d64e67f2bafa 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -327,7 +327,8 @@ static int __devinit snd_card_sb16_pnp(int dev, struct snd_card_sb16 *acard,
goto __wt_error;
}
awe_port[dev] = pnp_port_start(pdev, 0);
- snd_printdd("pnp SB16: wavetable port=0x%lx\n", pnp_port_start(pdev, 0));
+ snd_printdd("pnp SB16: wavetable port=0x%llx\n",
+ (unsigned long long)pnp_port_start(pdev, 0));
} else {
__wt_error:
if (pdev) {
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 95754e2f71b8..f4980ca5c05c 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -493,6 +493,19 @@ config SOUND_CS4232
See <file:Documentation/sound/oss/CS4232> for more information on
configuring this card.
+config SOUND_SSCAPE
+ tristate "Ensoniq SoundScape support"
+ depends on SOUND_OSS
+ help
+ Answer Y if you have a sound card based on the Ensoniq SoundScape
+ chipset. Such cards are manufactured by at least Ensoniq, Spea
+ and Reveal (Reveal also makes other cards).
+
+ If you compile the driver into the kernel, you have to add
+ "sscape=<io>,<irq>,<dma>,<mpuio>,<mpuirq>" to the kernel command
+ line.
+
+
config SOUND_VMIDI
tristate "Loopback MIDI device support"
depends on SOUND_OSS
@@ -839,6 +852,6 @@ config SOUND_SH_DAC_AUDIO
depends on SOUND_PRIME && CPU_SH3
config SOUND_SH_DAC_AUDIO_CHANNEL
- int " DAC channel"
+ int "DAC channel"
default "1"
depends on SOUND_SH_DAC_AUDIO
diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c
index c7f86f09c28d..80f6c08e26e7 100644
--- a/sound/oss/cs4232.c
+++ b/sound/oss/cs4232.c
@@ -405,7 +405,7 @@ static const struct pnp_device_id cs4232_pnp_table[] = {
MODULE_DEVICE_TABLE(pnp, cs4232_pnp_table);
-static int cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+static int __init cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct address_info *isapnpcfg;
diff --git a/sound/oss/forte.c b/sound/oss/forte.c
index 0294eec8ad90..44e578098d76 100644
--- a/sound/oss/forte.c
+++ b/sound/oss/forte.c
@@ -2035,8 +2035,9 @@ forte_probe (struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
pci_set_drvdata (pci_dev, chip);
- printk (KERN_INFO PFX "FM801 chip found at 0x%04lX-0x%04lX IRQ %u\n",
- chip->iobase, pci_resource_end (pci_dev, 0), chip->irq);
+ printk (KERN_INFO PFX "FM801 chip found at 0x%04lX-0x%016llX IRQ %u\n",
+ chip->iobase, (unsigned long long)pci_resource_end (pci_dev, 0),
+ chip->irq);
/* Power it up */
if ((ret = forte_chip_init (chip)) == 0)
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
index fae05fe3de43..180e95c87e3e 100644
--- a/sound/oss/sb_ess.c
+++ b/sound/oss/sb_ess.c
@@ -97,19 +97,19 @@
*
* The documentation is an adventure: it's close but not fully accurate. I
* found out that after a reset some registers are *NOT* reset, though the
- * docs say the would be. Interresting ones are 0x7f, 0x7d and 0x7a. They are
- * related to the Audio 2 channel. I also was suprised about the consequenses
+ * docs say they would be. Interesting ones are 0x7f, 0x7d and 0x7a. They are
+ * related to the Audio 2 channel. I also was surprised about the consequences
* of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves
* into ES1888 mode. This means that it claims IRQ 11, which happens to be my
* ISDN adapter. Needless to say it no longer worked. I now understand why
* after rebooting 0x7f already was 0x05, the value of my choice: the BIOS
* did it.
*
- * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is decribed
- * as if it's exactly the same as register 0xa1. This is *NOT* true. The
- * description of 0x70 in ES1869 docs is accurate however.
+ * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is
+ * described as if it's exactly the same as register 0xa1. This is *NOT* true.
+ * The description of 0x70 in ES1869 docs is accurate however.
* Well, the assumption about ES1869 was wrong: register 0x70 is very much
- * like register 0xa1, except that bit 7 is allways 1, whatever you want
+ * like register 0xa1, except that bit 7 is always 1, whatever you want
* it to be.
*
* When using audio 2 mixer register 0x72 seems te be meaningless. Only 0xa2
@@ -117,10 +117,10 @@
*
* Software reset not being able to reset all registers is great! Especially
* the fact that register 0x78 isn't reset is great when you wanna change back
- * to single dma operation (simplex): audio 2 is still operation, and uses the
- * same dma as audio 1: your ess changes into a funny echo machine.
+ * to single dma operation (simplex): audio 2 is still operational, and uses
+ * the same dma as audio 1: your ess changes into a funny echo machine.
*
- * Received the new that ES1688 is detected as a ES1788. Did some thinking:
+ * Received the news that ES1688 is detected as an ES1788. Did some thinking:
* the ES1887 detection scheme suggests in step 2 to try if bit 3 of register
* 0x64 can be changed. This is inaccurate, first I inverted the * check: "If
* can be modified, it's a 1688", which lead to a correct detection
@@ -135,7 +135,7 @@
* About recognition of ESS chips
*
* The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in
- * a (preliminary ??) datasheet on ES1887. It's aim is to identify ES1887, but
+ * a (preliminary ??) datasheet on ES1887. Its aim is to identify ES1887, but
* during detection the text claims that "this chip may be ..." when a step
* fails. This scheme is used to distinct between the above chips.
* It appears however that some PnP chips like ES1868 are recognized as ES1788
@@ -156,9 +156,9 @@
*
* The existing ES1688 support didn't take care of the ES1688+ recording
* levels very well. Whenever a device was selected (recmask) for recording
- * it's recording level was loud, and it couldn't be changed. The fact that
+ * its recording level was loud, and it couldn't be changed. The fact that
* internal register 0xb4 could take care of RECLEV, didn't work meaning until
- * it's value was restored every time the chip was reset; this reset the
+ * its value was restored every time the chip was reset; this reset the
* value of 0xb4 too. I guess that's what 4front also had (have?) trouble with.
*
* About ES1887 support:
@@ -169,9 +169,9 @@
* the latter case the recording volumes are 0.
* Now recording levels of inputs can be controlled, by changing the playback
* levels. Futhermore several devices can be recorded together (which is not
- * possible with the ES1688.
+ * possible with the ES1688).
* Besides the separate recording level control for each input, the common
- * recordig level can also be controlled by RECLEV as described above.
+ * recording level can also be controlled by RECLEV as described above.
*
* Not only ES1887 have this recording mixer. I know the following from the
* documentation:
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
index 1a921ee71aba..29a6e0cff79f 100644
--- a/sound/oss/via82cxxx_audio.c
+++ b/sound/oss/via82cxxx_audio.c
@@ -24,6 +24,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
@@ -308,7 +309,7 @@ struct via_info {
unsigned sixchannel: 1; /* 8233/35 with 6 channel support */
unsigned volume: 1;
- int locked_rate : 1;
+ unsigned locked_rate : 1;
int mixer_vol; /* 8233/35 volume - not yet implemented */
@@ -3522,7 +3523,7 @@ err_out_have_mixer:
err_out_kfree:
#ifndef VIA_NDEBUG
- memset (card, 0xAB, sizeof (*card)); /* poison memory */
+ memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
#endif
kfree (card);
@@ -3559,7 +3560,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev)
via_ac97_cleanup (card);
#ifndef VIA_NDEBUG
- memset (card, 0xAB, sizeof (*card)); /* poison memory */
+ memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
#endif
kfree (card);
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index c33642d8d9a1..497ed6b20060 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -888,8 +888,9 @@ static int __devinit snd_bt87x_probe(struct pci_dev *pci,
strcpy(card->driver, "Bt87x");
sprintf(card->shortname, "Brooktree Bt%x", pci->device);
- sprintf(card->longname, "%s at %#lx, irq %i",
- card->shortname, pci_resource_start(pci, 0), chip->irq);
+ sprintf(card->longname, "%s at %#llx, irq %i",
+ card->shortname, (unsigned long long)pci_resource_start(pci, 0),
+ chip->irq);
strcpy(card->mixername, "Bt87x");
err = snd_card_register(card);
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index f0a48693d687..5450a9e8f133 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -143,7 +143,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
if (dma->periods == periods && dma->period_bytes == period_bytes)
return 0;
- /* the u32 cast is okay because in snd*create we succesfully told
+ /* the u32 cast is okay because in snd*create we successfully told
pci alloc that we're only 32 bit capable so the uppper will be 0 */
addr = (u32) substream->runtime->dma_addr;
desc_addr = (u32) dma->desc_buf.addr;
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index dcf402948347..e5511606af04 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -1441,10 +1441,10 @@ static int __devinit snd_sonic_probe(struct pci_dev *pci,
strcpy(card->driver, "SonicVibes");
strcpy(card->shortname, "S3 SonicVibes");
- sprintf(card->longname, "%s rev %i at 0x%lx, irq %i",
+ sprintf(card->longname, "%s rev %i at 0x%llx, irq %i",
card->shortname,
sonic->revision,
- pci_resource_start(pci, 1),
+ (unsigned long long)pci_resource_start(pci, 1),
sonic->irq);
if ((err = snd_sonicvibes_pcm(sonic, 0, NULL)) < 0) {
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index b678814975c9..be98f6377339 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -1170,9 +1170,10 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
chip->rsrc[i].start + 1,
rnames[i]) == NULL) {
printk(KERN_ERR "snd: can't request rsrc "
- " %d (%s: 0x%08lx:%08lx)\n",
- i, rnames[i], chip->rsrc[i].start,
- chip->rsrc[i].end);
+ " %d (%s: 0x%016llx:%016llx)\n",
+ i, rnames[i],
+ (unsigned long long)chip->rsrc[i].start,
+ (unsigned long long)chip->rsrc[i].end);
err = -ENODEV;
goto __error;
}
@@ -1201,9 +1202,10 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
chip->rsrc[i].start + 1,
rnames[i]) == NULL) {
printk(KERN_ERR "snd: can't request rsrc "
- " %d (%s: 0x%08lx:%08lx)\n",
- i, rnames[i], chip->rsrc[i].start,
- chip->rsrc[i].end);
+ " %d (%s: 0x%016llx:%016llx)\n",
+ i, rnames[i],
+ (unsigned long long)chip->rsrc[i].start,
+ (unsigned long long)chip->rsrc[i].end);
err = -ENODEV;
goto __error;
}
diff --git a/sound/ppc/toonie.c b/sound/ppc/toonie.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/sound/ppc/toonie.c
+++ /dev/null
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index da54d04a3e3a..d9d14c2707db 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -2037,10 +2037,10 @@ static int __init cs4231_sbus_attach(struct sbus_dev *sdev)
if (err)
return err;
- sprintf(card->longname, "%s at 0x%02lx:0x%08lx, irq %d",
+ sprintf(card->longname, "%s at 0x%02lx:0x%016llx, irq %d",
card->shortname,
rp->flags & 0xffL,
- rp->start,
+ (unsigned long long)rp->start,
sdev->irqs[0]);
if ((err = snd_cs4231_sbus_create(card, sdev, dev, &cp)) < 0) {
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 5eecdd09a79d..a7489a3dd75a 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2645,9 +2645,9 @@ static int __init dbri_attach(int prom_node, struct sbus_dev *sdev)
strcpy(card->driver, "DBRI");
strcpy(card->shortname, "Sun DBRI");
rp = &sdev->resource[0];
- sprintf(card->longname, "%s at 0x%02lx:0x%08lx, irq %d",
+ sprintf(card->longname, "%s at 0x%02lx:0x%016llx, irq %d",
card->shortname,
- rp->flags & 0xffL, rp->start, irq.pri);
+ rp->flags & 0xffL, (unsigned long long)rp->start, irq.pri);
if ((err = snd_dbri_create(card, sdev, &irq, dev)) < 0) {
snd_card_free(card);
diff --git a/usr/Makefile b/usr/Makefile
index 19d74e6f2685..e93824269da2 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -21,8 +21,7 @@ ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
$(CONFIG_INITRAMFS_SOURCE),-d)
ramfs-args := \
$(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
- $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) \
- $(ramfs-input)
+ $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID))
# .initramfs_data.cpio.gz.d is used to identify all files included
# in initramfs and to detect if any files are added/removed.